problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens | num_tokens_diff
---|---|---|---|---|---|---|---|---|
stringlengths 18–22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13–58 | stringlengths 1.1k–25.4k | stringlengths 145–5.13k | stringlengths 582–39.1k | int64 271–4.1k | int64 47–1.02k |
gh_patches_debug_13846 | rasdani/github-patches | git_diff | wagtail__wagtail-7122 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hallo editor does not work in StructBlock
I upgraded an old Wagtail project to test Wagtail 2.13rc1 and found that text is not rendered into the Hallo.js rich text editor if the editor is part of a StructBlock. The editor behaves just fine if it is used in its own RichTextField.
### Steps to Reproduce
I created a minimal repository to demonstrate this issue: https://github.com/cnk/hallo-issue. I have included the sqlite database in the repository so you should be able to see this in action by:
1. check out https://github.com/cnk/hallo-issue
2. Activate a virtual environment and `pip install -r requirements.txt`
3. `./manage.py createsuperuser`
4. `./manage.py runserver`
### Technical details
* Python version: I used 3.7.7
* Django version: 3.2
* Wagtail version: 2.13rc1
* Browser version: I tried it with Firefox and Chrome.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/rich_text/editors/hallo.py`
Content:
```
1 import json
2
3 from collections import OrderedDict
4
5 from django.forms import Media, widgets
6 from django.utils.functional import cached_property
7
8 from wagtail.admin.edit_handlers import RichTextFieldPanel
9 from wagtail.admin.rich_text.converters.editor_html import EditorHTMLConverter
10 from wagtail.admin.staticfiles import versioned_static
11 from wagtail.core.rich_text import features
12
13
14 class HalloPlugin:
15 def __init__(self, **kwargs):
16 self.name = kwargs.get('name', None)
17 self.options = kwargs.get('options', {})
18 self.js = kwargs.get('js', [])
19 self.css = kwargs.get('css', {})
20 self.order = kwargs.get('order', 100)
21
22 def construct_plugins_list(self, plugins):
23 if self.name is not None:
24 plugins[self.name] = self.options
25
26 @property
27 def media(self):
28 js = [versioned_static(js_file) for js_file in self.js]
29 css = {}
30 for media_type, css_files in self.css.items():
31 css[media_type] = [versioned_static(css_file) for css_file in css_files]
32
33 return Media(js=js, css=css)
34
35
36 class HalloFormatPlugin(HalloPlugin):
37 def __init__(self, **kwargs):
38 kwargs.setdefault('name', 'halloformat')
39 kwargs.setdefault('order', 10)
40 self.format_name = kwargs['format_name']
41 super().__init__(**kwargs)
42
43 def construct_plugins_list(self, plugins):
44 plugins.setdefault(self.name, {'formattings': {
45 'bold': False, 'italic': False, 'strikeThrough': False, 'underline': False
46 }})
47 plugins[self.name]['formattings'][self.format_name] = True
48
49
50 class HalloHeadingPlugin(HalloPlugin):
51 default_order = 20
52
53 def __init__(self, **kwargs):
54 kwargs.setdefault('name', 'halloheadings')
55 kwargs.setdefault('order', self.default_order)
56 self.element = kwargs.pop('element')
57 super().__init__(**kwargs)
58
59 def construct_plugins_list(self, plugins):
60 plugins.setdefault(self.name, {'formatBlocks': []})
61 plugins[self.name]['formatBlocks'].append(self.element)
62
63
64 class HalloListPlugin(HalloPlugin):
65 def __init__(self, **kwargs):
66 kwargs.setdefault('name', 'hallolists')
67 kwargs.setdefault('order', 40)
68 self.list_type = kwargs['list_type']
69 super().__init__(**kwargs)
70
71 def construct_plugins_list(self, plugins):
72 plugins.setdefault(self.name, {'lists': {
73 'ordered': False, 'unordered': False
74 }})
75 plugins[self.name]['lists'][self.list_type] = True
76
77
78 class HalloRequireParagraphsPlugin(HalloPlugin):
79 @property
80 def media(self):
81 return Media(js=[
82 versioned_static('wagtailadmin/js/hallo-plugins/hallo-requireparagraphs.js'),
83 ]) + super().media
84
85
86 # Plugins which are always imported, and cannot be enabled/disabled via 'features'
87 CORE_HALLO_PLUGINS = [
88 HalloPlugin(name='halloreundo', order=50),
89 HalloRequireParagraphsPlugin(name='hallorequireparagraphs'),
90 HalloHeadingPlugin(element='p')
91 ]
92
93
94 class HalloRichTextArea(widgets.Textarea):
95 template_name = 'wagtailadmin/widgets/hallo_rich_text_area.html'
96
97 # this class's constructor accepts a 'features' kwarg
98 accepts_features = True
99
100 def get_panel(self):
101 return RichTextFieldPanel
102
103 def __init__(self, *args, **kwargs):
104 self.options = kwargs.pop('options', None)
105
106 self.features = kwargs.pop('features', None)
107 if self.features is None:
108 self.features = features.get_default_features()
109
110 self.converter = EditorHTMLConverter(self.features)
111
112 # construct a list of plugin objects, by querying the feature registry
113 # and keeping the non-null responses from get_editor_plugin
114 self.plugins = CORE_HALLO_PLUGINS + list(filter(None, [
115 features.get_editor_plugin('hallo', feature_name)
116 for feature_name in self.features
117 ]))
118 self.plugins.sort(key=lambda plugin: plugin.order)
119
120 super().__init__(*args, **kwargs)
121
122 def format_value(self, value):
123 # Convert database rich text representation to the format required by
124 # the input field
125 value = super().format_value(value)
126
127 if value is None:
128 return None
129
130 return self.converter.from_database_format(value)
131
132 def get_context(self, name, value, attrs):
133 context = super().get_context(name, value, attrs)
134
135 if self.options is not None and 'plugins' in self.options:
136 # explicit 'plugins' config passed in options, so use that
137 plugin_data = self.options['plugins']
138 else:
139 plugin_data = OrderedDict()
140 for plugin in self.plugins:
141 plugin.construct_plugins_list(plugin_data)
142 context['widget']['plugins_json'] = json.dumps(plugin_data)
143
144 return context
145
146 def value_from_datadict(self, data, files, name):
147 original_value = super().value_from_datadict(data, files, name)
148 if original_value is None:
149 return None
150 return self.converter.to_database_format(original_value)
151
152 @cached_property
153 def media(self):
154 media = Media(js=[
155 versioned_static('wagtailadmin/js/vendor/hallo.js'),
156 versioned_static('wagtailadmin/js/hallo-bootstrap.js'),
157 ], css={
158 'all': [versioned_static('wagtailadmin/css/panels/hallo.css')]
159 })
160
161 for plugin in self.plugins:
162 media += plugin.media
163
164 return media
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/admin/rich_text/editors/hallo.py b/wagtail/admin/rich_text/editors/hallo.py
--- a/wagtail/admin/rich_text/editors/hallo.py
+++ b/wagtail/admin/rich_text/editors/hallo.py
@@ -9,6 +9,8 @@
from wagtail.admin.rich_text.converters.editor_html import EditorHTMLConverter
from wagtail.admin.staticfiles import versioned_static
from wagtail.core.rich_text import features
+from wagtail.core.telepath import register
+from wagtail.core.widget_adapters import WidgetAdapter
class HalloPlugin:
@@ -162,3 +164,10 @@
media += plugin.media
return media
+
+
+class HalloRichTextAreaAdapter(WidgetAdapter):
+ js_constructor = 'wagtail.widgets.HalloRichTextArea'
+
+
+register(HalloRichTextAreaAdapter(), HalloRichTextArea)
| {"golden_diff": "diff --git a/wagtail/admin/rich_text/editors/hallo.py b/wagtail/admin/rich_text/editors/hallo.py\n--- a/wagtail/admin/rich_text/editors/hallo.py\n+++ b/wagtail/admin/rich_text/editors/hallo.py\n@@ -9,6 +9,8 @@\n from wagtail.admin.rich_text.converters.editor_html import EditorHTMLConverter\n from wagtail.admin.staticfiles import versioned_static\n from wagtail.core.rich_text import features\n+from wagtail.core.telepath import register\n+from wagtail.core.widget_adapters import WidgetAdapter\n \n \n class HalloPlugin:\n@@ -162,3 +164,10 @@\n media += plugin.media\n \n return media\n+\n+\n+class HalloRichTextAreaAdapter(WidgetAdapter):\n+ js_constructor = 'wagtail.widgets.HalloRichTextArea'\n+\n+\n+register(HalloRichTextAreaAdapter(), HalloRichTextArea)\n", "issue": "Hallo editor does not work in StructBlock\nI upgraded an old Wagtail project to test Wagtail 2.13rc1 and found that text is not rendered into the Hallo.js rich text editor if the editor is part of a StructBlock. The editor behaves just fine if it is used in its own RichTextField. \r\n\r\n### Steps to Reproduce\r\n\r\nI created a minimal repository to demostrate this issue: https://github.com/cnk/hallo-issue I have included the sqlite database in the repository so you should be able to see this in action by: \r\n\r\n1. check out https://github.com/cnk/hallo-issue\r\n2. Activate a virtual environment and `pip install -r requirements.txt`\r\n3. `./manage.py createsuperuser`\r\n4. `./manage.py runserver`\r\n\r\n### Technical details\r\n\r\n* Python version: I used 3.7.7\r\n* Django version: 3.2\r\n* Wagtail version: 2.13rc1\r\n* Browser version: I tried it with Firefox and Chrome.\r\n\n", "before_files": [{"content": "import json\n\nfrom collections import OrderedDict\n\nfrom django.forms import Media, widgets\nfrom django.utils.functional import cached_property\n\nfrom wagtail.admin.edit_handlers import RichTextFieldPanel\nfrom wagtail.admin.rich_text.converters.editor_html import EditorHTMLConverter\nfrom wagtail.admin.staticfiles import versioned_static\nfrom wagtail.core.rich_text import features\n\n\nclass HalloPlugin:\n def __init__(self, **kwargs):\n self.name = kwargs.get('name', None)\n self.options = kwargs.get('options', {})\n self.js = kwargs.get('js', [])\n self.css = kwargs.get('css', {})\n self.order = kwargs.get('order', 100)\n\n def construct_plugins_list(self, plugins):\n if self.name is not None:\n plugins[self.name] = self.options\n\n @property\n def media(self):\n js = [versioned_static(js_file) for js_file in self.js]\n css = {}\n for media_type, css_files in self.css.items():\n css[media_type] = [versioned_static(css_file) for css_file in css_files]\n\n return Media(js=js, css=css)\n\n\nclass HalloFormatPlugin(HalloPlugin):\n def __init__(self, **kwargs):\n kwargs.setdefault('name', 'halloformat')\n kwargs.setdefault('order', 10)\n self.format_name = kwargs['format_name']\n super().__init__(**kwargs)\n\n def construct_plugins_list(self, plugins):\n plugins.setdefault(self.name, {'formattings': {\n 'bold': False, 'italic': False, 'strikeThrough': False, 'underline': False\n }})\n plugins[self.name]['formattings'][self.format_name] = True\n\n\nclass HalloHeadingPlugin(HalloPlugin):\n default_order = 20\n\n def __init__(self, **kwargs):\n kwargs.setdefault('name', 'halloheadings')\n kwargs.setdefault('order', self.default_order)\n self.element = kwargs.pop('element')\n super().__init__(**kwargs)\n\n def construct_plugins_list(self, plugins):\n plugins.setdefault(self.name, {'formatBlocks': 
[]})\n plugins[self.name]['formatBlocks'].append(self.element)\n\n\nclass HalloListPlugin(HalloPlugin):\n def __init__(self, **kwargs):\n kwargs.setdefault('name', 'hallolists')\n kwargs.setdefault('order', 40)\n self.list_type = kwargs['list_type']\n super().__init__(**kwargs)\n\n def construct_plugins_list(self, plugins):\n plugins.setdefault(self.name, {'lists': {\n 'ordered': False, 'unordered': False\n }})\n plugins[self.name]['lists'][self.list_type] = True\n\n\nclass HalloRequireParagraphsPlugin(HalloPlugin):\n @property\n def media(self):\n return Media(js=[\n versioned_static('wagtailadmin/js/hallo-plugins/hallo-requireparagraphs.js'),\n ]) + super().media\n\n\n# Plugins which are always imported, and cannot be enabled/disabled via 'features'\nCORE_HALLO_PLUGINS = [\n HalloPlugin(name='halloreundo', order=50),\n HalloRequireParagraphsPlugin(name='hallorequireparagraphs'),\n HalloHeadingPlugin(element='p')\n]\n\n\nclass HalloRichTextArea(widgets.Textarea):\n template_name = 'wagtailadmin/widgets/hallo_rich_text_area.html'\n\n # this class's constructor accepts a 'features' kwarg\n accepts_features = True\n\n def get_panel(self):\n return RichTextFieldPanel\n\n def __init__(self, *args, **kwargs):\n self.options = kwargs.pop('options', None)\n\n self.features = kwargs.pop('features', None)\n if self.features is None:\n self.features = features.get_default_features()\n\n self.converter = EditorHTMLConverter(self.features)\n\n # construct a list of plugin objects, by querying the feature registry\n # and keeping the non-null responses from get_editor_plugin\n self.plugins = CORE_HALLO_PLUGINS + list(filter(None, [\n features.get_editor_plugin('hallo', feature_name)\n for feature_name in self.features\n ]))\n self.plugins.sort(key=lambda plugin: plugin.order)\n\n super().__init__(*args, **kwargs)\n\n def format_value(self, value):\n # Convert database rich text representation to the format required by\n # the input field\n value = super().format_value(value)\n\n if value is None:\n return None\n\n return self.converter.from_database_format(value)\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n\n if self.options is not None and 'plugins' in self.options:\n # explicit 'plugins' config passed in options, so use that\n plugin_data = self.options['plugins']\n else:\n plugin_data = OrderedDict()\n for plugin in self.plugins:\n plugin.construct_plugins_list(plugin_data)\n context['widget']['plugins_json'] = json.dumps(plugin_data)\n\n return context\n\n def value_from_datadict(self, data, files, name):\n original_value = super().value_from_datadict(data, files, name)\n if original_value is None:\n return None\n return self.converter.to_database_format(original_value)\n\n @cached_property\n def media(self):\n media = Media(js=[\n versioned_static('wagtailadmin/js/vendor/hallo.js'),\n versioned_static('wagtailadmin/js/hallo-bootstrap.js'),\n ], css={\n 'all': [versioned_static('wagtailadmin/css/panels/hallo.css')]\n })\n\n for plugin in self.plugins:\n media += plugin.media\n\n return media\n", "path": "wagtail/admin/rich_text/editors/hallo.py"}], "after_files": [{"content": "import json\n\nfrom collections import OrderedDict\n\nfrom django.forms import Media, widgets\nfrom django.utils.functional import cached_property\n\nfrom wagtail.admin.edit_handlers import RichTextFieldPanel\nfrom wagtail.admin.rich_text.converters.editor_html import EditorHTMLConverter\nfrom wagtail.admin.staticfiles import versioned_static\nfrom wagtail.core.rich_text 
import features\nfrom wagtail.core.telepath import register\nfrom wagtail.core.widget_adapters import WidgetAdapter\n\n\nclass HalloPlugin:\n def __init__(self, **kwargs):\n self.name = kwargs.get('name', None)\n self.options = kwargs.get('options', {})\n self.js = kwargs.get('js', [])\n self.css = kwargs.get('css', {})\n self.order = kwargs.get('order', 100)\n\n def construct_plugins_list(self, plugins):\n if self.name is not None:\n plugins[self.name] = self.options\n\n @property\n def media(self):\n js = [versioned_static(js_file) for js_file in self.js]\n css = {}\n for media_type, css_files in self.css.items():\n css[media_type] = [versioned_static(css_file) for css_file in css_files]\n\n return Media(js=js, css=css)\n\n\nclass HalloFormatPlugin(HalloPlugin):\n def __init__(self, **kwargs):\n kwargs.setdefault('name', 'halloformat')\n kwargs.setdefault('order', 10)\n self.format_name = kwargs['format_name']\n super().__init__(**kwargs)\n\n def construct_plugins_list(self, plugins):\n plugins.setdefault(self.name, {'formattings': {\n 'bold': False, 'italic': False, 'strikeThrough': False, 'underline': False\n }})\n plugins[self.name]['formattings'][self.format_name] = True\n\n\nclass HalloHeadingPlugin(HalloPlugin):\n default_order = 20\n\n def __init__(self, **kwargs):\n kwargs.setdefault('name', 'halloheadings')\n kwargs.setdefault('order', self.default_order)\n self.element = kwargs.pop('element')\n super().__init__(**kwargs)\n\n def construct_plugins_list(self, plugins):\n plugins.setdefault(self.name, {'formatBlocks': []})\n plugins[self.name]['formatBlocks'].append(self.element)\n\n\nclass HalloListPlugin(HalloPlugin):\n def __init__(self, **kwargs):\n kwargs.setdefault('name', 'hallolists')\n kwargs.setdefault('order', 40)\n self.list_type = kwargs['list_type']\n super().__init__(**kwargs)\n\n def construct_plugins_list(self, plugins):\n plugins.setdefault(self.name, {'lists': {\n 'ordered': False, 'unordered': False\n }})\n plugins[self.name]['lists'][self.list_type] = True\n\n\nclass HalloRequireParagraphsPlugin(HalloPlugin):\n @property\n def media(self):\n return Media(js=[\n versioned_static('wagtailadmin/js/hallo-plugins/hallo-requireparagraphs.js'),\n ]) + super().media\n\n\n# Plugins which are always imported, and cannot be enabled/disabled via 'features'\nCORE_HALLO_PLUGINS = [\n HalloPlugin(name='halloreundo', order=50),\n HalloRequireParagraphsPlugin(name='hallorequireparagraphs'),\n HalloHeadingPlugin(element='p')\n]\n\n\nclass HalloRichTextArea(widgets.Textarea):\n template_name = 'wagtailadmin/widgets/hallo_rich_text_area.html'\n\n # this class's constructor accepts a 'features' kwarg\n accepts_features = True\n\n def get_panel(self):\n return RichTextFieldPanel\n\n def __init__(self, *args, **kwargs):\n self.options = kwargs.pop('options', None)\n\n self.features = kwargs.pop('features', None)\n if self.features is None:\n self.features = features.get_default_features()\n\n self.converter = EditorHTMLConverter(self.features)\n\n # construct a list of plugin objects, by querying the feature registry\n # and keeping the non-null responses from get_editor_plugin\n self.plugins = CORE_HALLO_PLUGINS + list(filter(None, [\n features.get_editor_plugin('hallo', feature_name)\n for feature_name in self.features\n ]))\n self.plugins.sort(key=lambda plugin: plugin.order)\n\n super().__init__(*args, **kwargs)\n\n def format_value(self, value):\n # Convert database rich text representation to the format required by\n # the input field\n value = super().format_value(value)\n\n 
if value is None:\n return None\n\n return self.converter.from_database_format(value)\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n\n if self.options is not None and 'plugins' in self.options:\n # explicit 'plugins' config passed in options, so use that\n plugin_data = self.options['plugins']\n else:\n plugin_data = OrderedDict()\n for plugin in self.plugins:\n plugin.construct_plugins_list(plugin_data)\n context['widget']['plugins_json'] = json.dumps(plugin_data)\n\n return context\n\n def value_from_datadict(self, data, files, name):\n original_value = super().value_from_datadict(data, files, name)\n if original_value is None:\n return None\n return self.converter.to_database_format(original_value)\n\n @cached_property\n def media(self):\n media = Media(js=[\n versioned_static('wagtailadmin/js/vendor/hallo.js'),\n versioned_static('wagtailadmin/js/hallo-bootstrap.js'),\n ], css={\n 'all': [versioned_static('wagtailadmin/css/panels/hallo.css')]\n })\n\n for plugin in self.plugins:\n media += plugin.media\n\n return media\n\n\nclass HalloRichTextAreaAdapter(WidgetAdapter):\n js_constructor = 'wagtail.widgets.HalloRichTextArea'\n\n\nregister(HalloRichTextAreaAdapter(), HalloRichTextArea)\n", "path": "wagtail/admin/rich_text/editors/hallo.py"}]} | 2,106 | 203 |
gh_patches_debug_23123 | rasdani/github-patches | git_diff | streamlink__streamlink-5762 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.vidio: 403 Client Error on stream token acquirement
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Unable to open URL: https://www.vidio.com/live/204/tokens (403 Client Error: Forbidden for url: https://www.vidio.com/live/204/tokens?type=hls)
### Description
The live stream: https://www.vidio.com/live/204-sctv
the output: https://www.vidio.com/live/204/tokens (403 Client Error: Forbidden for url: https://www.vidio.com/live/204/tokens?type=hls)
It is missing sctv
### Debug log
```text
streamlink https://www.vidio.com/live/204-sctv best
[cli][info] Found matching plugin vidio for URL https://www.vidio.com/live/204-sctv
error: Unable to open URL: https://www.vidio.com/live/204/tokens (403 Client Error: Forbidden for url: https://www.vidio.com/live/204/tokens?type=hls)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/vidio.py`
Content:
```
1 """
2 $description Indonesian & international live TV channels and video on-demand service. OTT service from Vidio.
3 $url vidio.com
4 $type live, vod
5 """
6 import logging
7 import re
8 from urllib.parse import urlsplit, urlunsplit
9
10 from streamlink.plugin import Plugin, pluginmatcher
11 from streamlink.plugin.api import validate
12 from streamlink.stream.dash import DASHStream
13 from streamlink.stream.hls import HLSStream
14
15
16 log = logging.getLogger(__name__)
17
18
19 @pluginmatcher(re.compile(
20 r"https?://(?:www\.)?vidio\.com/",
21 ))
22 class Vidio(Plugin):
23 tokens_url = "https://www.vidio.com/live/{id}/tokens"
24
25 def _get_stream_token(self, stream_id, stream_type):
26 log.debug("Getting stream token")
27 return self.session.http.post(
28 self.tokens_url.format(id=stream_id),
29 params={"type": stream_type},
30 headers={"Referer": self.url},
31 schema=validate.Schema(
32 validate.parse_json(),
33 {"token": str},
34 validate.get("token"),
35 ),
36 )
37
38 def _get_streams(self):
39 stream_id, has_token, hls_url, dash_url = self.session.http.get(
40 self.url,
41 schema=validate.Schema(
42 validate.parse_html(),
43 validate.xml_find(".//*[@data-video-id]"),
44 validate.union((
45 validate.get("data-video-id"),
46 validate.all(
47 validate.get("data-video-has-token"),
48 validate.transform(lambda val: val and val != "false"),
49 ),
50 validate.get("data-vjs-clip-hls-url"),
51 validate.get("data-vjs-clip-dash-url"),
52 )),
53 ),
54 )
55
56 if dash_url and has_token:
57 token = self._get_stream_token(stream_id, "dash")
58 parsed = urlsplit(dash_url)
59 dash_url = urlunsplit(parsed._replace(path=f"{token}{parsed.path}"))
60 return DASHStream.parse_manifest(
61 self.session,
62 dash_url,
63 headers={"Referer": "https://www.vidio.com/"},
64 )
65
66 if not hls_url:
67 return
68
69 if has_token:
70 token = self._get_stream_token(stream_id, "hls")
71 hls_url = f"{hls_url}?{token}"
72
73 return HLSStream.parse_variant_playlist(
74 self.session,
75 hls_url,
76 headers={"Referer": "https://www.vidio.com/"},
77 )
78
79
80 __plugin__ = Vidio
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/vidio.py b/src/streamlink/plugins/vidio.py
--- a/src/streamlink/plugins/vidio.py
+++ b/src/streamlink/plugins/vidio.py
@@ -6,6 +6,7 @@
import logging
import re
from urllib.parse import urlsplit, urlunsplit
+from uuid import uuid4
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
@@ -17,7 +18,7 @@
@pluginmatcher(re.compile(
- r"https?://(?:www\.)?vidio\.com/",
+ r"https?://(?:www\.)?vidio\.com/.+",
))
class Vidio(Plugin):
tokens_url = "https://www.vidio.com/live/{id}/tokens"
@@ -28,6 +29,10 @@
self.tokens_url.format(id=stream_id),
params={"type": stream_type},
headers={"Referer": self.url},
+ cookies={
+ "ahoy_visit": str(uuid4()),
+ "ahoy_visitor": str(uuid4()),
+ },
schema=validate.Schema(
validate.parse_json(),
{"token": str},
| {"golden_diff": "diff --git a/src/streamlink/plugins/vidio.py b/src/streamlink/plugins/vidio.py\n--- a/src/streamlink/plugins/vidio.py\n+++ b/src/streamlink/plugins/vidio.py\n@@ -6,6 +6,7 @@\n import logging\n import re\n from urllib.parse import urlsplit, urlunsplit\n+from uuid import uuid4\n \n from streamlink.plugin import Plugin, pluginmatcher\n from streamlink.plugin.api import validate\n@@ -17,7 +18,7 @@\n \n \n @pluginmatcher(re.compile(\n- r\"https?://(?:www\\.)?vidio\\.com/\",\n+ r\"https?://(?:www\\.)?vidio\\.com/.+\",\n ))\n class Vidio(Plugin):\n tokens_url = \"https://www.vidio.com/live/{id}/tokens\"\n@@ -28,6 +29,10 @@\n self.tokens_url.format(id=stream_id),\n params={\"type\": stream_type},\n headers={\"Referer\": self.url},\n+ cookies={\n+ \"ahoy_visit\": str(uuid4()),\n+ \"ahoy_visitor\": str(uuid4()),\n+ },\n schema=validate.Schema(\n validate.parse_json(),\n {\"token\": str},\n", "issue": "plugins.vidio: 403 Client Error on stream token acquirement\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nUnable to open URL: https://www.vidio.com/live/204/tokens (403 Client Error: Forbidden for url: https://www.vidio.com/live/204/tokens?type=hls)\n\n### Description\n\nThe live stream: https://www.vidio.com/live/204-sctv\r\nthe output: https://www.vidio.com/live/204/tokens (403 Client Error: Forbidden for url: https://www.vidio.com/live/204/tokens?type=hls)\r\n\r\nIt is missing sctv\n\n### Debug log\n\n```text\nstreamlink https://www.vidio.com/live/204-sctv best\r\n[cli][info] Found matching plugin vidio for URL https://www.vidio.com/live/204-sctv\r\nerror: Unable to open URL: https://www.vidio.com/live/204/tokens (403 Client Error: Forbidden for url: https://www.vidio.com/live/204/tokens?type=hls)\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Indonesian & international live TV channels and video on-demand service. 
OTT service from Vidio.\n$url vidio.com\n$type live, vod\n\"\"\"\nimport logging\nimport re\nfrom urllib.parse import urlsplit, urlunsplit\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?vidio\\.com/\",\n))\nclass Vidio(Plugin):\n tokens_url = \"https://www.vidio.com/live/{id}/tokens\"\n\n def _get_stream_token(self, stream_id, stream_type):\n log.debug(\"Getting stream token\")\n return self.session.http.post(\n self.tokens_url.format(id=stream_id),\n params={\"type\": stream_type},\n headers={\"Referer\": self.url},\n schema=validate.Schema(\n validate.parse_json(),\n {\"token\": str},\n validate.get(\"token\"),\n ),\n )\n\n def _get_streams(self):\n stream_id, has_token, hls_url, dash_url = self.session.http.get(\n self.url,\n schema=validate.Schema(\n validate.parse_html(),\n validate.xml_find(\".//*[@data-video-id]\"),\n validate.union((\n validate.get(\"data-video-id\"),\n validate.all(\n validate.get(\"data-video-has-token\"),\n validate.transform(lambda val: val and val != \"false\"),\n ),\n validate.get(\"data-vjs-clip-hls-url\"),\n validate.get(\"data-vjs-clip-dash-url\"),\n )),\n ),\n )\n\n if dash_url and has_token:\n token = self._get_stream_token(stream_id, \"dash\")\n parsed = urlsplit(dash_url)\n dash_url = urlunsplit(parsed._replace(path=f\"{token}{parsed.path}\"))\n return DASHStream.parse_manifest(\n self.session,\n dash_url,\n headers={\"Referer\": \"https://www.vidio.com/\"},\n )\n\n if not hls_url:\n return\n\n if has_token:\n token = self._get_stream_token(stream_id, \"hls\")\n hls_url = f\"{hls_url}?{token}\"\n\n return HLSStream.parse_variant_playlist(\n self.session,\n hls_url,\n headers={\"Referer\": \"https://www.vidio.com/\"},\n )\n\n\n__plugin__ = Vidio\n", "path": "src/streamlink/plugins/vidio.py"}], "after_files": [{"content": "\"\"\"\n$description Indonesian & international live TV channels and video on-demand service. 
OTT service from Vidio.\n$url vidio.com\n$type live, vod\n\"\"\"\nimport logging\nimport re\nfrom urllib.parse import urlsplit, urlunsplit\nfrom uuid import uuid4\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?vidio\\.com/.+\",\n))\nclass Vidio(Plugin):\n tokens_url = \"https://www.vidio.com/live/{id}/tokens\"\n\n def _get_stream_token(self, stream_id, stream_type):\n log.debug(\"Getting stream token\")\n return self.session.http.post(\n self.tokens_url.format(id=stream_id),\n params={\"type\": stream_type},\n headers={\"Referer\": self.url},\n cookies={\n \"ahoy_visit\": str(uuid4()),\n \"ahoy_visitor\": str(uuid4()),\n },\n schema=validate.Schema(\n validate.parse_json(),\n {\"token\": str},\n validate.get(\"token\"),\n ),\n )\n\n def _get_streams(self):\n stream_id, has_token, hls_url, dash_url = self.session.http.get(\n self.url,\n schema=validate.Schema(\n validate.parse_html(),\n validate.xml_find(\".//*[@data-video-id]\"),\n validate.union((\n validate.get(\"data-video-id\"),\n validate.all(\n validate.get(\"data-video-has-token\"),\n validate.transform(lambda val: val and val != \"false\"),\n ),\n validate.get(\"data-vjs-clip-hls-url\"),\n validate.get(\"data-vjs-clip-dash-url\"),\n )),\n ),\n )\n\n if dash_url and has_token:\n token = self._get_stream_token(stream_id, \"dash\")\n parsed = urlsplit(dash_url)\n dash_url = urlunsplit(parsed._replace(path=f\"{token}{parsed.path}\"))\n return DASHStream.parse_manifest(\n self.session,\n dash_url,\n headers={\"Referer\": \"https://www.vidio.com/\"},\n )\n\n if not hls_url:\n return\n\n if has_token:\n token = self._get_stream_token(stream_id, \"hls\")\n hls_url = f\"{hls_url}?{token}\"\n\n return HLSStream.parse_variant_playlist(\n self.session,\n hls_url,\n headers={\"Referer\": \"https://www.vidio.com/\"},\n )\n\n\n__plugin__ = Vidio\n", "path": "src/streamlink/plugins/vidio.py"}]} | 1,360 | 260 |
gh_patches_debug_19732 | rasdani/github-patches | git_diff | joke2k__faker-1368 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Random sentence of length 1 fails
* Faker version: 5.5.0
* OS: Ubuntu 20.04.1
Creating a random sentence of length 1 fails because it attempts to titleize the first word of the sentence, but `random_choices` is returning a tuple which is immutable.
This also occurs sometimes with 2 or 3 length sentences, although I presume that under the hood they are randomly selecting sentences of length 1.
This works as expected in Faker version 5.4.1.
### Steps to reproduce
```
>>> import faker
>>> thing = faker.Faker()
>>> print(thing.sentence(1))
```
### Expected behavior
A random titleized word is printed with a fullstop
### Actual behavior
```
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "~/.../venv/lib/python3.6/site-packages/faker/providers/lorem/__init__.py", line 86, in sentence
words[0] = words[0].title()
TypeError: 'tuple' object does not support item assignment
```
Random sentence of length 1 fails
* Faker version: 5.5.0
* OS: Ubuntu 20.04.1
Creating a random sentence of length 1 fails because it attempts to titleize the first word of the sentence, but `random_choices` is returning a tuple which is immutable.
This also occurs sometimes with 2 or 3 length sentences, although I presume that under the hood they are randomly selecting sentences of length 1.
This works as expected in Faker version 5.4.1.
### Steps to reproduce
```
>>> import faker
>>> thing = faker.Faker()
>>> print(thing.sentence(1))
```
### Expected behavior
A random titleized word is printed with a fullstop
### Actual behavior
```
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "~/.../venv/lib/python3.6/site-packages/faker/providers/lorem/__init__.py", line 86, in sentence
words[0] = words[0].title()
TypeError: 'tuple' object does not support item assignment
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/lorem/__init__.py`
Content:
```
1 from .. import BaseProvider
2
3 localized = True
4
5 # 'Latin' is the default locale
6 default_locale = 'la'
7
8
9 class Provider(BaseProvider):
10 """Implement default lorem provider for Faker.
11
12 .. important::
13 The default locale of the lorem provider is ``la``. When using a locale
14 without a localized lorem provider, the ``la`` lorem provider will be
15 used, so generated words will be in pseudo-Latin. The locale used for
16 the standard provider docs was ``en_US``, and ``en_US`` has a localized
17 lorem provider which is why the samples here show words in American
18 English.
19 """
20
21 word_connector = ' '
22 sentence_punctuation = '.'
23
24 def words(self, nb=3, ext_word_list=None, unique=False):
25 """Generate a list of words.
26
27 The ``nb`` argument controls the number of words in the resulting list,
28 and if ``ext_word_list`` is provided, words from that list will be used
29 instead of those from the locale provider's built-in word list.
30
31 If ``unique`` is ``True``, this method will return a list containing
32 unique words. Under the hood, |random_sample| will be used for sampling
33 without replacement. If ``unique`` is ``False``, |random_choices| is
34 used instead, and the list returned may contain duplicates.
35
36 .. warning::
37 Depending on the length of a locale provider's built-in word list or
38 on the length of ``ext_word_list`` if provided, a large ``nb`` can
39 exhaust said lists if ``unique`` is ``True``, raising an exception.
40
41 :sample:
42 :sample: nb=5
43 :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']
44 :sample: nb=4, ext_word_list=['abc', 'def', 'ghi', 'jkl'], unique=True
45 """
46 word_list = ext_word_list if ext_word_list else self.word_list
47 if unique:
48 return self.random_sample(word_list, length=nb)
49 return self.random_choices(word_list, length=nb)
50
51 def word(self, ext_word_list=None):
52 """Generate a word.
53
54 This method uses |words| under the hood with the ``nb`` argument set to
55 ``1`` to generate the result.
56
57 :sample:
58 :sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']
59 """
60 return self.words(1, ext_word_list)[0]
61
62 def sentence(self, nb_words=6, variable_nb_words=True, ext_word_list=None):
63 """Generate a sentence.
64
65 The ``nb_words`` argument controls how many words the sentence will
66 contain, and setting ``variable_nb_words`` to ``False`` will generate
67 the exact amount, while setting it to ``True`` (default) will generate
68 a random amount (+/-40%, minimum of 1) using |randomize_nb_elements|.
69
70 Under the hood, |words| is used to generate the words, so the argument
71 ``ext_word_list`` works in the same way here as it would in that method.
72
73 :sample: nb_words=10
74 :sample: nb_words=10, variable_nb_words=False
75 :sample: nb_words=10, ext_word_list=['abc', 'def', 'ghi', 'jkl']
76 :sample: nb_words=10, variable_nb_words=True,
77 ext_word_list=['abc', 'def', 'ghi', 'jkl']
78 """
79 if nb_words <= 0:
80 return ''
81
82 if variable_nb_words:
83 nb_words = self.randomize_nb_elements(nb_words, min=1)
84
85 words = self.words(nb=nb_words, ext_word_list=ext_word_list)
86 words[0] = words[0].title()
87
88 return self.word_connector.join(words) + self.sentence_punctuation
89
90 def sentences(self, nb=3, ext_word_list=None):
91 """Generate a list of sentences.
92
93 This method uses |sentence| under the hood to generate sentences, and
94 the ``nb`` argument controls exactly how many sentences the list will
95 contain. The ``ext_word_list`` argument works in exactly the same way
96 as well.
97
98 :sample:
99 :sample: nb=5
100 :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']
101 """
102 return [self.sentence(ext_word_list=ext_word_list)
103 for _ in range(0, nb)]
104
105 def paragraph(
106 self,
107 nb_sentences=3,
108 variable_nb_sentences=True,
109 ext_word_list=None):
110 """Generate a paragraph.
111
112 The ``nb_sentences`` argument controls how many sentences the paragraph
113 will contain, and setting ``variable_nb_sentences`` to ``False`` will
114 generate the exact amount, while setting it to ``True`` (default) will
115 generate a random amount (+/-40%, minimum of 1) using
116 |randomize_nb_elements|.
117
118 Under the hood, |sentences| is used to generate the sentences, so the
119 argument ``ext_word_list`` works in the same way here as it would in
120 that method.
121
122 :sample: nb_sentences=5
123 :sample: nb_sentences=5, variable_nb_sentences=False
124 :sample: nb_sentences=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']
125 :sample: nb_sentences=5, variable_nb_sentences=False,
126 ext_word_list=['abc', 'def', 'ghi', 'jkl']
127 """
128 if nb_sentences <= 0:
129 return ''
130
131 if variable_nb_sentences:
132 nb_sentences = self.randomize_nb_elements(nb_sentences, min=1)
133
134 para = self.word_connector.join(self.sentences(
135 nb_sentences, ext_word_list=ext_word_list,
136 ))
137
138 return para
139
140 def paragraphs(self, nb=3, ext_word_list=None):
141 """Generate a list of paragraphs.
142
143 This method uses |paragraph| under the hood to generate paragraphs, and
144 the ``nb`` argument controls exactly how many sentences the list will
145 contain. The ``ext_word_list`` argument works in exactly the same way
146 as well.
147
148 :sample: nb=5
149 :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']
150 """
151 return [self.paragraph(ext_word_list=ext_word_list)
152 for _ in range(0, nb)]
153
154 def text(self, max_nb_chars=200, ext_word_list=None):
155 """Generate a text string.
156
157 The ``max_nb_chars`` argument controls the approximate number of
158 characters the text string will have, and depending on its value, this
159 method may use either |words|, |sentences|, or |paragraphs| for text
160 generation. The ``ext_word_list`` argument works in exactly the same way
161 it would in any of those methods.
162
163 :sample: max_nb_chars=20
164 :sample: max_nb_chars=80
165 :sample: max_nb_chars=160
166 :sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']
167 """
168 text = []
169 if max_nb_chars < 5:
170 raise ValueError(
171 'text() can only generate text of at least 5 characters')
172
173 if max_nb_chars < 25:
174 # join words
175 while not text:
176 size = 0
177 # determine how many words are needed to reach the $max_nb_chars
178 # once;
179 while size < max_nb_chars:
180 word = (self.word_connector if size else '') + \
181 self.word(ext_word_list=ext_word_list)
182 text.append(word)
183 size += len(word)
184 text.pop()
185 text[0] = text[0][0].upper() + text[0][1:]
186 last_index = len(text) - 1
187 text[last_index] += self.sentence_punctuation
188 elif max_nb_chars < 100:
189 # join sentences
190 while not text:
191 size = 0
192 # determine how many sentences are needed to reach the
193 # $max_nb_chars once
194 while size < max_nb_chars:
195 sentence = (self.word_connector if size else '') + \
196 self.sentence(ext_word_list=ext_word_list)
197 text.append(sentence)
198 size += len(sentence)
199 text.pop()
200 else:
201 # join paragraphs
202 while not text:
203 size = 0
204 # determine how many paragraphs are needed to reach the
205 # $max_nb_chars once
206 while size < max_nb_chars:
207 paragraph = ('\n' if size else '') + \
208 self.paragraph(ext_word_list=ext_word_list)
209 text.append(paragraph)
210 size += len(paragraph)
211 text.pop()
212
213 return "".join(text)
214
215 def texts(self, nb_texts=3, max_nb_chars=200, ext_word_list=None):
216 """Generate a list of text strings.
217
218 The ``nb_texts`` argument controls how many text strings the list will
219 contain, and this method uses |text| under the hood for text generation,
220 so the two remaining arguments, ``max_nb_chars`` and ``ext_word_list``
221 will work in exactly the same way as well.
222
223 :sample: nb_texts=5
224 :sample: nb_texts=5, max_nb_chars=50
225 :sample: nb_texts=5, max_nb_chars=50,
226 ext_word_list=['abc', 'def', 'ghi', 'jkl']
227 """
228 return [self.text(max_nb_chars, ext_word_list)
229 for _ in range(0, nb_texts)]
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/providers/lorem/__init__.py b/faker/providers/lorem/__init__.py
--- a/faker/providers/lorem/__init__.py
+++ b/faker/providers/lorem/__init__.py
@@ -22,7 +22,7 @@
sentence_punctuation = '.'
def words(self, nb=3, ext_word_list=None, unique=False):
- """Generate a list of words.
+ """Generate a tuple of words.
The ``nb`` argument controls the number of words in the resulting list,
and if ``ext_word_list`` is provided, words from that list will be used
@@ -82,7 +82,7 @@
if variable_nb_words:
nb_words = self.randomize_nb_elements(nb_words, min=1)
- words = self.words(nb=nb_words, ext_word_list=ext_word_list)
+ words = list(self.words(nb=nb_words, ext_word_list=ext_word_list))
words[0] = words[0].title()
return self.word_connector.join(words) + self.sentence_punctuation
| {"golden_diff": "diff --git a/faker/providers/lorem/__init__.py b/faker/providers/lorem/__init__.py\n--- a/faker/providers/lorem/__init__.py\n+++ b/faker/providers/lorem/__init__.py\n@@ -22,7 +22,7 @@\n sentence_punctuation = '.'\n \n def words(self, nb=3, ext_word_list=None, unique=False):\n- \"\"\"Generate a list of words.\n+ \"\"\"Generate a tuple of words.\n \n The ``nb`` argument controls the number of words in the resulting list,\n and if ``ext_word_list`` is provided, words from that list will be used\n@@ -82,7 +82,7 @@\n if variable_nb_words:\n nb_words = self.randomize_nb_elements(nb_words, min=1)\n \n- words = self.words(nb=nb_words, ext_word_list=ext_word_list)\n+ words = list(self.words(nb=nb_words, ext_word_list=ext_word_list))\n words[0] = words[0].title()\n \n return self.word_connector.join(words) + self.sentence_punctuation\n", "issue": "Random sentence of length 1 fails\n* Faker version: 5.5.0\r\n* OS: Ubuntu 20.04.1\r\n\r\nCreating a random sentence of length 1 fails because it attempts to titleize the first word of the sentence, but `random_choices` is returning a tuple which is immutable. \r\n\r\nThis also occurs sometimes with 2 or 3 length sentences, although I presume that under the hood they are randomly selecting sentences of length 1.\r\n\r\nThis works as expected in Faker version 5.4.1.\r\n\r\n### Steps to reproduce\r\n\r\n```\r\n>>> import faker\r\n>>> thing = faker.Faker()\r\n>>> print(thing.sentence(1))\r\n```\r\n\r\n### Expected behavior\r\n\r\nA random titleized word is printed with a fullstop\r\n\r\n### Actual behavior\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<input>\", line 1, in <module>\r\n File \"~/.../venv/lib/python3.6/site-packages/faker/providers/lorem/__init__.py\", line 86, in sentence\r\n words[0] = words[0].title()\r\nTypeError: 'tuple' object does not support item assignment\r\n```\r\n\nRandom sentence of length 1 fails\n* Faker version: 5.5.0\r\n* OS: Ubuntu 20.04.1\r\n\r\nCreating a random sentence of length 1 fails because it attempts to titleize the first word of the sentence, but `random_choices` is returning a tuple which is immutable. \r\n\r\nThis also occurs sometimes with 2 or 3 length sentences, although I presume that under the hood they are randomly selecting sentences of length 1.\r\n\r\nThis works as expected in Faker version 5.4.1.\r\n\r\n### Steps to reproduce\r\n\r\n```\r\n>>> import faker\r\n>>> thing = faker.Faker()\r\n>>> print(thing.sentence(1))\r\n```\r\n\r\n### Expected behavior\r\n\r\nA random titleized word is printed with a fullstop\r\n\r\n### Actual behavior\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<input>\", line 1, in <module>\r\n File \"~/.../venv/lib/python3.6/site-packages/faker/providers/lorem/__init__.py\", line 86, in sentence\r\n words[0] = words[0].title()\r\nTypeError: 'tuple' object does not support item assignment\r\n```\r\n\n", "before_files": [{"content": "from .. import BaseProvider\n\nlocalized = True\n\n# 'Latin' is the default locale\ndefault_locale = 'la'\n\n\nclass Provider(BaseProvider):\n \"\"\"Implement default lorem provider for Faker.\n\n .. important::\n The default locale of the lorem provider is ``la``. When using a locale\n without a localized lorem provider, the ``la`` lorem provider will be\n used, so generated words will be in pseudo-Latin. 
The locale used for\n the standard provider docs was ``en_US``, and ``en_US`` has a localized\n lorem provider which is why the samples here show words in American\n English.\n \"\"\"\n\n word_connector = ' '\n sentence_punctuation = '.'\n\n def words(self, nb=3, ext_word_list=None, unique=False):\n \"\"\"Generate a list of words.\n\n The ``nb`` argument controls the number of words in the resulting list,\n and if ``ext_word_list`` is provided, words from that list will be used\n instead of those from the locale provider's built-in word list.\n\n If ``unique`` is ``True``, this method will return a list containing\n unique words. Under the hood, |random_sample| will be used for sampling\n without replacement. If ``unique`` is ``False``, |random_choices| is\n used instead, and the list returned may contain duplicates.\n\n .. warning::\n Depending on the length of a locale provider's built-in word list or\n on the length of ``ext_word_list`` if provided, a large ``nb`` can\n exhaust said lists if ``unique`` is ``True``, raising an exception.\n\n :sample:\n :sample: nb=5\n :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n :sample: nb=4, ext_word_list=['abc', 'def', 'ghi', 'jkl'], unique=True\n \"\"\"\n word_list = ext_word_list if ext_word_list else self.word_list\n if unique:\n return self.random_sample(word_list, length=nb)\n return self.random_choices(word_list, length=nb)\n\n def word(self, ext_word_list=None):\n \"\"\"Generate a word.\n\n This method uses |words| under the hood with the ``nb`` argument set to\n ``1`` to generate the result.\n\n :sample:\n :sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n return self.words(1, ext_word_list)[0]\n\n def sentence(self, nb_words=6, variable_nb_words=True, ext_word_list=None):\n \"\"\"Generate a sentence.\n\n The ``nb_words`` argument controls how many words the sentence will\n contain, and setting ``variable_nb_words`` to ``False`` will generate\n the exact amount, while setting it to ``True`` (default) will generate\n a random amount (+/-40%, minimum of 1) using |randomize_nb_elements|.\n\n Under the hood, |words| is used to generate the words, so the argument\n ``ext_word_list`` works in the same way here as it would in that method.\n\n :sample: nb_words=10\n :sample: nb_words=10, variable_nb_words=False\n :sample: nb_words=10, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n :sample: nb_words=10, variable_nb_words=True,\n ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n if nb_words <= 0:\n return ''\n\n if variable_nb_words:\n nb_words = self.randomize_nb_elements(nb_words, min=1)\n\n words = self.words(nb=nb_words, ext_word_list=ext_word_list)\n words[0] = words[0].title()\n\n return self.word_connector.join(words) + self.sentence_punctuation\n\n def sentences(self, nb=3, ext_word_list=None):\n \"\"\"Generate a list of sentences.\n\n This method uses |sentence| under the hood to generate sentences, and\n the ``nb`` argument controls exactly how many sentences the list will\n contain. 
The ``ext_word_list`` argument works in exactly the same way\n as well.\n\n :sample:\n :sample: nb=5\n :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n return [self.sentence(ext_word_list=ext_word_list)\n for _ in range(0, nb)]\n\n def paragraph(\n self,\n nb_sentences=3,\n variable_nb_sentences=True,\n ext_word_list=None):\n \"\"\"Generate a paragraph.\n\n The ``nb_sentences`` argument controls how many sentences the paragraph\n will contain, and setting ``variable_nb_sentences`` to ``False`` will\n generate the exact amount, while setting it to ``True`` (default) will\n generate a random amount (+/-40%, minimum of 1) using\n |randomize_nb_elements|.\n\n Under the hood, |sentences| is used to generate the sentences, so the\n argument ``ext_word_list`` works in the same way here as it would in\n that method.\n\n :sample: nb_sentences=5\n :sample: nb_sentences=5, variable_nb_sentences=False\n :sample: nb_sentences=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n :sample: nb_sentences=5, variable_nb_sentences=False,\n ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n if nb_sentences <= 0:\n return ''\n\n if variable_nb_sentences:\n nb_sentences = self.randomize_nb_elements(nb_sentences, min=1)\n\n para = self.word_connector.join(self.sentences(\n nb_sentences, ext_word_list=ext_word_list,\n ))\n\n return para\n\n def paragraphs(self, nb=3, ext_word_list=None):\n \"\"\"Generate a list of paragraphs.\n\n This method uses |paragraph| under the hood to generate paragraphs, and\n the ``nb`` argument controls exactly how many sentences the list will\n contain. The ``ext_word_list`` argument works in exactly the same way\n as well.\n\n :sample: nb=5\n :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n return [self.paragraph(ext_word_list=ext_word_list)\n for _ in range(0, nb)]\n\n def text(self, max_nb_chars=200, ext_word_list=None):\n \"\"\"Generate a text string.\n\n The ``max_nb_chars`` argument controls the approximate number of\n characters the text string will have, and depending on its value, this\n method may use either |words|, |sentences|, or |paragraphs| for text\n generation. 
The ``ext_word_list`` argument works in exactly the same way\n it would in any of those methods.\n\n :sample: max_nb_chars=20\n :sample: max_nb_chars=80\n :sample: max_nb_chars=160\n :sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n text = []\n if max_nb_chars < 5:\n raise ValueError(\n 'text() can only generate text of at least 5 characters')\n\n if max_nb_chars < 25:\n # join words\n while not text:\n size = 0\n # determine how many words are needed to reach the $max_nb_chars\n # once;\n while size < max_nb_chars:\n word = (self.word_connector if size else '') + \\\n self.word(ext_word_list=ext_word_list)\n text.append(word)\n size += len(word)\n text.pop()\n text[0] = text[0][0].upper() + text[0][1:]\n last_index = len(text) - 1\n text[last_index] += self.sentence_punctuation\n elif max_nb_chars < 100:\n # join sentences\n while not text:\n size = 0\n # determine how many sentences are needed to reach the\n # $max_nb_chars once\n while size < max_nb_chars:\n sentence = (self.word_connector if size else '') + \\\n self.sentence(ext_word_list=ext_word_list)\n text.append(sentence)\n size += len(sentence)\n text.pop()\n else:\n # join paragraphs\n while not text:\n size = 0\n # determine how many paragraphs are needed to reach the\n # $max_nb_chars once\n while size < max_nb_chars:\n paragraph = ('\\n' if size else '') + \\\n self.paragraph(ext_word_list=ext_word_list)\n text.append(paragraph)\n size += len(paragraph)\n text.pop()\n\n return \"\".join(text)\n\n def texts(self, nb_texts=3, max_nb_chars=200, ext_word_list=None):\n \"\"\"Generate a list of text strings.\n\n The ``nb_texts`` argument controls how many text strings the list will\n contain, and this method uses |text| under the hood for text generation,\n so the two remaining arguments, ``max_nb_chars`` and ``ext_word_list``\n will work in exactly the same way as well.\n\n :sample: nb_texts=5\n :sample: nb_texts=5, max_nb_chars=50\n :sample: nb_texts=5, max_nb_chars=50,\n ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n return [self.text(max_nb_chars, ext_word_list)\n for _ in range(0, nb_texts)]\n", "path": "faker/providers/lorem/__init__.py"}], "after_files": [{"content": "from .. import BaseProvider\n\nlocalized = True\n\n# 'Latin' is the default locale\ndefault_locale = 'la'\n\n\nclass Provider(BaseProvider):\n \"\"\"Implement default lorem provider for Faker.\n\n .. important::\n The default locale of the lorem provider is ``la``. When using a locale\n without a localized lorem provider, the ``la`` lorem provider will be\n used, so generated words will be in pseudo-Latin. The locale used for\n the standard provider docs was ``en_US``, and ``en_US`` has a localized\n lorem provider which is why the samples here show words in American\n English.\n \"\"\"\n\n word_connector = ' '\n sentence_punctuation = '.'\n\n def words(self, nb=3, ext_word_list=None, unique=False):\n \"\"\"Generate a tuple of words.\n\n The ``nb`` argument controls the number of words in the resulting list,\n and if ``ext_word_list`` is provided, words from that list will be used\n instead of those from the locale provider's built-in word list.\n\n If ``unique`` is ``True``, this method will return a list containing\n unique words. Under the hood, |random_sample| will be used for sampling\n without replacement. If ``unique`` is ``False``, |random_choices| is\n used instead, and the list returned may contain duplicates.\n\n .. 
warning::\n Depending on the length of a locale provider's built-in word list or\n on the length of ``ext_word_list`` if provided, a large ``nb`` can\n exhaust said lists if ``unique`` is ``True``, raising an exception.\n\n :sample:\n :sample: nb=5\n :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n :sample: nb=4, ext_word_list=['abc', 'def', 'ghi', 'jkl'], unique=True\n \"\"\"\n word_list = ext_word_list if ext_word_list else self.word_list\n if unique:\n return self.random_sample(word_list, length=nb)\n return self.random_choices(word_list, length=nb)\n\n def word(self, ext_word_list=None):\n \"\"\"Generate a word.\n\n This method uses |words| under the hood with the ``nb`` argument set to\n ``1`` to generate the result.\n\n :sample:\n :sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n return self.words(1, ext_word_list)[0]\n\n def sentence(self, nb_words=6, variable_nb_words=True, ext_word_list=None):\n \"\"\"Generate a sentence.\n\n The ``nb_words`` argument controls how many words the sentence will\n contain, and setting ``variable_nb_words`` to ``False`` will generate\n the exact amount, while setting it to ``True`` (default) will generate\n a random amount (+/-40%, minimum of 1) using |randomize_nb_elements|.\n\n Under the hood, |words| is used to generate the words, so the argument\n ``ext_word_list`` works in the same way here as it would in that method.\n\n :sample: nb_words=10\n :sample: nb_words=10, variable_nb_words=False\n :sample: nb_words=10, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n :sample: nb_words=10, variable_nb_words=True,\n ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n if nb_words <= 0:\n return ''\n\n if variable_nb_words:\n nb_words = self.randomize_nb_elements(nb_words, min=1)\n\n words = list(self.words(nb=nb_words, ext_word_list=ext_word_list))\n words[0] = words[0].title()\n\n return self.word_connector.join(words) + self.sentence_punctuation\n\n def sentences(self, nb=3, ext_word_list=None):\n \"\"\"Generate a list of sentences.\n\n This method uses |sentence| under the hood to generate sentences, and\n the ``nb`` argument controls exactly how many sentences the list will\n contain. 
The ``ext_word_list`` argument works in exactly the same way\n as well.\n\n :sample:\n :sample: nb=5\n :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n return [self.sentence(ext_word_list=ext_word_list)\n for _ in range(0, nb)]\n\n def paragraph(\n self,\n nb_sentences=3,\n variable_nb_sentences=True,\n ext_word_list=None):\n \"\"\"Generate a paragraph.\n\n The ``nb_sentences`` argument controls how many sentences the paragraph\n will contain, and setting ``variable_nb_sentences`` to ``False`` will\n generate the exact amount, while setting it to ``True`` (default) will\n generate a random amount (+/-40%, minimum of 1) using\n |randomize_nb_elements|.\n\n Under the hood, |sentences| is used to generate the sentences, so the\n argument ``ext_word_list`` works in the same way here as it would in\n that method.\n\n :sample: nb_sentences=5\n :sample: nb_sentences=5, variable_nb_sentences=False\n :sample: nb_sentences=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n :sample: nb_sentences=5, variable_nb_sentences=False,\n ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n if nb_sentences <= 0:\n return ''\n\n if variable_nb_sentences:\n nb_sentences = self.randomize_nb_elements(nb_sentences, min=1)\n\n para = self.word_connector.join(self.sentences(\n nb_sentences, ext_word_list=ext_word_list,\n ))\n\n return para\n\n def paragraphs(self, nb=3, ext_word_list=None):\n \"\"\"Generate a list of paragraphs.\n\n This method uses |paragraph| under the hood to generate paragraphs, and\n the ``nb`` argument controls exactly how many sentences the list will\n contain. The ``ext_word_list`` argument works in exactly the same way\n as well.\n\n :sample: nb=5\n :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n return [self.paragraph(ext_word_list=ext_word_list)\n for _ in range(0, nb)]\n\n def text(self, max_nb_chars=200, ext_word_list=None):\n \"\"\"Generate a text string.\n\n The ``max_nb_chars`` argument controls the approximate number of\n characters the text string will have, and depending on its value, this\n method may use either |words|, |sentences|, or |paragraphs| for text\n generation. 
The ``ext_word_list`` argument works in exactly the same way\n it would in any of those methods.\n\n :sample: max_nb_chars=20\n :sample: max_nb_chars=80\n :sample: max_nb_chars=160\n :sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n text = []\n if max_nb_chars < 5:\n raise ValueError(\n 'text() can only generate text of at least 5 characters')\n\n if max_nb_chars < 25:\n # join words\n while not text:\n size = 0\n # determine how many words are needed to reach the $max_nb_chars\n # once;\n while size < max_nb_chars:\n word = (self.word_connector if size else '') + \\\n self.word(ext_word_list=ext_word_list)\n text.append(word)\n size += len(word)\n text.pop()\n text[0] = text[0][0].upper() + text[0][1:]\n last_index = len(text) - 1\n text[last_index] += self.sentence_punctuation\n elif max_nb_chars < 100:\n # join sentences\n while not text:\n size = 0\n # determine how many sentences are needed to reach the\n # $max_nb_chars once\n while size < max_nb_chars:\n sentence = (self.word_connector if size else '') + \\\n self.sentence(ext_word_list=ext_word_list)\n text.append(sentence)\n size += len(sentence)\n text.pop()\n else:\n # join paragraphs\n while not text:\n size = 0\n # determine how many paragraphs are needed to reach the\n # $max_nb_chars once\n while size < max_nb_chars:\n paragraph = ('\\n' if size else '') + \\\n self.paragraph(ext_word_list=ext_word_list)\n text.append(paragraph)\n size += len(paragraph)\n text.pop()\n\n return \"\".join(text)\n\n def texts(self, nb_texts=3, max_nb_chars=200, ext_word_list=None):\n \"\"\"Generate a list of text strings.\n\n The ``nb_texts`` argument controls how many text strings the list will\n contain, and this method uses |text| under the hood for text generation,\n so the two remaining arguments, ``max_nb_chars`` and ``ext_word_list``\n will work in exactly the same way as well.\n\n :sample: nb_texts=5\n :sample: nb_texts=5, max_nb_chars=50\n :sample: nb_texts=5, max_nb_chars=50,\n ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n return [self.text(max_nb_chars, ext_word_list)\n for _ in range(0, nb_texts)]\n", "path": "faker/providers/lorem/__init__.py"}]} | 3,456 | 245 |
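The record above embeds Faker's `lorem` provider as escaped JSON. A minimal usage sketch for the three generators it defines (`words`, `sentence`, `text`) follows; it assumes the published `faker` package rather than the vendored copy inside the record.

```python
# Usage sketch for the lorem provider captured in the record above.
# Assumes `pip install faker`; output is random placeholder text.
from faker import Faker

fake = Faker()                      # en_US locale, so English lorem words
print(fake.words(nb=5))             # five pseudo-random words
print(fake.sentence(nb_words=8))    # one sentence of roughly eight words
print(fake.text(max_nb_chars=120))  # sentences joined up to ~120 characters
```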
gh_patches_debug_6989 | rasdani/github-patches | git_diff | vyperlang__vyper-2533 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot access `.balance` of address constants
### What's your issue about?
If I try to call the `balance` attribute of an address constant
```python
ADDR: constant(address) = 0x000000000000000000000000000000000000dEaD
@external
def foo() -> uint256:
return ADDR.balance
```
I get an unhandled typing exception
```python
...
~/.pyenv/versions/3.8.5/lib/python3.8/site-packages/vyper/parser/expr.py in __init__(self, node, context)
201 self.lll_node = fn()
202 if self.lll_node is None:
--> 203 raise TypeCheckFailure(f"{type(node).__name__} node did not produce LLL")
204
205 def parse_Int(self):
TypeCheckFailure: Name node did not produce LLL
This is an unhandled internal compiler error. Please create an issue on Github to notify the developers.
https://github.com/vyperlang/vyper/issues/new?template=bug.md
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vyper/ast/folding.py`
Content:
```
1 from decimal import Decimal
2 from typing import Union
3
4 from vyper.ast import nodes as vy_ast
5 from vyper.builtin_functions import DISPATCH_TABLE
6 from vyper.exceptions import UnfoldableNode
7
8 BUILTIN_CONSTANTS = {
9 "EMPTY_BYTES32": (
10 vy_ast.Hex,
11 "0x0000000000000000000000000000000000000000000000000000000000000000",
12 ), # NOQA: E501
13 "ZERO_ADDRESS": (vy_ast.Hex, "0x0000000000000000000000000000000000000000"),
14 "MAX_INT128": (vy_ast.Int, 2 ** 127 - 1),
15 "MIN_INT128": (vy_ast.Int, -(2 ** 127)),
16 "MAX_DECIMAL": (vy_ast.Decimal, Decimal(2 ** 127 - 1)),
17 "MIN_DECIMAL": (vy_ast.Decimal, Decimal(-(2 ** 127))),
18 "MAX_UINT256": (vy_ast.Int, 2 ** 256 - 1),
19 }
20
21
22 def fold(vyper_module: vy_ast.Module) -> None:
23 """
24 Perform literal folding operations on a Vyper AST.
25
26 Arguments
27 ---------
28 vyper_module : Module
29 Top-level Vyper AST node.
30 """
31 replace_builtin_constants(vyper_module)
32
33 changed_nodes = 1
34 while changed_nodes:
35 changed_nodes = 0
36 changed_nodes += replace_user_defined_constants(vyper_module)
37 changed_nodes += replace_literal_ops(vyper_module)
38 changed_nodes += replace_subscripts(vyper_module)
39 changed_nodes += replace_builtin_functions(vyper_module)
40
41
42 def replace_literal_ops(vyper_module: vy_ast.Module) -> int:
43 """
44 Find and evaluate operation and comparison nodes within the Vyper AST,
45 replacing them with Constant nodes where possible.
46
47 Arguments
48 ---------
49 vyper_module : Module
50 Top-level Vyper AST node.
51
52 Returns
53 -------
54 int
55 Number of nodes that were replaced.
56 """
57 changed_nodes = 0
58
59 node_types = (vy_ast.BoolOp, vy_ast.BinOp, vy_ast.UnaryOp, vy_ast.Compare)
60 for node in vyper_module.get_descendants(node_types, reverse=True):
61 try:
62 new_node = node.evaluate()
63 except UnfoldableNode:
64 continue
65
66 changed_nodes += 1
67 vyper_module.replace_in_tree(node, new_node)
68
69 return changed_nodes
70
71
72 def replace_subscripts(vyper_module: vy_ast.Module) -> int:
73 """
74 Find and evaluate Subscript nodes within the Vyper AST, replacing them with
75 Constant nodes where possible.
76
77 Arguments
78 ---------
79 vyper_module : Module
80 Top-level Vyper AST node.
81
82 Returns
83 -------
84 int
85 Number of nodes that were replaced.
86 """
87 changed_nodes = 0
88
89 for node in vyper_module.get_descendants(vy_ast.Subscript, reverse=True):
90 try:
91 new_node = node.evaluate()
92 except UnfoldableNode:
93 continue
94
95 changed_nodes += 1
96 vyper_module.replace_in_tree(node, new_node)
97
98 return changed_nodes
99
100
101 def replace_builtin_functions(vyper_module: vy_ast.Module) -> int:
102 """
103 Find and evaluate builtin function calls within the Vyper AST, replacing
104 them with Constant nodes where possible.
105
106 Arguments
107 ---------
108 vyper_module : Module
109 Top-level Vyper AST node.
110
111 Returns
112 -------
113 int
114 Number of nodes that were replaced.
115 """
116 changed_nodes = 0
117
118 for node in vyper_module.get_descendants(vy_ast.Call, reverse=True):
119 if not isinstance(node.func, vy_ast.Name):
120 continue
121
122 name = node.func.id
123 func = DISPATCH_TABLE.get(name)
124 if func is None or not hasattr(func, "evaluate"):
125 continue
126 try:
127 new_node = func.evaluate(node) # type: ignore
128 except UnfoldableNode:
129 continue
130
131 changed_nodes += 1
132 vyper_module.replace_in_tree(node, new_node)
133
134 return changed_nodes
135
136
137 def replace_builtin_constants(vyper_module: vy_ast.Module) -> None:
138 """
139 Replace references to builtin constants with their literal values.
140
141 Arguments
142 ---------
143 vyper_module : Module
144 Top-level Vyper AST node.
145 """
146 for name, (node, value) in BUILTIN_CONSTANTS.items():
147 replace_constant(vyper_module, name, node(value=value), True) # type: ignore
148
149
150 def replace_user_defined_constants(vyper_module: vy_ast.Module) -> int:
151 """
152 Find user-defined constant assignments, and replace references
153 to the constants with their literal values.
154
155 Arguments
156 ---------
157 vyper_module : Module
158 Top-level Vyper AST node.
159
160 Returns
161 -------
162 int
163 Number of nodes that were replaced.
164 """
165 changed_nodes = 0
166
167 for node in vyper_module.get_children(vy_ast.AnnAssign):
168 if not isinstance(node.target, vy_ast.Name):
169 # left-hand-side of assignment is not a variable
170 continue
171 if node.get("annotation.func.id") != "constant":
172 # annotation is not wrapped in `constant(...)`
173 continue
174
175 changed_nodes += replace_constant(vyper_module, node.target.id, node.value, False)
176
177 return changed_nodes
178
179
180 # TODO constant folding on log events
181
182
183 def _replace(old_node, new_node):
184 if isinstance(new_node, vy_ast.Constant):
185 return new_node.from_node(old_node, value=new_node.value)
186 elif isinstance(new_node, vy_ast.List):
187 list_values = [_replace(old_node, i) for i in new_node.elements]
188 return new_node.from_node(old_node, elements=list_values)
189 else:
190 raise UnfoldableNode
191
192
193 def replace_constant(
194 vyper_module: vy_ast.Module,
195 id_: str,
196 replacement_node: Union[vy_ast.Constant, vy_ast.List],
197 raise_on_error: bool,
198 ) -> int:
199 """
200 Replace references to a variable name with a literal value.
201
202 Arguments
203 ---------
204 vyper_module : Module
205 Module-level ast node to perform replacement in.
206 id_ : str
207 String representing the `.id` attribute of the node(s) to be replaced.
208 replacement_node : Constant | List
209 Vyper ast node representing the literal value to be substituted in.
210 raise_on_error: bool
211 Boolean indicating if `UnfoldableNode` exception should be raised or ignored.
212
213 Returns
214 -------
215 int
216 Number of nodes that were replaced.
217 """
218 changed_nodes = 0
219
220 for node in vyper_module.get_descendants(vy_ast.Name, {"id": id_}, reverse=True):
221 parent = node.get_ancestor()
222
223 if isinstance(parent, vy_ast.Attribute):
224 # do not replace attributes
225 continue
226 if isinstance(parent, vy_ast.Call) and node == parent.func:
227 # do not replace calls
228 continue
229
230 # do not replace dictionary keys
231 if isinstance(parent, vy_ast.Dict) and node in parent.keys:
232 continue
233
234 if not node.get_ancestor(vy_ast.Index):
235 # do not replace left-hand side of assignments
236 assign = node.get_ancestor((vy_ast.Assign, vy_ast.AnnAssign, vy_ast.AugAssign))
237
238 if assign and node in assign.target.get_descendants(include_self=True):
239 continue
240
241 try:
242 new_node = _replace(node, replacement_node)
243 except UnfoldableNode:
244 if raise_on_error:
245 raise
246 continue
247
248 changed_nodes += 1
249 vyper_module.replace_in_tree(node, new_node)
250
251 return changed_nodes
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vyper/ast/folding.py b/vyper/ast/folding.py
--- a/vyper/ast/folding.py
+++ b/vyper/ast/folding.py
@@ -220,9 +220,6 @@
for node in vyper_module.get_descendants(vy_ast.Name, {"id": id_}, reverse=True):
parent = node.get_ancestor()
- if isinstance(parent, vy_ast.Attribute):
- # do not replace attributes
- continue
if isinstance(parent, vy_ast.Call) and node == parent.func:
# do not replace calls
continue
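The guard removed above is exactly what broke constant folding for `ADDR.balance`: a `Name` whose parent is an `Attribute` was never replaced, so the later LLL pass met a bare name it could not resolve. A rough illustration using Python's own `ast` module (a stand-in for vyper's AST; the `parent` back-reference is ad hoc, mimicking `get_ancestor()`):

```python
# Sketch only: plain Python ast standing in for vyper's AST.
import ast

tree = ast.parse("ADDR.balance", mode="eval")
for node in ast.walk(tree):
    for child in ast.iter_child_nodes(node):
        child.parent = node  # ad-hoc back-reference, like get_ancestor()

name = next(n for n in ast.walk(tree) if isinstance(n, ast.Name))
print(isinstance(name.parent, ast.Attribute))  # True -> the old guard hit `continue` here
```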
| {"golden_diff": "diff --git a/vyper/ast/folding.py b/vyper/ast/folding.py\n--- a/vyper/ast/folding.py\n+++ b/vyper/ast/folding.py\n@@ -220,9 +220,6 @@\n for node in vyper_module.get_descendants(vy_ast.Name, {\"id\": id_}, reverse=True):\n parent = node.get_ancestor()\n \n- if isinstance(parent, vy_ast.Attribute):\n- # do not replace attributes\n- continue\n if isinstance(parent, vy_ast.Call) and node == parent.func:\n # do not replace calls\n continue\n", "issue": "Cannot access `.balance` of address constants\n### What's your issue about?\r\nIf I try to cal the `balance` attribute of an address constant\r\n```python\r\nADDR: constant(address) = 0x000000000000000000000000000000000000dEaD\r\n\r\n@external\r\ndef foo() -> uint256:\r\n return ADDR.balance\r\n```\r\n\r\nI get an unhandled typing exception\r\n```python\r\n...\r\n~/.pyenv/versions/3.8.5/lib/python3.8/site-packages/vyper/parser/expr.py in __init__(self, node, context)\r\n 201 self.lll_node = fn()\r\n 202 if self.lll_node is None:\r\n--> 203 raise TypeCheckFailure(f\"{type(node).__name__} node did not produce LLL\")\r\n 204 \r\n 205 def parse_Int(self):\r\n\r\nTypeCheckFailure: Name node did not produce LLL\r\n\r\nThis is an unhandled internal compiler error. Please create an issue on Github to notify the developers.\r\nhttps://github.com/vyperlang/vyper/issues/new?template=bug.md\r\n```\n", "before_files": [{"content": "from decimal import Decimal\nfrom typing import Union\n\nfrom vyper.ast import nodes as vy_ast\nfrom vyper.builtin_functions import DISPATCH_TABLE\nfrom vyper.exceptions import UnfoldableNode\n\nBUILTIN_CONSTANTS = {\n \"EMPTY_BYTES32\": (\n vy_ast.Hex,\n \"0x0000000000000000000000000000000000000000000000000000000000000000\",\n ), # NOQA: E501\n \"ZERO_ADDRESS\": (vy_ast.Hex, \"0x0000000000000000000000000000000000000000\"),\n \"MAX_INT128\": (vy_ast.Int, 2 ** 127 - 1),\n \"MIN_INT128\": (vy_ast.Int, -(2 ** 127)),\n \"MAX_DECIMAL\": (vy_ast.Decimal, Decimal(2 ** 127 - 1)),\n \"MIN_DECIMAL\": (vy_ast.Decimal, Decimal(-(2 ** 127))),\n \"MAX_UINT256\": (vy_ast.Int, 2 ** 256 - 1),\n}\n\n\ndef fold(vyper_module: vy_ast.Module) -> None:\n \"\"\"\n Perform literal folding operations on a Vyper AST.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n \"\"\"\n replace_builtin_constants(vyper_module)\n\n changed_nodes = 1\n while changed_nodes:\n changed_nodes = 0\n changed_nodes += replace_user_defined_constants(vyper_module)\n changed_nodes += replace_literal_ops(vyper_module)\n changed_nodes += replace_subscripts(vyper_module)\n changed_nodes += replace_builtin_functions(vyper_module)\n\n\ndef replace_literal_ops(vyper_module: vy_ast.Module) -> int:\n \"\"\"\n Find and evaluate operation and comparison nodes within the Vyper AST,\n replacing them with Constant nodes where possible.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n node_types = (vy_ast.BoolOp, vy_ast.BinOp, vy_ast.UnaryOp, vy_ast.Compare)\n for node in vyper_module.get_descendants(node_types, reverse=True):\n try:\n new_node = node.evaluate()\n except UnfoldableNode:\n continue\n\n changed_nodes += 1\n vyper_module.replace_in_tree(node, new_node)\n\n return changed_nodes\n\n\ndef replace_subscripts(vyper_module: vy_ast.Module) -> int:\n \"\"\"\n Find and evaluate Subscript nodes within the Vyper AST, replacing them with\n Constant nodes where possible.\n\n Arguments\n ---------\n vyper_module : 
Module\n Top-level Vyper AST node.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n for node in vyper_module.get_descendants(vy_ast.Subscript, reverse=True):\n try:\n new_node = node.evaluate()\n except UnfoldableNode:\n continue\n\n changed_nodes += 1\n vyper_module.replace_in_tree(node, new_node)\n\n return changed_nodes\n\n\ndef replace_builtin_functions(vyper_module: vy_ast.Module) -> int:\n \"\"\"\n Find and evaluate builtin function calls within the Vyper AST, replacing\n them with Constant nodes where possible.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n for node in vyper_module.get_descendants(vy_ast.Call, reverse=True):\n if not isinstance(node.func, vy_ast.Name):\n continue\n\n name = node.func.id\n func = DISPATCH_TABLE.get(name)\n if func is None or not hasattr(func, \"evaluate\"):\n continue\n try:\n new_node = func.evaluate(node) # type: ignore\n except UnfoldableNode:\n continue\n\n changed_nodes += 1\n vyper_module.replace_in_tree(node, new_node)\n\n return changed_nodes\n\n\ndef replace_builtin_constants(vyper_module: vy_ast.Module) -> None:\n \"\"\"\n Replace references to builtin constants with their literal values.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n \"\"\"\n for name, (node, value) in BUILTIN_CONSTANTS.items():\n replace_constant(vyper_module, name, node(value=value), True) # type: ignore\n\n\ndef replace_user_defined_constants(vyper_module: vy_ast.Module) -> int:\n \"\"\"\n Find user-defined constant assignments, and replace references\n to the constants with their literal values.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n for node in vyper_module.get_children(vy_ast.AnnAssign):\n if not isinstance(node.target, vy_ast.Name):\n # left-hand-side of assignment is not a variable\n continue\n if node.get(\"annotation.func.id\") != \"constant\":\n # annotation is not wrapped in `constant(...)`\n continue\n\n changed_nodes += replace_constant(vyper_module, node.target.id, node.value, False)\n\n return changed_nodes\n\n\n# TODO constant folding on log events\n\n\ndef _replace(old_node, new_node):\n if isinstance(new_node, vy_ast.Constant):\n return new_node.from_node(old_node, value=new_node.value)\n elif isinstance(new_node, vy_ast.List):\n list_values = [_replace(old_node, i) for i in new_node.elements]\n return new_node.from_node(old_node, elements=list_values)\n else:\n raise UnfoldableNode\n\n\ndef replace_constant(\n vyper_module: vy_ast.Module,\n id_: str,\n replacement_node: Union[vy_ast.Constant, vy_ast.List],\n raise_on_error: bool,\n) -> int:\n \"\"\"\n Replace references to a variable name with a literal value.\n\n Arguments\n ---------\n vyper_module : Module\n Module-level ast node to perform replacement in.\n id_ : str\n String representing the `.id` attribute of the node(s) to be replaced.\n replacement_node : Constant | List\n Vyper ast node representing the literal value to be substituted in.\n raise_on_error: bool\n Boolean indicating if `UnfoldableNode` exception should be raised or ignored.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n for node in vyper_module.get_descendants(vy_ast.Name, {\"id\": id_}, reverse=True):\n parent = 
node.get_ancestor()\n\n if isinstance(parent, vy_ast.Attribute):\n # do not replace attributes\n continue\n if isinstance(parent, vy_ast.Call) and node == parent.func:\n # do not replace calls\n continue\n\n # do not replace dictionary keys\n if isinstance(parent, vy_ast.Dict) and node in parent.keys:\n continue\n\n if not node.get_ancestor(vy_ast.Index):\n # do not replace left-hand side of assignments\n assign = node.get_ancestor((vy_ast.Assign, vy_ast.AnnAssign, vy_ast.AugAssign))\n\n if assign and node in assign.target.get_descendants(include_self=True):\n continue\n\n try:\n new_node = _replace(node, replacement_node)\n except UnfoldableNode:\n if raise_on_error:\n raise\n continue\n\n changed_nodes += 1\n vyper_module.replace_in_tree(node, new_node)\n\n return changed_nodes\n", "path": "vyper/ast/folding.py"}], "after_files": [{"content": "from decimal import Decimal\nfrom typing import Union\n\nfrom vyper.ast import nodes as vy_ast\nfrom vyper.builtin_functions import DISPATCH_TABLE\nfrom vyper.exceptions import UnfoldableNode\n\nBUILTIN_CONSTANTS = {\n \"EMPTY_BYTES32\": (\n vy_ast.Hex,\n \"0x0000000000000000000000000000000000000000000000000000000000000000\",\n ), # NOQA: E501\n \"ZERO_ADDRESS\": (vy_ast.Hex, \"0x0000000000000000000000000000000000000000\"),\n \"MAX_INT128\": (vy_ast.Int, 2 ** 127 - 1),\n \"MIN_INT128\": (vy_ast.Int, -(2 ** 127)),\n \"MAX_DECIMAL\": (vy_ast.Decimal, Decimal(2 ** 127 - 1)),\n \"MIN_DECIMAL\": (vy_ast.Decimal, Decimal(-(2 ** 127))),\n \"MAX_UINT256\": (vy_ast.Int, 2 ** 256 - 1),\n}\n\n\ndef fold(vyper_module: vy_ast.Module) -> None:\n \"\"\"\n Perform literal folding operations on a Vyper AST.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n \"\"\"\n replace_builtin_constants(vyper_module)\n\n changed_nodes = 1\n while changed_nodes:\n changed_nodes = 0\n changed_nodes += replace_user_defined_constants(vyper_module)\n changed_nodes += replace_literal_ops(vyper_module)\n changed_nodes += replace_subscripts(vyper_module)\n changed_nodes += replace_builtin_functions(vyper_module)\n\n\ndef replace_literal_ops(vyper_module: vy_ast.Module) -> int:\n \"\"\"\n Find and evaluate operation and comparison nodes within the Vyper AST,\n replacing them with Constant nodes where possible.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n node_types = (vy_ast.BoolOp, vy_ast.BinOp, vy_ast.UnaryOp, vy_ast.Compare)\n for node in vyper_module.get_descendants(node_types, reverse=True):\n try:\n new_node = node.evaluate()\n except UnfoldableNode:\n continue\n\n changed_nodes += 1\n vyper_module.replace_in_tree(node, new_node)\n\n return changed_nodes\n\n\ndef replace_subscripts(vyper_module: vy_ast.Module) -> int:\n \"\"\"\n Find and evaluate Subscript nodes within the Vyper AST, replacing them with\n Constant nodes where possible.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n for node in vyper_module.get_descendants(vy_ast.Subscript, reverse=True):\n try:\n new_node = node.evaluate()\n except UnfoldableNode:\n continue\n\n changed_nodes += 1\n vyper_module.replace_in_tree(node, new_node)\n\n return changed_nodes\n\n\ndef replace_builtin_functions(vyper_module: vy_ast.Module) -> int:\n \"\"\"\n Find and evaluate builtin function calls within the Vyper AST, replacing\n them with Constant 
nodes where possible.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n for node in vyper_module.get_descendants(vy_ast.Call, reverse=True):\n if not isinstance(node.func, vy_ast.Name):\n continue\n\n name = node.func.id\n func = DISPATCH_TABLE.get(name)\n if func is None or not hasattr(func, \"evaluate\"):\n continue\n try:\n new_node = func.evaluate(node) # type: ignore\n except UnfoldableNode:\n continue\n\n changed_nodes += 1\n vyper_module.replace_in_tree(node, new_node)\n\n return changed_nodes\n\n\ndef replace_builtin_constants(vyper_module: vy_ast.Module) -> None:\n \"\"\"\n Replace references to builtin constants with their literal values.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n \"\"\"\n for name, (node, value) in BUILTIN_CONSTANTS.items():\n replace_constant(vyper_module, name, node(value=value), True) # type: ignore\n\n\ndef replace_user_defined_constants(vyper_module: vy_ast.Module) -> int:\n \"\"\"\n Find user-defined constant assignments, and replace references\n to the constants with their literal values.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n for node in vyper_module.get_children(vy_ast.AnnAssign):\n if not isinstance(node.target, vy_ast.Name):\n # left-hand-side of assignment is not a variable\n continue\n if node.get(\"annotation.func.id\") != \"constant\":\n # annotation is not wrapped in `constant(...)`\n continue\n\n changed_nodes += replace_constant(vyper_module, node.target.id, node.value, False)\n\n return changed_nodes\n\n\n# TODO constant folding on log events\n\n\ndef _replace(old_node, new_node):\n if isinstance(new_node, vy_ast.Constant):\n return new_node.from_node(old_node, value=new_node.value)\n elif isinstance(new_node, vy_ast.List):\n list_values = [_replace(old_node, i) for i in new_node.elements]\n return new_node.from_node(old_node, elements=list_values)\n else:\n raise UnfoldableNode\n\n\ndef replace_constant(\n vyper_module: vy_ast.Module,\n id_: str,\n replacement_node: Union[vy_ast.Constant, vy_ast.List],\n raise_on_error: bool,\n) -> int:\n \"\"\"\n Replace references to a variable name with a literal value.\n\n Arguments\n ---------\n vyper_module : Module\n Module-level ast node to perform replacement in.\n id_ : str\n String representing the `.id` attribute of the node(s) to be replaced.\n replacement_node : Constant | List\n Vyper ast node representing the literal value to be substituted in.\n raise_on_error: bool\n Boolean indicating if `UnfoldableNode` exception should be raised or ignored.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n for node in vyper_module.get_descendants(vy_ast.Name, {\"id\": id_}, reverse=True):\n parent = node.get_ancestor()\n\n if isinstance(parent, vy_ast.Call) and node == parent.func:\n # do not replace calls\n continue\n\n # do not replace dictionary keys\n if isinstance(parent, vy_ast.Dict) and node in parent.keys:\n continue\n\n if not node.get_ancestor(vy_ast.Index):\n # do not replace left-hand side of assignments\n assign = node.get_ancestor((vy_ast.Assign, vy_ast.AnnAssign, vy_ast.AugAssign))\n\n if assign and node in assign.target.get_descendants(include_self=True):\n continue\n\n try:\n new_node = _replace(node, replacement_node)\n except 
UnfoldableNode:\n if raise_on_error:\n raise\n continue\n\n changed_nodes += 1\n vyper_module.replace_in_tree(node, new_node)\n\n return changed_nodes\n", "path": "vyper/ast/folding.py"}]} | 2,937 | 134 |
gh_patches_debug_27102 | rasdani/github-patches | git_diff | fossasia__open-event-server-5627 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
copied events have no organizer
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
Currently, copied events have no associated event roles and are shown with no organizers
**To Reproduce**
Steps to reproduce the behavior:
1. Go to any event
2. Click on copy
3. Go to Admin/Events
4. See error

**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
The copied event should also be organized by the previous organiser
**Additional details (please complete the following information):**
- OS: [e.g. MacOS, Ubuntu, CentOS] Ubuntu
- Python Version [e.g. `3.5`, `3.6`] 3.5
**Additional context**
<!-- Add any other context about the problem here. -->
Working on it
--- END ISSUE ---
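A reproduction sketch against the copy endpoint defined in `app/api/event_copy.py` below; the host, event identifier and JWT are placeholders rather than values from the issue:

```python
# Reproduction sketch; POST /v1/events/<identifier>/copy is the route shown below.
import requests

resp = requests.post(
    "https://api.example.com/v1/events/my-event/copy",
    headers={"Authorization": "JWT <token>"},  # placeholder credentials
)
print(resp.json())  # expected: {"id": ..., "identifier": ..., "copied": True}
# Before the fix, the copied event comes back with no organizer role attached.
```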
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/event_copy.py`
Content:
```
1 from flask import jsonify, Blueprint, abort, make_response
2 from sqlalchemy.orm import make_transient
3
4 from app.api.helpers.db import safe_query
5 from app.api.helpers.files import create_save_resized_image
6 from app.api.helpers.permission_manager import has_access
7 from app.models.custom_form import CustomForms
8 from app.models.discount_code import DiscountCode
9 from app.models.event import Event, get_new_event_identifier
10 from app.models import db
11 from app.models.microlocation import Microlocation
12 from app.models.social_link import SocialLink
13 from app.models.speakers_call import SpeakersCall
14 from app.models.sponsor import Sponsor
15 from app.models.ticket import Ticket
16 from app.models.track import Track
17
18 event_copy = Blueprint('event_copy', __name__, url_prefix='/v1/events')
19
20
21 @event_copy.route('/<identifier>/copy', methods=['POST'])
22 def create_event_copy(identifier):
23 id = 'identifier'
24
25 if identifier.isdigit():
26 id = 'id'
27
28 event = safe_query(db, Event, id, identifier, 'event_' + id)
29
30 if not has_access('is_coorganizer', event_id=event.id):
31 return abort(
32 make_response(jsonify(error="Access Forbidden"), 403)
33 )
34 tickets = Ticket.query.filter_by(event_id=event.id).all()
35 social_links = SocialLink.query.filter_by(event_id=event.id).all()
36 sponsors = Sponsor.query.filter_by(event_id=event.id).all()
37 microlocations = Microlocation.query.filter_by(event_id=event.id).all()
38 tracks = Track.query.filter_by(event_id=event.id).all()
39 custom_forms = CustomForms.query.filter_by(event_id=event.id).all()
40 discount_codes = DiscountCode.query.filter_by(event_id=event.id).all()
41 speaker_calls = SpeakersCall.query.filter_by(event_id=event.id).all()
42
43 db.session.expunge(event) # expunge the object from session
44 make_transient(event)
45 delattr(event, 'id')
46 event.identifier = get_new_event_identifier()
47 db.session.add(event)
48 db.session.commit()
49
50 # Removes access_codes, order_tickets, ticket_tags for the new tickets created.
51 for ticket in tickets:
52 ticket_id = ticket.id
53 db.session.expunge(ticket) # expunge the object from session
54 make_transient(ticket)
55 ticket.event_id = event.id
56 delattr(ticket, 'id')
57 db.session.add(ticket)
58 db.session.commit()
59
60 for link in social_links:
61 link_id = link.id
62 db.session.expunge(link) # expunge the object from session
63 make_transient(link)
64 link.event_id = event.id
65 delattr(link, 'id')
66 db.session.add(link)
67 db.session.commit()
68
69 for sponsor in sponsors:
70 sponsor_id = sponsor.id
71 db.session.expunge(sponsor) # expunge the object from session
72 make_transient(sponsor)
73 sponsor.event_id = event.id
74 logo_url = create_save_resized_image(image_file=sponsor.logo_url, resize=False)
75 delattr(sponsor, 'id')
76 sponsor.logo_url = logo_url
77 db.session.add(sponsor)
78 db.session.commit()
79
80 for location in microlocations:
81 location_id = location.id
82 db.session.expunge(location) # expunge the object from session
83 make_transient(location)
84 location.event_id = event.id
85 delattr(location, 'id')
86 db.session.add(location)
87 db.session.commit()
88
89 # No sessions are copied for new tracks
90 for track in tracks:
91 track_id = track.id
92 db.session.expunge(track) # expunge the object from session
93 make_transient(track)
94 track.event_id = event.id
95 delattr(track, 'id')
96 db.session.add(track)
97 db.session.commit()
98
99 for call in speaker_calls:
100 call_id = call.id
101 db.session.expunge(call) # expunge the object from session
102 make_transient(call)
103 call.event_id = event.id
104 delattr(call, 'id')
105 db.session.add(call)
106 db.session.commit()
107
108 for code in discount_codes:
109 code_id = code.id
110 db.session.expunge(code) # expunge the object from session
111 make_transient(code)
112 code.event_id = event.id
113 delattr(code, 'id')
114 db.session.add(code)
115 db.session.commit()
116
117 for form in custom_forms:
118 form_id = form.id
119 db.session.expunge(form) # expunge the object from session
120 make_transient(form)
121 form.event_id = event.id
122 delattr(form, 'id')
123 db.session.add(form)
124 db.session.commit()
125
126 return jsonify({
127 'id': event.id,
128 'identifier': event.identifier,
129 "copied": True
130 })
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/event_copy.py b/app/api/event_copy.py
--- a/app/api/event_copy.py
+++ b/app/api/event_copy.py
@@ -14,6 +14,7 @@
from app.models.sponsor import Sponsor
from app.models.ticket import Ticket
from app.models.track import Track
+from app.models.users_events_role import UsersEventsRoles
event_copy = Blueprint('event_copy', __name__, url_prefix='/v1/events')
@@ -39,6 +40,7 @@
custom_forms = CustomForms.query.filter_by(event_id=event.id).all()
discount_codes = DiscountCode.query.filter_by(event_id=event.id).all()
speaker_calls = SpeakersCall.query.filter_by(event_id=event.id).all()
+ user_event_roles = UsersEventsRoles.query.filter_by(event_id=event.id).all()
db.session.expunge(event) # expunge the object from session
make_transient(event)
@@ -123,6 +125,14 @@
db.session.add(form)
db.session.commit()
+ for user_role in user_event_roles:
+ db.session.expunge(user_role)
+ make_transient(user_role)
+ user_role.event_id = event.id
+ delattr(user_role, 'id')
+ db.session.add(user_role)
+ db.session.commit()
+
return jsonify({
'id': event.id,
'identifier': event.identifier,
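The added block follows the same detach-and-reinsert pattern the endpoint already uses for tickets, tracks and the rest. Written once as a sketch (the helper name `copy_rows` is made up, not part of the codebase):

```python
# Sketch of the copy pattern repeated for each related model above.
from sqlalchemy.orm import make_transient

def copy_rows(db, model, old_event_id, new_event_id):
    for row in model.query.filter_by(event_id=old_event_id).all():
        db.session.expunge(row)       # detach the loaded row from the session
        make_transient(row)           # clear its persistent identity
        row.event_id = new_event_id   # re-point it at the copied event
        delattr(row, "id")            # let the database assign a fresh primary key
        db.session.add(row)
    db.session.commit()
```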
| {"golden_diff": "diff --git a/app/api/event_copy.py b/app/api/event_copy.py\n--- a/app/api/event_copy.py\n+++ b/app/api/event_copy.py\n@@ -14,6 +14,7 @@\n from app.models.sponsor import Sponsor\n from app.models.ticket import Ticket\n from app.models.track import Track\n+from app.models.users_events_role import UsersEventsRoles\n \n event_copy = Blueprint('event_copy', __name__, url_prefix='/v1/events')\n \n@@ -39,6 +40,7 @@\n custom_forms = CustomForms.query.filter_by(event_id=event.id).all()\n discount_codes = DiscountCode.query.filter_by(event_id=event.id).all()\n speaker_calls = SpeakersCall.query.filter_by(event_id=event.id).all()\n+ user_event_roles = UsersEventsRoles.query.filter_by(event_id=event.id).all()\n \n db.session.expunge(event) # expunge the object from session\n make_transient(event)\n@@ -123,6 +125,14 @@\n db.session.add(form)\n db.session.commit()\n \n+ for user_role in user_event_roles:\n+ db.session.expunge(user_role)\n+ make_transient(user_role)\n+ user_role.event_id = event.id\n+ delattr(user_role, 'id')\n+ db.session.add(user_role)\n+ db.session.commit()\n+\n return jsonify({\n 'id': event.id,\n 'identifier': event.identifier,\n", "issue": "copied events have no organizer\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nCurrently, copied events have no associated event roles and are being showed with no organizers\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to any event\r\n2. Click on copy\r\n3. Go to Admin/Events \r\n4. See error\r\n\r\n\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe copied event should also be organized by previous organiser himself\r\n\r\n**Additional details (please complete the following information):**\r\n - OS: [e.g. MacOS, Ubuntu, CentOS] Ubuntu\r\n - Python Version [e.g. `3.5`, `3.6`] 3.5\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. 
-->\r\nWorking on it\n", "before_files": [{"content": "from flask import jsonify, Blueprint, abort, make_response\nfrom sqlalchemy.orm import make_transient\n\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.files import create_save_resized_image\nfrom app.api.helpers.permission_manager import has_access\nfrom app.models.custom_form import CustomForms\nfrom app.models.discount_code import DiscountCode\nfrom app.models.event import Event, get_new_event_identifier\nfrom app.models import db\nfrom app.models.microlocation import Microlocation\nfrom app.models.social_link import SocialLink\nfrom app.models.speakers_call import SpeakersCall\nfrom app.models.sponsor import Sponsor\nfrom app.models.ticket import Ticket\nfrom app.models.track import Track\n\nevent_copy = Blueprint('event_copy', __name__, url_prefix='/v1/events')\n\n\n@event_copy.route('/<identifier>/copy', methods=['POST'])\ndef create_event_copy(identifier):\n id = 'identifier'\n\n if identifier.isdigit():\n id = 'id'\n\n event = safe_query(db, Event, id, identifier, 'event_' + id)\n\n if not has_access('is_coorganizer', event_id=event.id):\n return abort(\n make_response(jsonify(error=\"Access Forbidden\"), 403)\n )\n tickets = Ticket.query.filter_by(event_id=event.id).all()\n social_links = SocialLink.query.filter_by(event_id=event.id).all()\n sponsors = Sponsor.query.filter_by(event_id=event.id).all()\n microlocations = Microlocation.query.filter_by(event_id=event.id).all()\n tracks = Track.query.filter_by(event_id=event.id).all()\n custom_forms = CustomForms.query.filter_by(event_id=event.id).all()\n discount_codes = DiscountCode.query.filter_by(event_id=event.id).all()\n speaker_calls = SpeakersCall.query.filter_by(event_id=event.id).all()\n\n db.session.expunge(event) # expunge the object from session\n make_transient(event)\n delattr(event, 'id')\n event.identifier = get_new_event_identifier()\n db.session.add(event)\n db.session.commit()\n\n # Removes access_codes, order_tickets, ticket_tags for the new tickets created.\n for ticket in tickets:\n ticket_id = ticket.id\n db.session.expunge(ticket) # expunge the object from session\n make_transient(ticket)\n ticket.event_id = event.id\n delattr(ticket, 'id')\n db.session.add(ticket)\n db.session.commit()\n\n for link in social_links:\n link_id = link.id\n db.session.expunge(link) # expunge the object from session\n make_transient(link)\n link.event_id = event.id\n delattr(link, 'id')\n db.session.add(link)\n db.session.commit()\n\n for sponsor in sponsors:\n sponsor_id = sponsor.id\n db.session.expunge(sponsor) # expunge the object from session\n make_transient(sponsor)\n sponsor.event_id = event.id\n logo_url = create_save_resized_image(image_file=sponsor.logo_url, resize=False)\n delattr(sponsor, 'id')\n sponsor.logo_url = logo_url\n db.session.add(sponsor)\n db.session.commit()\n\n for location in microlocations:\n location_id = location.id\n db.session.expunge(location) # expunge the object from session\n make_transient(location)\n location.event_id = event.id\n delattr(location, 'id')\n db.session.add(location)\n db.session.commit()\n\n # No sessions are copied for new tracks\n for track in tracks:\n track_id = track.id\n db.session.expunge(track) # expunge the object from session\n make_transient(track)\n track.event_id = event.id\n delattr(track, 'id')\n db.session.add(track)\n db.session.commit()\n\n for call in speaker_calls:\n call_id = call.id\n db.session.expunge(call) # expunge the object from session\n make_transient(call)\n call.event_id = 
event.id\n delattr(call, 'id')\n db.session.add(call)\n db.session.commit()\n\n for code in discount_codes:\n code_id = code.id\n db.session.expunge(code) # expunge the object from session\n make_transient(code)\n code.event_id = event.id\n delattr(code, 'id')\n db.session.add(code)\n db.session.commit()\n\n for form in custom_forms:\n form_id = form.id\n db.session.expunge(form) # expunge the object from session\n make_transient(form)\n form.event_id = event.id\n delattr(form, 'id')\n db.session.add(form)\n db.session.commit()\n\n return jsonify({\n 'id': event.id,\n 'identifier': event.identifier,\n \"copied\": True\n })\n", "path": "app/api/event_copy.py"}], "after_files": [{"content": "from flask import jsonify, Blueprint, abort, make_response\nfrom sqlalchemy.orm import make_transient\n\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.files import create_save_resized_image\nfrom app.api.helpers.permission_manager import has_access\nfrom app.models.custom_form import CustomForms\nfrom app.models.discount_code import DiscountCode\nfrom app.models.event import Event, get_new_event_identifier\nfrom app.models import db\nfrom app.models.microlocation import Microlocation\nfrom app.models.social_link import SocialLink\nfrom app.models.speakers_call import SpeakersCall\nfrom app.models.sponsor import Sponsor\nfrom app.models.ticket import Ticket\nfrom app.models.track import Track\nfrom app.models.users_events_role import UsersEventsRoles\n\nevent_copy = Blueprint('event_copy', __name__, url_prefix='/v1/events')\n\n\n@event_copy.route('/<identifier>/copy', methods=['POST'])\ndef create_event_copy(identifier):\n id = 'identifier'\n\n if identifier.isdigit():\n id = 'id'\n\n event = safe_query(db, Event, id, identifier, 'event_' + id)\n\n if not has_access('is_coorganizer', event_id=event.id):\n return abort(\n make_response(jsonify(error=\"Access Forbidden\"), 403)\n )\n tickets = Ticket.query.filter_by(event_id=event.id).all()\n social_links = SocialLink.query.filter_by(event_id=event.id).all()\n sponsors = Sponsor.query.filter_by(event_id=event.id).all()\n microlocations = Microlocation.query.filter_by(event_id=event.id).all()\n tracks = Track.query.filter_by(event_id=event.id).all()\n custom_forms = CustomForms.query.filter_by(event_id=event.id).all()\n discount_codes = DiscountCode.query.filter_by(event_id=event.id).all()\n speaker_calls = SpeakersCall.query.filter_by(event_id=event.id).all()\n user_event_roles = UsersEventsRoles.query.filter_by(event_id=event.id).all()\n\n db.session.expunge(event) # expunge the object from session\n make_transient(event)\n delattr(event, 'id')\n event.identifier = get_new_event_identifier()\n db.session.add(event)\n db.session.commit()\n\n # Removes access_codes, order_tickets, ticket_tags for the new tickets created.\n for ticket in tickets:\n ticket_id = ticket.id\n db.session.expunge(ticket) # expunge the object from session\n make_transient(ticket)\n ticket.event_id = event.id\n delattr(ticket, 'id')\n db.session.add(ticket)\n db.session.commit()\n\n for link in social_links:\n link_id = link.id\n db.session.expunge(link) # expunge the object from session\n make_transient(link)\n link.event_id = event.id\n delattr(link, 'id')\n db.session.add(link)\n db.session.commit()\n\n for sponsor in sponsors:\n sponsor_id = sponsor.id\n db.session.expunge(sponsor) # expunge the object from session\n make_transient(sponsor)\n sponsor.event_id = event.id\n logo_url = create_save_resized_image(image_file=sponsor.logo_url, resize=False)\n 
delattr(sponsor, 'id')\n sponsor.logo_url = logo_url\n db.session.add(sponsor)\n db.session.commit()\n\n for location in microlocations:\n location_id = location.id\n db.session.expunge(location) # expunge the object from session\n make_transient(location)\n location.event_id = event.id\n delattr(location, 'id')\n db.session.add(location)\n db.session.commit()\n\n # No sessions are copied for new tracks\n for track in tracks:\n track_id = track.id\n db.session.expunge(track) # expunge the object from session\n make_transient(track)\n track.event_id = event.id\n delattr(track, 'id')\n db.session.add(track)\n db.session.commit()\n\n for call in speaker_calls:\n call_id = call.id\n db.session.expunge(call) # expunge the object from session\n make_transient(call)\n call.event_id = event.id\n delattr(call, 'id')\n db.session.add(call)\n db.session.commit()\n\n for code in discount_codes:\n code_id = code.id\n db.session.expunge(code) # expunge the object from session\n make_transient(code)\n code.event_id = event.id\n delattr(code, 'id')\n db.session.add(code)\n db.session.commit()\n\n for form in custom_forms:\n form_id = form.id\n db.session.expunge(form) # expunge the object from session\n make_transient(form)\n form.event_id = event.id\n delattr(form, 'id')\n db.session.add(form)\n db.session.commit()\n\n for user_role in user_event_roles:\n db.session.expunge(user_role)\n make_transient(user_role)\n user_role.event_id = event.id\n delattr(user_role, 'id')\n db.session.add(user_role)\n db.session.commit()\n\n return jsonify({\n 'id': event.id,\n 'identifier': event.identifier,\n \"copied\": True\n })\n", "path": "app/api/event_copy.py"}]} | 1,821 | 304 |
gh_patches_debug_40419 | rasdani/github-patches | git_diff | cowrie__cowrie-1564 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/etc/shadow file contents are incorrect
**Describe the bug**
In the latest honeypot from the master branch, `cat /etc/shadow` outputs a readme file instead of the password contents. It looks like commit 937402ece56a4d272713ea38be32c6dc4191390a replaced the file contents.
**To Reproduce**
- enter honeypot as root
- Run: cat /etc/shadow
**Expected behavior**
Expecting a shadow file format output, not a readme
**Server (please complete the following information):**
 - Using the Docker image built on 14 May 2021
--- END ISSUE ---
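For reference, a sketch of what a well-formed shadow entry looks like; the sample line is illustrative, not taken from the honeypot image:

```python
# Illustrative only: a real /etc/shadow line has nine colon-separated fields,
# whereas the honeypot currently serves README text for that path.
expected_line = "root:*:18720:0:99999:7:::"
fields = expected_line.split(":")
print(len(fields))  # 9: name, hash, lastchg, min, max, warn, inactive, expire, reserved
```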
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup
4
5 setup(
6 name="Cowrie",
7 description="Cowrie SSH/Telnet Honeypot.",
8 long_description="Cowrie SSH/Telnet Honeypot.",
9 author="Michel Oosterhof",
10 author_email="[email protected]",
11 maintainer="Michel Oosterhof",
12 maintainer_email="[email protected]",
13 keywords="ssh telnet honeypot",
14 platforms="Unix, Mac OSX",
15 license="BSD",
16 url="https://www.cowrie.org/",
17 packages=["cowrie", "twisted"],
18 include_package_data=True,
19 package_dir={"": "src"},
20 package_data={"": ["*.md"]},
21 use_incremental=True,
22 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4",
23 scripts=["bin/fsctl", "bin/asciinema", "bin/cowrie", "bin/createfs", "bin/playlog"],
24 classifiers=[
25 "Development Status :: 5 - Production/Stable",
26 "Environment :: No Input/Output (Daemon)",
27 "Framework :: Twisted",
28 "Intended Audience :: Developers",
29 "Intended Audience :: System Administrators",
30 "License :: OSI Approved :: BSD License",
31 "Operating System :: MacOS :: MacOS X",
32 "Operating System :: POSIX :: Linux",
33 "Operating System :: POSIX",
34 "Programming Language :: Python",
35 "Topic :: Security",
36 ],
37 setup_requires=["incremental", "click"],
38 install_requires=[
39 "twisted>=17.1.0",
40 "cryptography>=0.9.1",
41 "configparser",
42 "pyopenssl",
43 "pyparsing",
44 "incremental",
45 "packaging",
46 "appdirs>=1.4.0",
47 "python-dateutil",
48 "service_identity>=14.0.0",
49 ],
50 extras_require={
51 "csirtg": ["csirtgsdk>=0.0.0a17"],
52 "dshield": ["requests"],
53 "elasticsearch": ["pyes"],
54 "mysql": ["mysqlclient"],
55 "mongodb": ["pymongo"],
56 "rethinkdblog": ["rethinkdb"],
57 "s3": ["botocore"],
58 "slack": ["slackclient"],
59 "influxdb": ["influxdb"],
60 },
61 )
62
```
Path: `src/cowrie/output/csirtg.py`
Content:
```
1 import os
2 from datetime import datetime
3
4 from csirtgsdk.client import Client
5 from csirtgsdk.indicator import Indicator
6
7 from twisted.python import log
8
9 import cowrie.core.output
10 from cowrie.core.config import CowrieConfig
11
12 USERNAME = os.environ.get("CSIRTG_USER")
13 FEED = os.environ.get("CSIRTG_FEED")
14 TOKEN = os.environ.get("CSIRG_TOKEN")
15 DESCRIPTION = os.environ.get("CSIRTG_DESCRIPTION", "random scanning activity")
16
17
18 class Output(cowrie.core.output.Output):
19 """
20 csirtg output
21 """
22
23 def start(
24 self,
25 ):
26 self.user = CowrieConfig.get("output_csirtg", "username") or USERNAME
27 self.feed = CowrieConfig.get("output_csirtg", "feed") or FEED
28 self.token = CowrieConfig.get("output_csirtg", "token") or TOKEN
29 self.description = CowrieConfig.get(
30 "output_csirtg", "description", fallback=DESCRIPTION
31 )
32 self.context = {}
33 self.client = Client(token=self.token)
34
35 def stop(self):
36 pass
37
38 def write(self, e):
39 peerIP = e["src_ip"]
40 ts = e["timestamp"]
41 system = e.get("system", None)
42
43 if system not in [
44 "cowrie.ssh.factory.CowrieSSHFactory",
45 "cowrie.telnet.transport.HoneyPotTelnetFactory",
46 ]:
47 return
48
49 today = str(datetime.now().date())
50
51 if not self.context.get(today):
52 self.context = {}
53 self.context[today] = set()
54
55 key = ",".join([peerIP, system])
56
57 if key in self.context[today]:
58 return
59
60 self.context[today].add(key)
61
62 tags = "scanner,ssh"
63 port = 22
64 if e["system"] == "cowrie.telnet.transport.HoneyPotTelnetFactory":
65 tags = "scanner,telnet"
66 port = 23
67
68 i = {
69 "user": self.user,
70 "feed": self.feed,
71 "indicator": peerIP,
72 "portlist": port,
73 "protocol": "tcp",
74 "tags": tags,
75 "firsttime": ts,
76 "lasttime": ts,
77 "description": self.description,
78 }
79
80 ret = Indicator(self.client, i).submit()
81 log.msg("logged to csirtg {} ".format(ret["location"]))
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@
],
setup_requires=["incremental", "click"],
install_requires=[
- "twisted>=17.1.0",
+ "twisted==21.1.0",
"cryptography>=0.9.1",
"configparser",
"pyopenssl",
@@ -48,7 +48,7 @@
"service_identity>=14.0.0",
],
extras_require={
- "csirtg": ["csirtgsdk>=0.0.0a17"],
+ "csirtg": ["csirtgsdk==1.1.5"],
"dshield": ["requests"],
"elasticsearch": ["pyes"],
"mysql": ["mysqlclient"],
diff --git a/src/cowrie/output/csirtg.py b/src/cowrie/output/csirtg.py
--- a/src/cowrie/output/csirtg.py
+++ b/src/cowrie/output/csirtg.py
@@ -1,41 +1,50 @@
import os
from datetime import datetime
-from csirtgsdk.client import Client
-from csirtgsdk.indicator import Indicator
-
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
-USERNAME = os.environ.get("CSIRTG_USER")
-FEED = os.environ.get("CSIRTG_FEED")
-TOKEN = os.environ.get("CSIRG_TOKEN")
-DESCRIPTION = os.environ.get("CSIRTG_DESCRIPTION", "random scanning activity")
+token = CowrieConfig.get("output_csirtg", "token", fallback="a1b2c3d4")
+if token == "a1b2c3d4":
+ log.msg("output_csirtg: token not found in configuration file")
+ exit(1)
+
+os.environ["CSIRTG_TOKEN"] = token
+import csirtgsdk # noqa: E402
class Output(cowrie.core.output.Output):
"""
- csirtg output
+ CSIRTG output
"""
- def start(
- self,
- ):
- self.user = CowrieConfig.get("output_csirtg", "username") or USERNAME
- self.feed = CowrieConfig.get("output_csirtg", "feed") or FEED
- self.token = CowrieConfig.get("output_csirtg", "token") or TOKEN
- self.description = CowrieConfig.get(
- "output_csirtg", "description", fallback=DESCRIPTION
- )
+ def start(self):
+ """
+ Start the output module.
+ Note that csirtsdk is imported here because it reads CSIRTG_TOKEN on import
+ Cowrie sets this environment variable.
+ """
+ self.user = CowrieConfig.get("output_csirtg", "username")
+ self.feed = CowrieConfig.get("output_csirtg", "feed")
+ self.debug = CowrieConfig.getboolean("output_csirtg", "debug", fallback=False)
+ self.description = CowrieConfig.get("output_csirtg", "description")
+
self.context = {}
- self.client = Client(token=self.token)
+ # self.client = csirtgsdk.client.Client()
def stop(self):
pass
def write(self, e):
+ """
+ Only pass on connection events
+ """
+ if e["eventid"] == "cowrie.session.connect":
+ self.submitIp(e)
+
+ def submitIp(self, e):
peerIP = e["src_ip"]
ts = e["timestamp"]
system = e.get("system", None)
@@ -77,5 +86,12 @@
"description": self.description,
}
- ret = Indicator(self.client, i).submit()
- log.msg("logged to csirtg {} ".format(ret["location"]))
+ if self.debug is True:
+ log.msg(f"output_csirtg: Submitting {i!r} to CSIRTG")
+
+ ind = csirtgsdk.indicator.Indicator(i).submit()
+
+ if self.debug is True:
+ log.msg(f"output_csirtg: Submitted {ind!r} to CSIRTG")
+
+ log.msg("output_csirtg: submitted to csirtg at {} ".format(ind["location"]))
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -36,7 +36,7 @@\n ],\n setup_requires=[\"incremental\", \"click\"],\n install_requires=[\n- \"twisted>=17.1.0\",\n+ \"twisted==21.1.0\",\n \"cryptography>=0.9.1\",\n \"configparser\",\n \"pyopenssl\",\n@@ -48,7 +48,7 @@\n \"service_identity>=14.0.0\",\n ],\n extras_require={\n- \"csirtg\": [\"csirtgsdk>=0.0.0a17\"],\n+ \"csirtg\": [\"csirtgsdk==1.1.5\"],\n \"dshield\": [\"requests\"],\n \"elasticsearch\": [\"pyes\"],\n \"mysql\": [\"mysqlclient\"],\ndiff --git a/src/cowrie/output/csirtg.py b/src/cowrie/output/csirtg.py\n--- a/src/cowrie/output/csirtg.py\n+++ b/src/cowrie/output/csirtg.py\n@@ -1,41 +1,50 @@\n import os\n from datetime import datetime\n \n-from csirtgsdk.client import Client\n-from csirtgsdk.indicator import Indicator\n-\n from twisted.python import log\n \n import cowrie.core.output\n from cowrie.core.config import CowrieConfig\n \n-USERNAME = os.environ.get(\"CSIRTG_USER\")\n-FEED = os.environ.get(\"CSIRTG_FEED\")\n-TOKEN = os.environ.get(\"CSIRG_TOKEN\")\n-DESCRIPTION = os.environ.get(\"CSIRTG_DESCRIPTION\", \"random scanning activity\")\n+token = CowrieConfig.get(\"output_csirtg\", \"token\", fallback=\"a1b2c3d4\")\n+if token == \"a1b2c3d4\":\n+ log.msg(\"output_csirtg: token not found in configuration file\")\n+ exit(1)\n+\n+os.environ[\"CSIRTG_TOKEN\"] = token\n+import csirtgsdk # noqa: E402\n \n \n class Output(cowrie.core.output.Output):\n \"\"\"\n- csirtg output\n+ CSIRTG output\n \"\"\"\n \n- def start(\n- self,\n- ):\n- self.user = CowrieConfig.get(\"output_csirtg\", \"username\") or USERNAME\n- self.feed = CowrieConfig.get(\"output_csirtg\", \"feed\") or FEED\n- self.token = CowrieConfig.get(\"output_csirtg\", \"token\") or TOKEN\n- self.description = CowrieConfig.get(\n- \"output_csirtg\", \"description\", fallback=DESCRIPTION\n- )\n+ def start(self):\n+ \"\"\"\n+ Start the output module.\n+ Note that csirtsdk is imported here because it reads CSIRTG_TOKEN on import\n+ Cowrie sets this environment variable.\n+ \"\"\"\n+ self.user = CowrieConfig.get(\"output_csirtg\", \"username\")\n+ self.feed = CowrieConfig.get(\"output_csirtg\", \"feed\")\n+ self.debug = CowrieConfig.getboolean(\"output_csirtg\", \"debug\", fallback=False)\n+ self.description = CowrieConfig.get(\"output_csirtg\", \"description\")\n+\n self.context = {}\n- self.client = Client(token=self.token)\n+ # self.client = csirtgsdk.client.Client()\n \n def stop(self):\n pass\n \n def write(self, e):\n+ \"\"\"\n+ Only pass on connection events\n+ \"\"\"\n+ if e[\"eventid\"] == \"cowrie.session.connect\":\n+ self.submitIp(e)\n+\n+ def submitIp(self, e):\n peerIP = e[\"src_ip\"]\n ts = e[\"timestamp\"]\n system = e.get(\"system\", None)\n@@ -77,5 +86,12 @@\n \"description\": self.description,\n }\n \n- ret = Indicator(self.client, i).submit()\n- log.msg(\"logged to csirtg {} \".format(ret[\"location\"]))\n+ if self.debug is True:\n+ log.msg(f\"output_csirtg: Submitting {i!r} to CSIRTG\")\n+\n+ ind = csirtgsdk.indicator.Indicator(i).submit()\n+\n+ if self.debug is True:\n+ log.msg(f\"output_csirtg: Submitted {ind!r} to CSIRTG\")\n+\n+ log.msg(\"output_csirtg: submitted to csirtg at {} \".format(ind[\"location\"]))\n", "issue": "/etc/shadow file contents are incorrect\n**Describe the bug**\r\nIn the latest honeypot from master branch, a \u201ccat /etc/shadow\u201c outputs a readme file instead of password contents. 
It looks the commit 937402ece56a4d272713ea38be32c6dc4191390a replaced the file contents.\r\n\r\n**To Reproduce**\r\n- enter honeypot as root\r\n- Run: cat /etc/shadow\r\n\r\n**Expected behavior**\r\nExoecting a shadow file format output, not readme\r\n\r\n**Server (please complete the following information):**\r\n - Using the docker image built on 14 may 2021\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nsetup(\n name=\"Cowrie\",\n description=\"Cowrie SSH/Telnet Honeypot.\",\n long_description=\"Cowrie SSH/Telnet Honeypot.\",\n author=\"Michel Oosterhof\",\n author_email=\"[email protected]\",\n maintainer=\"Michel Oosterhof\",\n maintainer_email=\"[email protected]\",\n keywords=\"ssh telnet honeypot\",\n platforms=\"Unix, Mac OSX\",\n license=\"BSD\",\n url=\"https://www.cowrie.org/\",\n packages=[\"cowrie\", \"twisted\"],\n include_package_data=True,\n package_dir={\"\": \"src\"},\n package_data={\"\": [\"*.md\"]},\n use_incremental=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4\",\n scripts=[\"bin/fsctl\", \"bin/asciinema\", \"bin/cowrie\", \"bin/createfs\", \"bin/playlog\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: No Input/Output (Daemon)\",\n \"Framework :: Twisted\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: POSIX\",\n \"Programming Language :: Python\",\n \"Topic :: Security\",\n ],\n setup_requires=[\"incremental\", \"click\"],\n install_requires=[\n \"twisted>=17.1.0\",\n \"cryptography>=0.9.1\",\n \"configparser\",\n \"pyopenssl\",\n \"pyparsing\",\n \"incremental\",\n \"packaging\",\n \"appdirs>=1.4.0\",\n \"python-dateutil\",\n \"service_identity>=14.0.0\",\n ],\n extras_require={\n \"csirtg\": [\"csirtgsdk>=0.0.0a17\"],\n \"dshield\": [\"requests\"],\n \"elasticsearch\": [\"pyes\"],\n \"mysql\": [\"mysqlclient\"],\n \"mongodb\": [\"pymongo\"],\n \"rethinkdblog\": [\"rethinkdb\"],\n \"s3\": [\"botocore\"],\n \"slack\": [\"slackclient\"],\n \"influxdb\": [\"influxdb\"],\n },\n)\n", "path": "setup.py"}, {"content": "import os\nfrom datetime import datetime\n\nfrom csirtgsdk.client import Client\nfrom csirtgsdk.indicator import Indicator\n\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CowrieConfig\n\nUSERNAME = os.environ.get(\"CSIRTG_USER\")\nFEED = os.environ.get(\"CSIRTG_FEED\")\nTOKEN = os.environ.get(\"CSIRG_TOKEN\")\nDESCRIPTION = os.environ.get(\"CSIRTG_DESCRIPTION\", \"random scanning activity\")\n\n\nclass Output(cowrie.core.output.Output):\n \"\"\"\n csirtg output\n \"\"\"\n\n def start(\n self,\n ):\n self.user = CowrieConfig.get(\"output_csirtg\", \"username\") or USERNAME\n self.feed = CowrieConfig.get(\"output_csirtg\", \"feed\") or FEED\n self.token = CowrieConfig.get(\"output_csirtg\", \"token\") or TOKEN\n self.description = CowrieConfig.get(\n \"output_csirtg\", \"description\", fallback=DESCRIPTION\n )\n self.context = {}\n self.client = Client(token=self.token)\n\n def stop(self):\n pass\n\n def write(self, e):\n peerIP = e[\"src_ip\"]\n ts = e[\"timestamp\"]\n system = e.get(\"system\", None)\n\n if system not in [\n \"cowrie.ssh.factory.CowrieSSHFactory\",\n \"cowrie.telnet.transport.HoneyPotTelnetFactory\",\n ]:\n return\n\n today = str(datetime.now().date())\n\n if not 
self.context.get(today):\n self.context = {}\n self.context[today] = set()\n\n key = \",\".join([peerIP, system])\n\n if key in self.context[today]:\n return\n\n self.context[today].add(key)\n\n tags = \"scanner,ssh\"\n port = 22\n if e[\"system\"] == \"cowrie.telnet.transport.HoneyPotTelnetFactory\":\n tags = \"scanner,telnet\"\n port = 23\n\n i = {\n \"user\": self.user,\n \"feed\": self.feed,\n \"indicator\": peerIP,\n \"portlist\": port,\n \"protocol\": \"tcp\",\n \"tags\": tags,\n \"firsttime\": ts,\n \"lasttime\": ts,\n \"description\": self.description,\n }\n\n ret = Indicator(self.client, i).submit()\n log.msg(\"logged to csirtg {} \".format(ret[\"location\"]))\n", "path": "src/cowrie/output/csirtg.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nsetup(\n name=\"Cowrie\",\n description=\"Cowrie SSH/Telnet Honeypot.\",\n long_description=\"Cowrie SSH/Telnet Honeypot.\",\n author=\"Michel Oosterhof\",\n author_email=\"[email protected]\",\n maintainer=\"Michel Oosterhof\",\n maintainer_email=\"[email protected]\",\n keywords=\"ssh telnet honeypot\",\n platforms=\"Unix, Mac OSX\",\n license=\"BSD\",\n url=\"https://www.cowrie.org/\",\n packages=[\"cowrie\", \"twisted\"],\n include_package_data=True,\n package_dir={\"\": \"src\"},\n package_data={\"\": [\"*.md\"]},\n use_incremental=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4\",\n scripts=[\"bin/fsctl\", \"bin/asciinema\", \"bin/cowrie\", \"bin/createfs\", \"bin/playlog\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: No Input/Output (Daemon)\",\n \"Framework :: Twisted\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: POSIX\",\n \"Programming Language :: Python\",\n \"Topic :: Security\",\n ],\n setup_requires=[\"incremental\", \"click\"],\n install_requires=[\n \"twisted==21.1.0\",\n \"cryptography>=0.9.1\",\n \"configparser\",\n \"pyopenssl\",\n \"pyparsing\",\n \"incremental\",\n \"packaging\",\n \"appdirs>=1.4.0\",\n \"python-dateutil\",\n \"service_identity>=14.0.0\",\n ],\n extras_require={\n \"csirtg\": [\"csirtgsdk==1.1.5\"],\n \"dshield\": [\"requests\"],\n \"elasticsearch\": [\"pyes\"],\n \"mysql\": [\"mysqlclient\"],\n \"mongodb\": [\"pymongo\"],\n \"rethinkdblog\": [\"rethinkdb\"],\n \"s3\": [\"botocore\"],\n \"slack\": [\"slackclient\"],\n \"influxdb\": [\"influxdb\"],\n },\n)\n", "path": "setup.py"}, {"content": "import os\nfrom datetime import datetime\n\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CowrieConfig\n\ntoken = CowrieConfig.get(\"output_csirtg\", \"token\", fallback=\"a1b2c3d4\")\nif token == \"a1b2c3d4\":\n log.msg(\"output_csirtg: token not found in configuration file\")\n exit(1)\n\nos.environ[\"CSIRTG_TOKEN\"] = token\nimport csirtgsdk # noqa: E402\n\n\nclass Output(cowrie.core.output.Output):\n \"\"\"\n CSIRTG output\n \"\"\"\n\n def start(self):\n \"\"\"\n Start the output module.\n Note that csirtsdk is imported here because it reads CSIRTG_TOKEN on import\n Cowrie sets this environment variable.\n \"\"\"\n self.user = CowrieConfig.get(\"output_csirtg\", \"username\")\n self.feed = CowrieConfig.get(\"output_csirtg\", \"feed\")\n self.debug = CowrieConfig.getboolean(\"output_csirtg\", \"debug\", fallback=False)\n self.description = 
CowrieConfig.get(\"output_csirtg\", \"description\")\n\n self.context = {}\n # self.client = csirtgsdk.client.Client()\n\n def stop(self):\n pass\n\n def write(self, e):\n \"\"\"\n Only pass on connection events\n \"\"\"\n if e[\"eventid\"] == \"cowrie.session.connect\":\n self.submitIp(e)\n\n def submitIp(self, e):\n peerIP = e[\"src_ip\"]\n ts = e[\"timestamp\"]\n system = e.get(\"system\", None)\n\n if system not in [\n \"cowrie.ssh.factory.CowrieSSHFactory\",\n \"cowrie.telnet.transport.HoneyPotTelnetFactory\",\n ]:\n return\n\n today = str(datetime.now().date())\n\n if not self.context.get(today):\n self.context = {}\n self.context[today] = set()\n\n key = \",\".join([peerIP, system])\n\n if key in self.context[today]:\n return\n\n self.context[today].add(key)\n\n tags = \"scanner,ssh\"\n port = 22\n if e[\"system\"] == \"cowrie.telnet.transport.HoneyPotTelnetFactory\":\n tags = \"scanner,telnet\"\n port = 23\n\n i = {\n \"user\": self.user,\n \"feed\": self.feed,\n \"indicator\": peerIP,\n \"portlist\": port,\n \"protocol\": \"tcp\",\n \"tags\": tags,\n \"firsttime\": ts,\n \"lasttime\": ts,\n \"description\": self.description,\n }\n\n if self.debug is True:\n log.msg(f\"output_csirtg: Submitting {i!r} to CSIRTG\")\n\n ind = csirtgsdk.indicator.Indicator(i).submit()\n\n if self.debug is True:\n log.msg(f\"output_csirtg: Submitted {ind!r} to CSIRTG\")\n\n log.msg(\"output_csirtg: submitted to csirtg at {} \".format(ind[\"location\"]))\n", "path": "src/cowrie/output/csirtg.py"}]} | 1,773 | 976 |
gh_patches_debug_50124 | rasdani/github-patches | git_diff | scrapy__scrapy-2649 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
After adding request flags, subclasses of logformatter that rely on the 'flags' format string are broken
#2082 added flags to request, but it also renamed the formatting string key from `flags` to `response_flags`/`request_flags`:
```
CRAWLEDMSG = u"Crawled (%(status)s) %(request)s (referer: %(referer)s)%(flags)s"
+CRAWLEDMSG = u"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s"
```
Scrapy allows you to override the logformatter, and this is what I have in my project. I have a logformatter looking roughly like this:
```python
# dirbot/logf.py
from scrapy.logformatter import LogFormatter
class CustomLogFormatter(LogFormatter):
def crawled(self, request, response, spider):
kwargs = super(CustomLogFormatter, self).crawled(
request, response, spider)
kwargs['msg'] = (
u"Crawled (%(status)s) %(request)s "
u"(referer: %(referer)s, latency: %(latency).2f s)%(flags)s"
)
kwargs['args']['latency'] = response.meta.get('download_latency', 0)
return kwargs
```
Now if you enable it in settings (`LOG_FORMATTER = 'dirbot.logf.CustomLogFormatter'`) and try to run it with recent master, you'll get a `KeyError`:
```
2017-03-13 14:15:26 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/__init__.py", line 851, in emit
msg = self.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 724, in format
return fmt.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 464, in format
record.message = record.getMessage()
File "/usr/lib/python2.7/logging/__init__.py", line 328, in getMessage
msg = msg % self.args
KeyError: u'flags'
Logged from file engine.py, line 238
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/__init__.py", line 851, in emit
msg = self.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 724, in format
return fmt.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 464, in format
record.message = record.getMessage()
File "/usr/lib/python2.7/logging/__init__.py", line 328, in getMessage
msg = msg % self.args
KeyError: u'flags'
Logged from file engine.py, line 238
2017-03-13 14:15:27 [scrapy.core.scraper] DEBUG: Scraped from <200 http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/>
```
So this change that renamed `flags` to `response_flags/request_flags` seems backward incompatible.
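For anyone hitting this before a fix lands upstream, a minimal sketch of the same subclass updated for the renamed keys is shown below — it only swaps `%(flags)s` for the `%(request_flags)s`/`%(response_flags)s` placeholders that the new `args` dict actually provides; everything else (the `dirbot` path, the latency field) is carried over from the example above:

```python
# dirbot/logf.py -- sketch only, assumes the renamed keys from Scrapy master/1.4
from scrapy.logformatter import LogFormatter


class CustomLogFormatter(LogFormatter):
    def crawled(self, request, response, spider):
        kwargs = super(CustomLogFormatter, self).crawled(
            request, response, spider)
        # 'flags' no longer exists in args; use the two new placeholders instead
        kwargs['msg'] = (
            u"Crawled (%(status)s) %(request)s%(request_flags)s "
            u"(referer: %(referer)s, latency: %(latency).2f s)%(response_flags)s"
        )
        kwargs['args']['latency'] = response.meta.get('download_latency', 0)
        return kwargs
```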
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/logformatter.py`
Content:
```
1 import os
2 import logging
3
4 from twisted.python.failure import Failure
5
6 from scrapy.utils.request import referer_str
7
8 SCRAPEDMSG = u"Scraped from %(src)s" + os.linesep + "%(item)s"
9 DROPPEDMSG = u"Dropped: %(exception)s" + os.linesep + "%(item)s"
10 CRAWLEDMSG = u"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s"
11
12
13 class LogFormatter(object):
14 """Class for generating log messages for different actions.
15
16 All methods must return a dictionary listing the parameters `level`, `msg`
17 and `args` which are going to be used for constructing the log message when
18 calling logging.log.
19
20 Dictionary keys for the method outputs:
21 * `level` should be the log level for that action, you can use those
22 from the python logging library: logging.DEBUG, logging.INFO,
23 logging.WARNING, logging.ERROR and logging.CRITICAL.
24
25 * `msg` should be a string that can contain different formatting
26 placeholders. This string, formatted with the provided `args`, is going
27 to be the log message for that action.
28
29 * `args` should be a tuple or dict with the formatting placeholders for
30 `msg`. The final log message is computed as output['msg'] %
31 output['args'].
32 """
33
34 def crawled(self, request, response, spider):
35 request_flags = ' %s' % str(request.flags) if request.flags else ''
36 response_flags = ' %s' % str(response.flags) if response.flags else ''
37 return {
38 'level': logging.DEBUG,
39 'msg': CRAWLEDMSG,
40 'args': {
41 'status': response.status,
42 'request': request,
43 'request_flags' : request_flags,
44 'referer': referer_str(request),
45 'response_flags': response_flags,
46 }
47 }
48
49 def scraped(self, item, response, spider):
50 if isinstance(response, Failure):
51 src = response.getErrorMessage()
52 else:
53 src = response
54 return {
55 'level': logging.DEBUG,
56 'msg': SCRAPEDMSG,
57 'args': {
58 'src': src,
59 'item': item,
60 }
61 }
62
63 def dropped(self, item, exception, response, spider):
64 return {
65 'level': logging.WARNING,
66 'msg': DROPPEDMSG,
67 'args': {
68 'exception': exception,
69 'item': item,
70 }
71 }
72
73 @classmethod
74 def from_crawler(cls, crawler):
75 return cls()
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/logformatter.py b/scrapy/logformatter.py
--- a/scrapy/logformatter.py
+++ b/scrapy/logformatter.py
@@ -43,6 +43,8 @@
'request_flags' : request_flags,
'referer': referer_str(request),
'response_flags': response_flags,
+ # backward compatibility with Scrapy logformatter below 1.4 version
+ 'flags': response_flags
}
}
| {"golden_diff": "diff --git a/scrapy/logformatter.py b/scrapy/logformatter.py\n--- a/scrapy/logformatter.py\n+++ b/scrapy/logformatter.py\n@@ -43,6 +43,8 @@\n 'request_flags' : request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n+ # backward compatibility with Scrapy logformatter below 1.4 version\n+ 'flags': response_flags\n }\n }\n", "issue": "After adding request flags subclasses of logformatter that rely on 'flags' format string are broken\n#2082 added flags to request but it also renamed formatting string key from flags to response_flags/request_flags\r\n```\r\nCRAWLEDMSG = u\"Crawled (%(status)s) %(request)s (referer: %(referer)s)%(flags)s\"\r\n +CRAWLEDMSG = u\"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s\" \r\n```\r\n\r\nScrapy allows you to override logformatter and this is what I have in my project. I have logformatter looking rouhgly like this\r\n\r\n\r\n```python\r\n# dirbot/logf.py\r\nfrom scrapy.logformatter import LogFormatter\r\n\r\n\r\nclass CustomLogFormatter(LogFormatter):\r\n def crawled(self, request, response, spider):\r\n kwargs = super(CustomLogFormatter, self).crawled(\r\n request, response, spider)\r\n kwargs['msg'] = (\r\n u\"Crawled (%(status)s) %(request)s \"\r\n u\"(referer: %(referer)s, latency: %(latency).2f s)%(flags)s\"\r\n )\r\n kwargs['args']['latency'] = response.meta.get('download_latency', 0)\r\n return kwargs\r\n```\r\n\r\nnow if you enable it in settings `LOG_FORMATTER = 'dirbot.logf.CustomLogFormatter'\r\n` and try to run it with recent master you'll get KeyError\r\n\r\n```\r\n2017-03-13 14:15:26 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 851, in emit\r\n msg = self.format(record)\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 724, in format\r\n return fmt.format(record)\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 464, in format\r\n record.message = record.getMessage()\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 328, in getMessage\r\n msg = msg % self.args\r\nKeyError: u'flags'\r\nLogged from file engine.py, line 238\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 851, in emit\r\n msg = self.format(record)\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 724, in format\r\n return fmt.format(record)\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 464, in format\r\n record.message = record.getMessage()\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 328, in getMessage\r\n msg = msg % self.args\r\nKeyError: u'flags'\r\nLogged from file engine.py, line 238\r\n2017-03-13 14:15:27 [scrapy.core.scraper] DEBUG: Scraped from <200 http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/>\r\n```\r\n\r\nSo this change that renamed `flags` to `response_flags/request_flags` seems backward incompatible. 
\n", "before_files": [{"content": "import os\nimport logging\n\nfrom twisted.python.failure import Failure\n\nfrom scrapy.utils.request import referer_str\n\nSCRAPEDMSG = u\"Scraped from %(src)s\" + os.linesep + \"%(item)s\"\nDROPPEDMSG = u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\"\nCRAWLEDMSG = u\"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s\"\n\n\nclass LogFormatter(object):\n \"\"\"Class for generating log messages for different actions.\n\n All methods must return a dictionary listing the parameters `level`, `msg`\n and `args` which are going to be used for constructing the log message when\n calling logging.log.\n\n Dictionary keys for the method outputs:\n * `level` should be the log level for that action, you can use those\n from the python logging library: logging.DEBUG, logging.INFO,\n logging.WARNING, logging.ERROR and logging.CRITICAL.\n\n * `msg` should be a string that can contain different formatting\n placeholders. This string, formatted with the provided `args`, is going\n to be the log message for that action.\n\n * `args` should be a tuple or dict with the formatting placeholders for\n `msg`. The final log message is computed as output['msg'] %\n output['args'].\n \"\"\"\n\n def crawled(self, request, response, spider):\n request_flags = ' %s' % str(request.flags) if request.flags else ''\n response_flags = ' %s' % str(response.flags) if response.flags else ''\n return {\n 'level': logging.DEBUG,\n 'msg': CRAWLEDMSG,\n 'args': {\n 'status': response.status,\n 'request': request,\n 'request_flags' : request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n }\n }\n\n def scraped(self, item, response, spider):\n if isinstance(response, Failure):\n src = response.getErrorMessage()\n else:\n src = response\n return {\n 'level': logging.DEBUG,\n 'msg': SCRAPEDMSG,\n 'args': {\n 'src': src,\n 'item': item,\n }\n }\n\n def dropped(self, item, exception, response, spider):\n return {\n 'level': logging.WARNING,\n 'msg': DROPPEDMSG,\n 'args': {\n 'exception': exception,\n 'item': item,\n }\n }\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls()\n", "path": "scrapy/logformatter.py"}], "after_files": [{"content": "import os\nimport logging\n\nfrom twisted.python.failure import Failure\n\nfrom scrapy.utils.request import referer_str\n\nSCRAPEDMSG = u\"Scraped from %(src)s\" + os.linesep + \"%(item)s\"\nDROPPEDMSG = u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\"\nCRAWLEDMSG = u\"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s\"\n\n\nclass LogFormatter(object):\n \"\"\"Class for generating log messages for different actions.\n\n All methods must return a dictionary listing the parameters `level`, `msg`\n and `args` which are going to be used for constructing the log message when\n calling logging.log.\n\n Dictionary keys for the method outputs:\n * `level` should be the log level for that action, you can use those\n from the python logging library: logging.DEBUG, logging.INFO,\n logging.WARNING, logging.ERROR and logging.CRITICAL.\n\n * `msg` should be a string that can contain different formatting\n placeholders. This string, formatted with the provided `args`, is going\n to be the log message for that action.\n\n * `args` should be a tuple or dict with the formatting placeholders for\n `msg`. 
The final log message is computed as output['msg'] %\n output['args'].\n \"\"\"\n\n def crawled(self, request, response, spider):\n request_flags = ' %s' % str(request.flags) if request.flags else ''\n response_flags = ' %s' % str(response.flags) if response.flags else ''\n return {\n 'level': logging.DEBUG,\n 'msg': CRAWLEDMSG,\n 'args': {\n 'status': response.status,\n 'request': request,\n 'request_flags' : request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n # backward compatibility with Scrapy logformatter below 1.4 version\n 'flags': response_flags\n }\n }\n\n def scraped(self, item, response, spider):\n if isinstance(response, Failure):\n src = response.getErrorMessage()\n else:\n src = response\n return {\n 'level': logging.DEBUG,\n 'msg': SCRAPEDMSG,\n 'args': {\n 'src': src,\n 'item': item,\n }\n }\n\n def dropped(self, item, exception, response, spider):\n return {\n 'level': logging.WARNING,\n 'msg': DROPPEDMSG,\n 'args': {\n 'exception': exception,\n 'item': item,\n }\n }\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls()\n", "path": "scrapy/logformatter.py"}]} | 1,697 | 99 |
gh_patches_debug_15426 | rasdani/github-patches | git_diff | airctic__icevision-734 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't save a full model using torch.save (at least with faster-RCNN)
It is not possible to save a full model using the default settings of `torch.save` (see stack trace below). This is because `remove_internal_model_transforms` uses inner functions in its implementation, and the default pickle module cannot pickle inner functions.
Workaround: use the `dill` module instead, which does support inner functions.
Suggested fix: It does not look as if the inner functions are necessary. If they were moved to standard module-level functions, then the default pickle module should work.
`torch.save(model, 'mod.pth', pickle_module=pickle)` causes an error.
`torch.save(model, 'mod.pth', pickle_module=dill)` is a workaround.
**To Reproduce**
`torch.save(model, 'mod1-full.pth', pickle_module=pickle)`
results in:
```python
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-12-50f3761f4f3c> in <module>
----> 1 torch.save(model, 'mod1-full.pth', pickle_module=pickle)
~/anaconda3/envs/dlm/lib/python3.8/site-packages/torch/serialization.py in save(obj, f, pickle_module, pickle_protocol, _use_new_zipfile_serialization)
370 if _use_new_zipfile_serialization:
371 with _open_zipfile_writer(opened_file) as opened_zipfile:
--> 372 _save(obj, opened_zipfile, pickle_module, pickle_protocol)
373 return
374 _legacy_save(obj, opened_file, pickle_module, pickle_protocol)
~/anaconda3/envs/dlm/lib/python3.8/site-packages/torch/serialization.py in _save(obj, zip_file, pickle_module, pickle_protocol)
474 pickler = pickle_module.Pickler(data_buf, protocol=pickle_protocol)
475 pickler.persistent_id = persistent_id
--> 476 pickler.dump(obj)
477 data_value = data_buf.getvalue()
478 zip_file.write_record('data.pkl', data_value, len(data_value))
AttributeError: Can't pickle local object 'remove_internal_model_transforms.<locals>.noop_normalize'
```
Relevant definition:
```
def remove_internal_model_transforms(model: GeneralizedRCNN):
def noop_normalize(image: Tensor) -> Tensor:
return image
def noop_resize(
image: Tensor, target: Optional[Dict[str, Tensor]]
) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
return image, target
model.transform.normalize = noop_normalize
model.transform.resize = noop_resize
```
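A sketch of the suggested fix, with the inner functions hoisted to module level so the default pickle module can serialize a model that references them (typing/torch imports are added here only to keep the snippet self-contained; the real module pulls them in through its own imports):

```python
from typing import Dict, Optional, Tuple

from torch import Tensor
from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN


def _noop_normalize(image: Tensor) -> Tensor:
    return image


def _noop_resize(
    image: Tensor, target: Optional[Dict[str, Tensor]]
) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
    return image, target


def remove_internal_model_transforms(model: GeneralizedRCNN):
    # module-level functions are picklable, unlike the previous closures
    model.transform.normalize = _noop_normalize
    model.transform.resize = _noop_resize
```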
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `icevision/models/torchvision/utils.py`
Content:
```
1 __all__ = [
2 "remove_internal_model_transforms",
3 "patch_rcnn_param_groups",
4 "patch_retinanet_param_groups",
5 ]
6
7 from icevision.imports import *
8 from icevision.utils import *
9 from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN
10
11
12 def remove_internal_model_transforms(model: GeneralizedRCNN):
13 def noop_normalize(image: Tensor) -> Tensor:
14 return image
15
16 def noop_resize(
17 image: Tensor, target: Optional[Dict[str, Tensor]]
18 ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
19 return image, target
20
21 model.transform.normalize = noop_normalize
22 model.transform.resize = noop_resize
23
24
25 def patch_param_groups(
26 model: nn.Module,
27 head_layers: List[nn.Module],
28 backbone_param_groups: List[List[nn.Parameter]],
29 ):
30 def param_groups(model: nn.Module) -> List[List[nn.Parameter]]:
31 head_param_groups = [list(layer.parameters()) for layer in head_layers]
32
33 _param_groups = backbone_param_groups + head_param_groups
34 check_all_model_params_in_groups2(model, _param_groups)
35
36 return _param_groups
37
38 model.param_groups = MethodType(param_groups, model)
39
40
41 def patch_rcnn_param_groups(model: nn.Module):
42 return patch_param_groups(
43 model=model,
44 head_layers=[model.rpn, model.roi_heads],
45 backbone_param_groups=model.backbone.param_groups(),
46 )
47
48
49 def patch_retinanet_param_groups(model: nn.Module):
50 return patch_param_groups(
51 model=model,
52 head_layers=[model.head],
53 backbone_param_groups=model.backbone.param_groups(),
54 )
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/icevision/models/torchvision/utils.py b/icevision/models/torchvision/utils.py
--- a/icevision/models/torchvision/utils.py
+++ b/icevision/models/torchvision/utils.py
@@ -9,17 +9,19 @@
from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN
-def remove_internal_model_transforms(model: GeneralizedRCNN):
- def noop_normalize(image: Tensor) -> Tensor:
- return image
+def _noop_normalize(image: Tensor) -> Tensor:
+ return image
+
- def noop_resize(
- image: Tensor, target: Optional[Dict[str, Tensor]]
- ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
- return image, target
+def _noop_resize(
+ image: Tensor, target: Optional[Dict[str, Tensor]]
+) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
+ return image, target
- model.transform.normalize = noop_normalize
- model.transform.resize = noop_resize
+
+def remove_internal_model_transforms(model: GeneralizedRCNN):
+ model.transform.normalize = _noop_normalize
+ model.transform.resize = _noop_resize
def patch_param_groups(
| {"golden_diff": "diff --git a/icevision/models/torchvision/utils.py b/icevision/models/torchvision/utils.py\n--- a/icevision/models/torchvision/utils.py\n+++ b/icevision/models/torchvision/utils.py\n@@ -9,17 +9,19 @@\n from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN\n \n \n-def remove_internal_model_transforms(model: GeneralizedRCNN):\n- def noop_normalize(image: Tensor) -> Tensor:\n- return image\n+def _noop_normalize(image: Tensor) -> Tensor:\n+ return image\n+\n \n- def noop_resize(\n- image: Tensor, target: Optional[Dict[str, Tensor]]\n- ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:\n- return image, target\n+def _noop_resize(\n+ image: Tensor, target: Optional[Dict[str, Tensor]]\n+) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:\n+ return image, target\n \n- model.transform.normalize = noop_normalize\n- model.transform.resize = noop_resize\n+\n+def remove_internal_model_transforms(model: GeneralizedRCNN):\n+ model.transform.normalize = _noop_normalize\n+ model.transform.resize = _noop_resize\n \n \n def patch_param_groups(\n", "issue": "Can't save a full model using torch.save (at least with faster-RCNN)\nIt is not possible to save a full model using default settings of `torch.save` (see stack trace below). This is because of the implementation of `remove_internal_model_transforms`, which uses inner functions in its implementation. The default pickle module does not support inner functions.\r\n\r\nWorkaround: use the `dill` module instead, which does support inner functions.\r\n\r\nSuggested fix: It does not look as if the internal functions are necessary. If there were moved to standard functions, then the default pickle module should work.\r\n`torch.save(model, 'mod.pth', pickle_module=pickle)` causes an error.\r\n\r\n`torch.save(model, 'mod.pth', pickle_module=dill)` is a workaround.\r\n\r\n**To Reproduce**\r\n\r\n`torch.save(model, 'mod1-full.pth', pickle_module=pickle)`\r\nresults in:\r\n\r\n```python\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-12-50f3761f4f3c> in <module>\r\n----> 1 torch.save(model, 'mod1-full.pth', pickle_module=pickle)\r\n\r\n~/anaconda3/envs/dlm/lib/python3.8/site-packages/torch/serialization.py in save(obj, f, pickle_module, pickle_protocol, _use_new_zipfile_serialization)\r\n 370 if _use_new_zipfile_serialization:\r\n 371 with _open_zipfile_writer(opened_file) as opened_zipfile:\r\n--> 372 _save(obj, opened_zipfile, pickle_module, pickle_protocol)\r\n 373 return\r\n 374 _legacy_save(obj, opened_file, pickle_module, pickle_protocol)\r\n\r\n~/anaconda3/envs/dlm/lib/python3.8/site-packages/torch/serialization.py in _save(obj, zip_file, pickle_module, pickle_protocol)\r\n 474 pickler = pickle_module.Pickler(data_buf, protocol=pickle_protocol)\r\n 475 pickler.persistent_id = persistent_id\r\n--> 476 pickler.dump(obj)\r\n 477 data_value = data_buf.getvalue()\r\n 478 zip_file.write_record('data.pkl', data_value, len(data_value))\r\n\r\nAttributeError: Can't pickle local object 'remove_internal_model_transforms.<locals>.noop_normalize'\r\n```\r\n\r\nRelevant definition:\r\n```\r\ndef remove_internal_model_transforms(model: GeneralizedRCNN):\r\n def noop_normalize(image: Tensor) -> Tensor:\r\n return image\r\n\r\n def noop_resize(\r\n image: Tensor, target: Optional[Dict[str, Tensor]]\r\n ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:\r\n return image, target\r\n\r\n model.transform.normalize = noop_normalize\r\n 
model.transform.resize = noop_resize\r\n```\r\n\r\n\n", "before_files": [{"content": "__all__ = [\n \"remove_internal_model_transforms\",\n \"patch_rcnn_param_groups\",\n \"patch_retinanet_param_groups\",\n]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom torchvision.models.detection.generalized_rcnn import GeneralizedRCNN\n\n\ndef remove_internal_model_transforms(model: GeneralizedRCNN):\n def noop_normalize(image: Tensor) -> Tensor:\n return image\n\n def noop_resize(\n image: Tensor, target: Optional[Dict[str, Tensor]]\n ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:\n return image, target\n\n model.transform.normalize = noop_normalize\n model.transform.resize = noop_resize\n\n\ndef patch_param_groups(\n model: nn.Module,\n head_layers: List[nn.Module],\n backbone_param_groups: List[List[nn.Parameter]],\n):\n def param_groups(model: nn.Module) -> List[List[nn.Parameter]]:\n head_param_groups = [list(layer.parameters()) for layer in head_layers]\n\n _param_groups = backbone_param_groups + head_param_groups\n check_all_model_params_in_groups2(model, _param_groups)\n\n return _param_groups\n\n model.param_groups = MethodType(param_groups, model)\n\n\ndef patch_rcnn_param_groups(model: nn.Module):\n return patch_param_groups(\n model=model,\n head_layers=[model.rpn, model.roi_heads],\n backbone_param_groups=model.backbone.param_groups(),\n )\n\n\ndef patch_retinanet_param_groups(model: nn.Module):\n return patch_param_groups(\n model=model,\n head_layers=[model.head],\n backbone_param_groups=model.backbone.param_groups(),\n )\n", "path": "icevision/models/torchvision/utils.py"}], "after_files": [{"content": "__all__ = [\n \"remove_internal_model_transforms\",\n \"patch_rcnn_param_groups\",\n \"patch_retinanet_param_groups\",\n]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom torchvision.models.detection.generalized_rcnn import GeneralizedRCNN\n\n\ndef _noop_normalize(image: Tensor) -> Tensor:\n return image\n\n\ndef _noop_resize(\n image: Tensor, target: Optional[Dict[str, Tensor]]\n) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:\n return image, target\n\n\ndef remove_internal_model_transforms(model: GeneralizedRCNN):\n model.transform.normalize = _noop_normalize\n model.transform.resize = _noop_resize\n\n\ndef patch_param_groups(\n model: nn.Module,\n head_layers: List[nn.Module],\n backbone_param_groups: List[List[nn.Parameter]],\n):\n def param_groups(model: nn.Module) -> List[List[nn.Parameter]]:\n head_param_groups = [list(layer.parameters()) for layer in head_layers]\n\n _param_groups = backbone_param_groups + head_param_groups\n check_all_model_params_in_groups2(model, _param_groups)\n\n return _param_groups\n\n model.param_groups = MethodType(param_groups, model)\n\n\ndef patch_rcnn_param_groups(model: nn.Module):\n return patch_param_groups(\n model=model,\n head_layers=[model.rpn, model.roi_heads],\n backbone_param_groups=model.backbone.param_groups(),\n )\n\n\ndef patch_retinanet_param_groups(model: nn.Module):\n return patch_param_groups(\n model=model,\n head_layers=[model.head],\n backbone_param_groups=model.backbone.param_groups(),\n )\n", "path": "icevision/models/torchvision/utils.py"}]} | 1,316 | 270 |
gh_patches_debug_40489 | rasdani/github-patches | git_diff | mindsdb__lightwood-979 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in SHAP analysis block
Trying out this module, the following error arises with the cloud tutorial for home rentals:
```python
File "/tmp/e74830c1ef5914dfafe52537b8b33cd0479265f508bfd6c616614666305205822.py", line 392, in predict
insights, global_insights = explain(data=data,encoded_data=encoded_data,predictions=df,ts_analysis=None,problem_definition=self.problem_definition,stat_analysis=self.statistical_analysis,runtime_analysis=self.runtime_analyzer,target_name=self.target,target_dtype=self.dtype_dict[self.target],explainer_blocks=self.analysis_blocks,pred_args=self.pred_args)
File "/MindsDB/lightwood/lightwood/analysis/explain.py", line 81, in explain
row_insights, global_insights = block.explain(row_insights, global_insights, **kwargs)
File "/MindsDB/lightwood/lightwood/analysis/helpers/shap.py", line 79, in explain
shap_values = shap_explainer.shap_values(ns.data, silent=True)
File "/MindsDB/nenv/lib/python3.8/site-packages/shap/explainers/_kernel.py", line 186, in shap_values
explanations.append(self.explain(data, **kwargs))
File "/MindsDB/nenv/lib/python3.8/site-packages/shap/explainers/_kernel.py", line 207, in explain
match_instance_to_data(instance, self.data)
File "/MindsDB/nenv/lib/python3.8/site-packages/shap/utils/_legacy.py", line 87, in match_instance_to_data
instance.group_display_values = [instance.x[0, group[0]] if len(group) == 1 else "" for group in data.groups]
File "/MindsDB/nenv/lib/python3.8/site-packages/shap/utils/_legacy.py", line 87, in <listcomp>
instance.group_display_values = [instance.x[0, group[0]] if len(group) == 1 else "" for group in data.groups]
IndexError: index 7 is out of bounds for axis 1 with size 7
```
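The shape mismatch in the last frame (index 7 against size 7) suggests that the background frame handed to `shap.KernelExplainer` and the frame later passed to `shap_values` do not carry the same column set — for example, one includes the target column and the other does not. A generic, self-contained sketch of keeping the two aligned (synthetic data and a stand-in model, not the lightwood pipeline):

```python
import numpy as np
import pandas as pd
import shap

train_df = pd.DataFrame({
    "sqft": [500, 800, 1200, 1500],
    "rooms": [1, 2, 3, 3],
    "rental_price": [1000, 1500, 2300, 2600],  # target column
})
target = "rental_price"
feature_columns = [c for c in train_df.columns if c != target]


def predict_fn(x: np.ndarray) -> np.ndarray:
    # stand-in model: KernelExplainer only needs a callable over the feature matrix
    return x.sum(axis=1)


# background data and the data being explained must expose identical columns
explainer = shap.KernelExplainer(model=predict_fn, data=train_df[feature_columns])
shap_values = explainer.shap_values(train_df[feature_columns], silent=True)
print(np.asarray(shap_values).shape)  # (4, 2): one contribution per row and feature
```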
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightwood/analysis/helpers/shap.py`
Content:
```
1 import warnings
2 from types import SimpleNamespace
3 from typing import Dict, Optional, Tuple
4
5 import numpy as np
6 import pandas as pd
7 from lightwood.analysis.base import BaseAnalysisBlock
8 from lightwood.api import dtype
9 from lightwood.api.types import PredictionArguments
10 from lightwood.data.encoded_ds import EncodedDs
11 from lightwood.helpers.log import log
12 from sklearn.preprocessing import LabelEncoder
13
14 import shap
15
16
17 class ShapleyValues(BaseAnalysisBlock):
18 """
19 Analysis block that estimates column importance with SHAP (SHapley Additive exPlanations), a game theoretic approach
20 to explain the ouput of any machine learning model. SHAP assigns each feature an importance value for a particular
21 prediction.
22
23 Reference:
24 https://shap.readthedocs.io/en/stable/
25 https://proceedings.neurips.cc/paper/2017/file/8a20a8621978632d76c43dfd28b67767-Paper.pdf
26 """
27 label_encoder: LabelEncoder
28
29 def __init__(self, deps: Optional[Tuple] = ...):
30 super().__init__(deps=deps)
31 self.label_encoder = LabelEncoder()
32
33 def analyze(self, info: Dict[str, object], **kwargs) -> Dict[str, object]:
34 log.info('Preparing to compute feature importance values with SHAP')
35 ns = SimpleNamespace(**kwargs)
36
37 output_dtype = ns.dtype_dict[ns.target]
38 train_data: EncodedDs = ns.train_data
39
40 if output_dtype in (dtype.integer, dtype.float, dtype.quantity):
41 pass
42 elif output_dtype in (dtype.binary, dtype.categorical, dtype.tags):
43 self.label_encoder.fit(train_data.data_frame[ns.target].values)
44 else:
45 log.warning(f'ShapleyValues analyzers not supported for type: {output_dtype}')
46 return info
47
48 def model(x: np.ndarray) -> np.ndarray:
49 assert(isinstance(x, np.ndarray))
50 df = pd.DataFrame(data=x, columns=train_data.data_frame.columns)
51 ds = EncodedDs(encoders=train_data.encoders, data_frame=df, target=train_data.target)
52
53 decoded_predictions = ns.predictor(ds=ds, args=PredictionArguments())
54 if output_dtype in (dtype.integer, dtype.float, dtype.quantity):
55 encoded_predictions = decoded_predictions['prediction'].values
56 elif output_dtype in (dtype.binary, dtype.categorical, dtype.tags):
57 encoded_predictions = self.label_encoder.transform(decoded_predictions['prediction'].values)
58
59 return encoded_predictions
60
61 info['shap_explainer'] = shap.KernelExplainer(model=model, data=train_data.data_frame)
62
63 return info
64
65 def explain(self,
66 row_insights: pd.DataFrame,
67 global_insights: Dict[str, object],
68 **kwargs
69 ) -> Tuple[pd.DataFrame, Dict[str, object]]:
70 log.info('Computing feature importance values with Kernel SHAP method')
71 ns = SimpleNamespace(**kwargs)
72
73 shap_explainer = ns.analysis.get('shap_explainer', None)
74 if shap_explainer is None:
75 return row_insights, global_insights
76
77 with warnings.catch_warnings():
78 warnings.filterwarnings("ignore", category=DeprecationWarning)
79 shap_values = shap_explainer.shap_values(ns.data, silent=True)
80
81 shap_values_df = pd.DataFrame(shap_values).rename(
82 mapper=lambda i: f"feature_{i}_impact", axis='columns')
83
84 if kwargs.get('target_dtype', None) in (dtype.binary, dtype.categorical, dtype.tags):
85 predictions = self.label_encoder.transform(row_insights['prediction'])
86 else:
87 predictions = row_insights['prediction']
88
89 base_response = (predictions - shap_values_df.sum(axis='columns')).mean()
90 global_insights['base_response'] = base_response
91
92 row_insights = pd.concat([row_insights, shap_values_df], axis='columns')
93
94 return row_insights, global_insights
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lightwood/analysis/helpers/shap.py b/lightwood/analysis/helpers/shap.py
--- a/lightwood/analysis/helpers/shap.py
+++ b/lightwood/analysis/helpers/shap.py
@@ -29,6 +29,8 @@
def __init__(self, deps: Optional[Tuple] = ...):
super().__init__(deps=deps)
self.label_encoder = LabelEncoder()
+ self.columns = []
+ self.target = None
def analyze(self, info: Dict[str, object], **kwargs) -> Dict[str, object]:
log.info('Preparing to compute feature importance values with SHAP')
@@ -45,9 +47,13 @@
log.warning(f'ShapleyValues analyzers not supported for type: {output_dtype}')
return info
+ self.target = ns.target
+ self.columns = list(set(ns.dtype_dict.keys()) - {self.target})
+ input_df = train_data.data_frame[self.columns]
+
def model(x: np.ndarray) -> np.ndarray:
assert(isinstance(x, np.ndarray))
- df = pd.DataFrame(data=x, columns=train_data.data_frame.columns)
+ df = pd.DataFrame(data=x, columns=self.columns)
ds = EncodedDs(encoders=train_data.encoders, data_frame=df, target=train_data.target)
decoded_predictions = ns.predictor(ds=ds, args=PredictionArguments())
@@ -58,7 +64,7 @@
return encoded_predictions
- info['shap_explainer'] = shap.KernelExplainer(model=model, data=train_data.data_frame)
+ info['shap_explainer'] = shap.KernelExplainer(model=model, data=input_df)
return info
@@ -76,10 +82,10 @@
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
- shap_values = shap_explainer.shap_values(ns.data, silent=True)
+ shap_values = shap_explainer.shap_values(ns.data[self.columns], silent=True)
shap_values_df = pd.DataFrame(shap_values).rename(
- mapper=lambda i: f"feature_{i}_impact", axis='columns')
+ mapper=lambda i: f"shap_contribution_{self.columns[i]}", axis='columns')
if kwargs.get('target_dtype', None) in (dtype.binary, dtype.categorical, dtype.tags):
predictions = self.label_encoder.transform(row_insights['prediction'])
@@ -87,8 +93,9 @@
predictions = row_insights['prediction']
base_response = (predictions - shap_values_df.sum(axis='columns')).mean()
- global_insights['base_response'] = base_response
row_insights = pd.concat([row_insights, shap_values_df], axis='columns')
+ row_insights['shap_base_response'] = base_response
+ row_insights['shap_final_response'] = predictions
return row_insights, global_insights
| {"golden_diff": "diff --git a/lightwood/analysis/helpers/shap.py b/lightwood/analysis/helpers/shap.py\n--- a/lightwood/analysis/helpers/shap.py\n+++ b/lightwood/analysis/helpers/shap.py\n@@ -29,6 +29,8 @@\n def __init__(self, deps: Optional[Tuple] = ...):\n super().__init__(deps=deps)\n self.label_encoder = LabelEncoder()\n+ self.columns = []\n+ self.target = None\n \n def analyze(self, info: Dict[str, object], **kwargs) -> Dict[str, object]:\n log.info('Preparing to compute feature importance values with SHAP')\n@@ -45,9 +47,13 @@\n log.warning(f'ShapleyValues analyzers not supported for type: {output_dtype}')\n return info\n \n+ self.target = ns.target\n+ self.columns = list(set(ns.dtype_dict.keys()) - {self.target})\n+ input_df = train_data.data_frame[self.columns]\n+\n def model(x: np.ndarray) -> np.ndarray:\n assert(isinstance(x, np.ndarray))\n- df = pd.DataFrame(data=x, columns=train_data.data_frame.columns)\n+ df = pd.DataFrame(data=x, columns=self.columns)\n ds = EncodedDs(encoders=train_data.encoders, data_frame=df, target=train_data.target)\n \n decoded_predictions = ns.predictor(ds=ds, args=PredictionArguments())\n@@ -58,7 +64,7 @@\n \n return encoded_predictions\n \n- info['shap_explainer'] = shap.KernelExplainer(model=model, data=train_data.data_frame)\n+ info['shap_explainer'] = shap.KernelExplainer(model=model, data=input_df)\n \n return info\n \n@@ -76,10 +82,10 @@\n \n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n- shap_values = shap_explainer.shap_values(ns.data, silent=True)\n+ shap_values = shap_explainer.shap_values(ns.data[self.columns], silent=True)\n \n shap_values_df = pd.DataFrame(shap_values).rename(\n- mapper=lambda i: f\"feature_{i}_impact\", axis='columns')\n+ mapper=lambda i: f\"shap_contribution_{self.columns[i]}\", axis='columns')\n \n if kwargs.get('target_dtype', None) in (dtype.binary, dtype.categorical, dtype.tags):\n predictions = self.label_encoder.transform(row_insights['prediction'])\n@@ -87,8 +93,9 @@\n predictions = row_insights['prediction']\n \n base_response = (predictions - shap_values_df.sum(axis='columns')).mean()\n- global_insights['base_response'] = base_response\n \n row_insights = pd.concat([row_insights, shap_values_df], axis='columns')\n+ row_insights['shap_base_response'] = base_response\n+ row_insights['shap_final_response'] = predictions\n \n return row_insights, global_insights\n", "issue": "Bug in SHAP analysis block\nTrying out this module, the following error arises with the cloud tutorial for home rentals:\r\n\r\n```python\r\n File \"/tmp/e74830c1ef5914dfafe52537b8b33cd0479265f508bfd6c616614666305205822.py\", line 392, in predict\r\n insights, global_insights = explain(data=data,encoded_data=encoded_data,predictions=df,ts_analysis=None,problem_definition=self.problem_definition,stat_analysis=self.statistical_analysis,runtime_analysis=self.runtime_analyzer,target_name=self.target,target_dtype=self.dtype_dict[self.target],explainer_blocks=self.analysis_blocks,pred_args=self.pred_args)\r\n File \"/MindsDB/lightwood/lightwood/analysis/explain.py\", line 81, in explain\r\n row_insights, global_insights = block.explain(row_insights, global_insights, **kwargs)\r\n File \"/MindsDB/lightwood/lightwood/analysis/helpers/shap.py\", line 79, in explain\r\n shap_values = shap_explainer.shap_values(ns.data, silent=True)\r\n File \"/MindsDB/nenv/lib/python3.8/site-packages/shap/explainers/_kernel.py\", line 186, in shap_values\r\n explanations.append(self.explain(data, **kwargs))\r\n 
File \"/MindsDB/nenv/lib/python3.8/site-packages/shap/explainers/_kernel.py\", line 207, in explain\r\n match_instance_to_data(instance, self.data)\r\n File \"/MindsDB/nenv/lib/python3.8/site-packages/shap/utils/_legacy.py\", line 87, in match_instance_to_data\r\n instance.group_display_values = [instance.x[0, group[0]] if len(group) == 1 else \"\" for group in data.groups]\r\n File \"/MindsDB/nenv/lib/python3.8/site-packages/shap/utils/_legacy.py\", line 87, in <listcomp>\r\n instance.group_display_values = [instance.x[0, group[0]] if len(group) == 1 else \"\" for group in data.groups]\r\nIndexError: index 7 is out of bounds for axis 1 with size 7\r\n\r\n```\n", "before_files": [{"content": "import warnings\nfrom types import SimpleNamespace\nfrom typing import Dict, Optional, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom lightwood.analysis.base import BaseAnalysisBlock\nfrom lightwood.api import dtype\nfrom lightwood.api.types import PredictionArguments\nfrom lightwood.data.encoded_ds import EncodedDs\nfrom lightwood.helpers.log import log\nfrom sklearn.preprocessing import LabelEncoder\n\nimport shap\n\n\nclass ShapleyValues(BaseAnalysisBlock):\n \"\"\"\n Analysis block that estimates column importance with SHAP (SHapley Additive exPlanations), a game theoretic approach\n to explain the ouput of any machine learning model. SHAP assigns each feature an importance value for a particular\n prediction.\n\n Reference:\n https://shap.readthedocs.io/en/stable/\n https://proceedings.neurips.cc/paper/2017/file/8a20a8621978632d76c43dfd28b67767-Paper.pdf\n \"\"\"\n label_encoder: LabelEncoder\n\n def __init__(self, deps: Optional[Tuple] = ...):\n super().__init__(deps=deps)\n self.label_encoder = LabelEncoder()\n\n def analyze(self, info: Dict[str, object], **kwargs) -> Dict[str, object]:\n log.info('Preparing to compute feature importance values with SHAP')\n ns = SimpleNamespace(**kwargs)\n\n output_dtype = ns.dtype_dict[ns.target]\n train_data: EncodedDs = ns.train_data\n\n if output_dtype in (dtype.integer, dtype.float, dtype.quantity):\n pass\n elif output_dtype in (dtype.binary, dtype.categorical, dtype.tags):\n self.label_encoder.fit(train_data.data_frame[ns.target].values)\n else:\n log.warning(f'ShapleyValues analyzers not supported for type: {output_dtype}')\n return info\n\n def model(x: np.ndarray) -> np.ndarray:\n assert(isinstance(x, np.ndarray))\n df = pd.DataFrame(data=x, columns=train_data.data_frame.columns)\n ds = EncodedDs(encoders=train_data.encoders, data_frame=df, target=train_data.target)\n\n decoded_predictions = ns.predictor(ds=ds, args=PredictionArguments())\n if output_dtype in (dtype.integer, dtype.float, dtype.quantity):\n encoded_predictions = decoded_predictions['prediction'].values\n elif output_dtype in (dtype.binary, dtype.categorical, dtype.tags):\n encoded_predictions = self.label_encoder.transform(decoded_predictions['prediction'].values)\n\n return encoded_predictions\n\n info['shap_explainer'] = shap.KernelExplainer(model=model, data=train_data.data_frame)\n\n return info\n\n def explain(self,\n row_insights: pd.DataFrame,\n global_insights: Dict[str, object],\n **kwargs\n ) -> Tuple[pd.DataFrame, Dict[str, object]]:\n log.info('Computing feature importance values with Kernel SHAP method')\n ns = SimpleNamespace(**kwargs)\n\n shap_explainer = ns.analysis.get('shap_explainer', None)\n if shap_explainer is None:\n return row_insights, global_insights\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", 
category=DeprecationWarning)\n shap_values = shap_explainer.shap_values(ns.data, silent=True)\n\n shap_values_df = pd.DataFrame(shap_values).rename(\n mapper=lambda i: f\"feature_{i}_impact\", axis='columns')\n\n if kwargs.get('target_dtype', None) in (dtype.binary, dtype.categorical, dtype.tags):\n predictions = self.label_encoder.transform(row_insights['prediction'])\n else:\n predictions = row_insights['prediction']\n\n base_response = (predictions - shap_values_df.sum(axis='columns')).mean()\n global_insights['base_response'] = base_response\n\n row_insights = pd.concat([row_insights, shap_values_df], axis='columns')\n\n return row_insights, global_insights\n", "path": "lightwood/analysis/helpers/shap.py"}], "after_files": [{"content": "import warnings\nfrom types import SimpleNamespace\nfrom typing import Dict, Optional, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom lightwood.analysis.base import BaseAnalysisBlock\nfrom lightwood.api import dtype\nfrom lightwood.api.types import PredictionArguments\nfrom lightwood.data.encoded_ds import EncodedDs\nfrom lightwood.helpers.log import log\nfrom sklearn.preprocessing import LabelEncoder\n\nimport shap\n\n\nclass ShapleyValues(BaseAnalysisBlock):\n \"\"\"\n Analysis block that estimates column importance with SHAP (SHapley Additive exPlanations), a game theoretic approach\n to explain the ouput of any machine learning model. SHAP assigns each feature an importance value for a particular\n prediction.\n\n Reference:\n https://shap.readthedocs.io/en/stable/\n https://proceedings.neurips.cc/paper/2017/file/8a20a8621978632d76c43dfd28b67767-Paper.pdf\n \"\"\"\n label_encoder: LabelEncoder\n\n def __init__(self, deps: Optional[Tuple] = ...):\n super().__init__(deps=deps)\n self.label_encoder = LabelEncoder()\n self.columns = []\n self.target = None\n\n def analyze(self, info: Dict[str, object], **kwargs) -> Dict[str, object]:\n log.info('Preparing to compute feature importance values with SHAP')\n ns = SimpleNamespace(**kwargs)\n\n output_dtype = ns.dtype_dict[ns.target]\n train_data: EncodedDs = ns.train_data\n\n if output_dtype in (dtype.integer, dtype.float, dtype.quantity):\n pass\n elif output_dtype in (dtype.binary, dtype.categorical, dtype.tags):\n self.label_encoder.fit(train_data.data_frame[ns.target].values)\n else:\n log.warning(f'ShapleyValues analyzers not supported for type: {output_dtype}')\n return info\n\n self.target = ns.target\n self.columns = list(set(ns.dtype_dict.keys()) - {self.target})\n input_df = train_data.data_frame[self.columns]\n\n def model(x: np.ndarray) -> np.ndarray:\n assert(isinstance(x, np.ndarray))\n df = pd.DataFrame(data=x, columns=self.columns)\n ds = EncodedDs(encoders=train_data.encoders, data_frame=df, target=train_data.target)\n\n decoded_predictions = ns.predictor(ds=ds, args=PredictionArguments())\n if output_dtype in (dtype.integer, dtype.float, dtype.quantity):\n encoded_predictions = decoded_predictions['prediction'].values\n elif output_dtype in (dtype.binary, dtype.categorical, dtype.tags):\n encoded_predictions = self.label_encoder.transform(decoded_predictions['prediction'].values)\n\n return encoded_predictions\n\n info['shap_explainer'] = shap.KernelExplainer(model=model, data=input_df)\n\n return info\n\n def explain(self,\n row_insights: pd.DataFrame,\n global_insights: Dict[str, object],\n **kwargs\n ) -> Tuple[pd.DataFrame, Dict[str, object]]:\n log.info('Computing feature importance values with Kernel SHAP method')\n ns = SimpleNamespace(**kwargs)\n\n shap_explainer = 
ns.analysis.get('shap_explainer', None)\n if shap_explainer is None:\n return row_insights, global_insights\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n shap_values = shap_explainer.shap_values(ns.data[self.columns], silent=True)\n\n shap_values_df = pd.DataFrame(shap_values).rename(\n mapper=lambda i: f\"shap_contribution_{self.columns[i]}\", axis='columns')\n\n if kwargs.get('target_dtype', None) in (dtype.binary, dtype.categorical, dtype.tags):\n predictions = self.label_encoder.transform(row_insights['prediction'])\n else:\n predictions = row_insights['prediction']\n\n base_response = (predictions - shap_values_df.sum(axis='columns')).mean()\n\n row_insights = pd.concat([row_insights, shap_values_df], axis='columns')\n row_insights['shap_base_response'] = base_response\n row_insights['shap_final_response'] = predictions\n\n return row_insights, global_insights\n", "path": "lightwood/analysis/helpers/shap.py"}]} | 1,807 | 653 |
gh_patches_debug_44330 | rasdani/github-patches | git_diff | litestar-org__litestar-1678 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
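For context, a small illustration of the mismatch (using the `importlib_resources` backport linked above; `my_package` is a made-up package name): package data resolves to a `Traversable` that behaves like a directory but is not necessarily a path on the local disk, so a pydantic `DirectoryPath` check rejects it even though the files are perfectly readable.

```python
# Illustration only -- "my_package" is hypothetical and must ship a static/ folder.
from importlib_resources import files  # same API as stdlib importlib.resources on 3.9+

static_root = files("my_package") / "static"

print(static_root.is_dir())                      # True even for a zipped install
print((static_root / "index.html").read_text())  # readable without a real directory on disk
# str(static_root) may not point at an existing local directory, which is
# exactly what a DirectoryPath-based validation assumes.
```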
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/datastructures/state.py`
Content:
```
1 from __future__ import annotations
2
3 from copy import copy, deepcopy
4 from threading import RLock
5 from typing import Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping
6
7 __all__ = ("ImmutableState", "State")
8
9
10 class ImmutableState(Mapping[str, Any]):
11 """An object meant to store arbitrary state.
12
13 It can be accessed using dot notation while exposing dict like functionalities.
14 """
15
16 __slots__ = ("_state",)
17
18 _state: dict[str, Any]
19
20 def __init__(
21 self, state: ImmutableState | Mapping[str, Any] | Iterable[tuple[str, Any]], deep_copy: bool = True
22 ) -> None:
23 """Initialize an ``ImmutableState`` instance.
24
25 Args:
26 state: An object to initialize the state from. Can be a dict, an instance of :class:`ImmutableState`, or a tuple
27 of key value paris.
28 deep_copy: Whether to 'deepcopy' the passed in state.
29
30 Examples:
31 .. code-block: python
32
33 from litestar.datastructures import ImmutableState
34
35 state_dict = {"first": 1, "second": 2, "third": 3, "fourth": 4}
36 state = ImmutableState(state_dict)
37
38 # state implements the Mapping type:
39 assert len(state) == 3
40 assert "first" in state
41 assert not "fourth" in state
42 assert state["first"] == 1
43 assert [(k, v) for k, v in state.items()] == [("first", 1), ("second", 2), ("third", 3)]
44
45 # state implements __bool__
46 assert state # state is true when it has values.
47 assert not State() # state is empty when it has no values.
48
49 # it has a 'dict' method to retrieve a shallow copy of the underlying dict
50 inner_dict = state.dict()
51 assert inner_dict == state_dict
52
53 # you can also retrieve a mutable State by calling 'mutable_copy'
54 mutable_state = state.mutable_copy()
55 del state["first"]
56 assert "first" not in state
57
58 """
59
60 if isinstance(state, ImmutableState):
61 state = state._state
62
63 if not isinstance(state, dict) and isinstance(state, Iterable):
64 state = dict(state)
65
66 super().__setattr__("_state", deepcopy(state) if deep_copy else state)
67
68 def __bool__(self) -> bool:
69 """Return a boolean indicating whether the wrapped dict instance has values."""
70 return bool(self._state)
71
72 def __getitem__(self, key: str) -> Any:
73 """Get the value for the corresponding key from the wrapped state object using subscription notation.
74
75 Args:
76 key: Key to access.
77
78 Raises:
79 KeyError
80
81 Returns:
82 A value from the wrapped state instance.
83 """
84 return self._state[key]
85
86 def __iter__(self) -> Iterator[str]:
87 """Return an iterator iterating the wrapped state dict.
88
89 Returns:
90 An iterator of strings
91 """
92 return iter(self._state)
93
94 def __len__(self) -> int:
95 """Return length of the wrapped state dict.
96
97 Returns:
98 An integer
99 """
100 return len(self._state)
101
102 def __getattr__(self, key: str) -> Any:
103 """Get the value for the corresponding key from the wrapped state object using attribute notation.
104
105 Args:
106 key: Key to retrieve
107
108 Raises:
109 AttributeError: if the given attribute is not set.
110
111 Returns:
112 The retrieved value
113 """
114 try:
115 return self._state[key]
116 except KeyError as e:
117 raise AttributeError from e
118
119 def __copy__(self) -> ImmutableState:
120 """Return a shallow copy of the given state object.
121
122 Customizes how the builtin "copy" function will work.
123 """
124 return self.__class__(deepcopy(self._state))
125
126 def mutable_copy(self) -> State:
127 """Return a mutable copy of the state object.
128
129 Returns:
130 A ``State``
131 """
132 return State(self._state)
133
134 def dict(self) -> dict[str, Any]:
135 """Return a shallow copy of the wrapped dict.
136
137 Returns:
138 A dict
139 """
140 return copy(self._state)
141
142 @classmethod
143 def __get_validators__(
144 cls,
145 ) -> Generator[Callable[[ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]], ImmutableState], None, None]: # type: ignore[valid-type]
146 """Pydantic compatible method to allow custom parsing of state instances in a SignatureModel."""
147 yield cls.validate
148
149 @classmethod
150 def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> ImmutableState: # type: ignore[valid-type]
151 """Parse a value and instantiate state inside a SignatureModel. This allows us to use custom subclasses of
152 state, as well as allows users to decide whether state is mutable or immutable.
153
154 Args:
155 value: The value from which to initialize the state instance.
156
157 Returns:
158 An ImmutableState instance
159 """
160 return cls(value)
161
162
163 class State(ImmutableState, MutableMapping[str, Any]):
164 """An object meant to store arbitrary state.
165
166 It can be accessed using dot notation while exposing dict like functionalities.
167 """
168
169 __slots__ = ("_lock",)
170
171 _lock: RLock
172
173 def __init__(
174 self,
175 state: ImmutableState | Mapping[str, Any] | Iterable[tuple[str, Any]] | None = None,
176 deep_copy: bool = False,
177 ) -> None:
178 """Initialize a ``State`` instance with an optional value.
179
180 Args:
181 state: An object to initialize the state from. Can be a dict, an instance of 'ImmutableState', or a tuple of key value paris.
182 deep_copy: Whether to 'deepcopy' the passed in state.
183
184 Examples:
185 .. code-block: python
186
187 from litestar.datastructures import State
188
189 state_dict = {"first": 1, "second": 2, "third": 3, "fourth": 4}
190 state = State(state_dict)
191
192 # state can be accessed using '.' notation
193 assert state.fourth == 4
194 del state.fourth
195
196 # state implements the Mapping type:
197 assert len(state) == 3
198 assert "first" in state
199 assert not "fourth" in state
200 assert state["first"] == 1
201 assert [(k, v) for k, v in state.items()] == [("first", 1), ("second", 2), ("third", 3)]
202
203 state["fourth"] = 4
204 assert "fourth" in state
205 del state["fourth"]
206
207 # state implements __bool__
208 assert state # state is true when it has values.
209 assert not State() # state is empty when it has no values.
210
211 # it has shallow copy
212 copied_state = state.copy()
213 del copied_state.first
214 assert state.first
215
216 # it has a 'dict' method to retrieve a shallow copy of the underlying dict
217 inner_dict = state.dict()
218 assert inner_dict == state_dict
219
220 # you can get an immutable copy of the state by calling 'immutable_immutable_copy'
221 immutable_copy = state.immutable_copy()
222 del immutable_copy.first # raises AttributeError
223
224 """
225
226 super().__init__(state if state is not None else {}, deep_copy=deep_copy)
227 super().__setattr__("_lock", RLock())
228
229 def __delitem__(self, key: str) -> None:
230 """Delete the value from the key from the wrapped state object using subscription notation.
231
232 Args:
233 key: Key to delete
234
235 Raises:
236 KeyError: if the given attribute is not set.
237
238 Returns:
239 None
240 """
241
242 with self._lock:
243 del self._state[key]
244
245 def __setitem__(self, key: str, value: Any) -> None:
246 """Set an item in the state using subscription notation.
247
248 Args:
249 key: Key to set.
250 value: Value to set.
251
252 Returns:
253 None
254 """
255
256 with self._lock:
257 self._state[key] = value
258
259 def __setattr__(self, key: str, value: Any) -> None:
260 """Set an item in the state using attribute notation.
261
262 Args:
263 key: Key to set.
264 value: Value to set.
265
266 Returns:
267 None
268 """
269
270 with self._lock:
271 self._state[key] = value
272
273 def __delattr__(self, key: str) -> None:
274 """Delete the value from the key from the wrapped state object using attribute notation.
275
276 Args:
277 key: Key to delete
278
279 Raises:
280 AttributeError: if the given attribute is not set.
281
282 Returns:
283 None
284 """
285
286 try:
287 with self._lock:
288 del self._state[key]
289 except KeyError as e:
290 raise AttributeError from e
291
292 def copy(self) -> State:
293 """Return a shallow copy of the state object.
294
295 Returns:
296 A ``State``
297 """
298 return self.__class__(self.dict())
299
300 def immutable_copy(self) -> ImmutableState:
301 """Return a shallow copy of the state object, setting it to be frozen.
302
303 Returns:
304 A ``State``
305 """
306 return ImmutableState(self)
307
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/datastructures/state.py b/litestar/datastructures/state.py
--- a/litestar/datastructures/state.py
+++ b/litestar/datastructures/state.py
@@ -2,7 +2,10 @@
from copy import copy, deepcopy
from threading import RLock
-from typing import Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping
+from typing import TYPE_CHECKING, Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping
+
+if TYPE_CHECKING:
+ from typing_extensions import Self
__all__ = ("ImmutableState", "State")
@@ -13,7 +16,10 @@
It can be accessed using dot notation while exposing dict like functionalities.
"""
- __slots__ = ("_state",)
+ __slots__ = (
+ "_state",
+ "_deep_copy",
+ )
_state: dict[str, Any]
@@ -56,13 +62,13 @@
assert "first" not in state
"""
-
if isinstance(state, ImmutableState):
state = state._state
if not isinstance(state, dict) and isinstance(state, Iterable):
state = dict(state)
+ super().__setattr__("_deep_copy", deep_copy)
super().__setattr__("_state", deepcopy(state) if deep_copy else state)
def __bool__(self) -> bool:
@@ -116,12 +122,12 @@
except KeyError as e:
raise AttributeError from e
- def __copy__(self) -> ImmutableState:
+ def __copy__(self) -> Self:
"""Return a shallow copy of the given state object.
Customizes how the builtin "copy" function will work.
"""
- return self.__class__(deepcopy(self._state))
+ return self.__class__(self._state, deep_copy=self._deep_copy)
def mutable_copy(self) -> State:
"""Return a mutable copy of the state object.
@@ -129,7 +135,7 @@
Returns:
A ``State``
"""
- return State(self._state)
+ return State(self._state, deep_copy=self._deep_copy)
def dict(self) -> dict[str, Any]:
"""Return a shallow copy of the wrapped dict.
@@ -147,7 +153,7 @@
yield cls.validate
@classmethod
- def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> ImmutableState: # type: ignore[valid-type]
+ def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> Self: # type: ignore[valid-type]
"""Parse a value and instantiate state inside a SignatureModel. This allows us to use custom subclasses of
state, as well as allows users to decide whether state is mutable or immutable.
@@ -157,7 +163,8 @@
Returns:
An ImmutableState instance
"""
- return cls(value)
+ deep_copy = value._deep_copy if isinstance(value, ImmutableState) else False
+ return cls(value, deep_copy=deep_copy)
class State(ImmutableState, MutableMapping[str, Any]):
@@ -289,13 +296,13 @@
except KeyError as e:
raise AttributeError from e
- def copy(self) -> State:
+ def copy(self) -> Self:
"""Return a shallow copy of the state object.
Returns:
A ``State``
"""
- return self.__class__(self.dict())
+ return self.__class__(self.dict(), deep_copy=self._deep_copy)
def immutable_copy(self) -> ImmutableState:
"""Return a shallow copy of the state object, setting it to be frozen.
@@ -303,4 +310,4 @@
Returns:
A ``State``
"""
- return ImmutableState(self)
+ return ImmutableState(self, deep_copy=self._deep_copy)
| {"golden_diff": "diff --git a/litestar/datastructures/state.py b/litestar/datastructures/state.py\n--- a/litestar/datastructures/state.py\n+++ b/litestar/datastructures/state.py\n@@ -2,7 +2,10 @@\n \n from copy import copy, deepcopy\n from threading import RLock\n-from typing import Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping\n+from typing import TYPE_CHECKING, Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping\n+\n+if TYPE_CHECKING:\n+ from typing_extensions import Self\n \n __all__ = (\"ImmutableState\", \"State\")\n \n@@ -13,7 +16,10 @@\n It can be accessed using dot notation while exposing dict like functionalities.\n \"\"\"\n \n- __slots__ = (\"_state\",)\n+ __slots__ = (\n+ \"_state\",\n+ \"_deep_copy\",\n+ )\n \n _state: dict[str, Any]\n \n@@ -56,13 +62,13 @@\n assert \"first\" not in state\n \n \"\"\"\n-\n if isinstance(state, ImmutableState):\n state = state._state\n \n if not isinstance(state, dict) and isinstance(state, Iterable):\n state = dict(state)\n \n+ super().__setattr__(\"_deep_copy\", deep_copy)\n super().__setattr__(\"_state\", deepcopy(state) if deep_copy else state)\n \n def __bool__(self) -> bool:\n@@ -116,12 +122,12 @@\n except KeyError as e:\n raise AttributeError from e\n \n- def __copy__(self) -> ImmutableState:\n+ def __copy__(self) -> Self:\n \"\"\"Return a shallow copy of the given state object.\n \n Customizes how the builtin \"copy\" function will work.\n \"\"\"\n- return self.__class__(deepcopy(self._state))\n+ return self.__class__(self._state, deep_copy=self._deep_copy)\n \n def mutable_copy(self) -> State:\n \"\"\"Return a mutable copy of the state object.\n@@ -129,7 +135,7 @@\n Returns:\n A ``State``\n \"\"\"\n- return State(self._state)\n+ return State(self._state, deep_copy=self._deep_copy)\n \n def dict(self) -> dict[str, Any]:\n \"\"\"Return a shallow copy of the wrapped dict.\n@@ -147,7 +153,7 @@\n yield cls.validate\n \n @classmethod\n- def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> ImmutableState: # type: ignore[valid-type]\n+ def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> Self: # type: ignore[valid-type]\n \"\"\"Parse a value and instantiate state inside a SignatureModel. This allows us to use custom subclasses of\n state, as well as allows users to decide whether state is mutable or immutable.\n \n@@ -157,7 +163,8 @@\n Returns:\n An ImmutableState instance\n \"\"\"\n- return cls(value)\n+ deep_copy = value._deep_copy if isinstance(value, ImmutableState) else False\n+ return cls(value, deep_copy=deep_copy)\n \n \n class State(ImmutableState, MutableMapping[str, Any]):\n@@ -289,13 +296,13 @@\n except KeyError as e:\n raise AttributeError from e\n \n- def copy(self) -> State:\n+ def copy(self) -> Self:\n \"\"\"Return a shallow copy of the state object.\n \n Returns:\n A ``State``\n \"\"\"\n- return self.__class__(self.dict())\n+ return self.__class__(self.dict(), deep_copy=self._deep_copy)\n \n def immutable_copy(self) -> ImmutableState:\n \"\"\"Return a shallow copy of the state object, setting it to be frozen.\n@@ -303,4 +310,4 @@\n Returns:\n A ``State``\n \"\"\"\n- return ImmutableState(self)\n+ return ImmutableState(self, deep_copy=self._deep_copy)\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). 
But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom copy import copy, deepcopy\nfrom threading import RLock\nfrom typing import Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping\n\n__all__ = (\"ImmutableState\", \"State\")\n\n\nclass ImmutableState(Mapping[str, Any]):\n \"\"\"An object meant to store arbitrary state.\n\n It can be accessed using dot notation while exposing dict like functionalities.\n \"\"\"\n\n __slots__ = (\"_state\",)\n\n _state: dict[str, Any]\n\n def __init__(\n self, state: ImmutableState | Mapping[str, Any] | Iterable[tuple[str, Any]], deep_copy: bool = True\n ) -> None:\n \"\"\"Initialize an ``ImmutableState`` instance.\n\n Args:\n state: An object to initialize the state from. Can be a dict, an instance of :class:`ImmutableState`, or a tuple\n of key value paris.\n deep_copy: Whether to 'deepcopy' the passed in state.\n\n Examples:\n .. code-block: python\n\n from litestar.datastructures import ImmutableState\n\n state_dict = {\"first\": 1, \"second\": 2, \"third\": 3, \"fourth\": 4}\n state = ImmutableState(state_dict)\n\n # state implements the Mapping type:\n assert len(state) == 3\n assert \"first\" in state\n assert not \"fourth\" in state\n assert state[\"first\"] == 1\n assert [(k, v) for k, v in state.items()] == [(\"first\", 1), (\"second\", 2), (\"third\", 3)]\n\n # state implements __bool__\n assert state # state is true when it has values.\n assert not State() # state is empty when it has no values.\n\n # it has a 'dict' method to retrieve a shallow copy of the underlying dict\n inner_dict = state.dict()\n assert inner_dict == state_dict\n\n # you can also retrieve a mutable State by calling 'mutable_copy'\n mutable_state = state.mutable_copy()\n del state[\"first\"]\n assert \"first\" not in state\n\n \"\"\"\n\n if isinstance(state, ImmutableState):\n state = state._state\n\n if not isinstance(state, dict) and isinstance(state, Iterable):\n state = dict(state)\n\n super().__setattr__(\"_state\", deepcopy(state) if deep_copy else state)\n\n def __bool__(self) -> bool:\n \"\"\"Return a boolean indicating whether the wrapped dict instance has values.\"\"\"\n return bool(self._state)\n\n def __getitem__(self, key: str) -> Any:\n \"\"\"Get the value for the corresponding key from the wrapped state object using subscription notation.\n\n Args:\n key: Key to access.\n\n Raises:\n KeyError\n\n Returns:\n A value from the wrapped state instance.\n \"\"\"\n return self._state[key]\n\n def __iter__(self) -> Iterator[str]:\n \"\"\"Return an iterator iterating the wrapped state dict.\n\n Returns:\n An iterator of strings\n \"\"\"\n return iter(self._state)\n\n def __len__(self) -> int:\n \"\"\"Return length of the wrapped state dict.\n\n Returns:\n An integer\n \"\"\"\n return len(self._state)\n\n def __getattr__(self, key: str) -> Any:\n \"\"\"Get the value for the corresponding key from the wrapped state object using attribute notation.\n\n Args:\n key: Key to retrieve\n\n Raises:\n AttributeError: if the given attribute is not set.\n\n Returns:\n The retrieved value\n \"\"\"\n try:\n 
return self._state[key]\n except KeyError as e:\n raise AttributeError from e\n\n def __copy__(self) -> ImmutableState:\n \"\"\"Return a shallow copy of the given state object.\n\n Customizes how the builtin \"copy\" function will work.\n \"\"\"\n return self.__class__(deepcopy(self._state))\n\n def mutable_copy(self) -> State:\n \"\"\"Return a mutable copy of the state object.\n\n Returns:\n A ``State``\n \"\"\"\n return State(self._state)\n\n def dict(self) -> dict[str, Any]:\n \"\"\"Return a shallow copy of the wrapped dict.\n\n Returns:\n A dict\n \"\"\"\n return copy(self._state)\n\n @classmethod\n def __get_validators__(\n cls,\n ) -> Generator[Callable[[ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]], ImmutableState], None, None]: # type: ignore[valid-type]\n \"\"\"Pydantic compatible method to allow custom parsing of state instances in a SignatureModel.\"\"\"\n yield cls.validate\n\n @classmethod\n def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> ImmutableState: # type: ignore[valid-type]\n \"\"\"Parse a value and instantiate state inside a SignatureModel. This allows us to use custom subclasses of\n state, as well as allows users to decide whether state is mutable or immutable.\n\n Args:\n value: The value from which to initialize the state instance.\n\n Returns:\n An ImmutableState instance\n \"\"\"\n return cls(value)\n\n\nclass State(ImmutableState, MutableMapping[str, Any]):\n \"\"\"An object meant to store arbitrary state.\n\n It can be accessed using dot notation while exposing dict like functionalities.\n \"\"\"\n\n __slots__ = (\"_lock\",)\n\n _lock: RLock\n\n def __init__(\n self,\n state: ImmutableState | Mapping[str, Any] | Iterable[tuple[str, Any]] | None = None,\n deep_copy: bool = False,\n ) -> None:\n \"\"\"Initialize a ``State`` instance with an optional value.\n\n Args:\n state: An object to initialize the state from. Can be a dict, an instance of 'ImmutableState', or a tuple of key value paris.\n deep_copy: Whether to 'deepcopy' the passed in state.\n\n Examples:\n .. code-block: python\n\n from litestar.datastructures import State\n\n state_dict = {\"first\": 1, \"second\": 2, \"third\": 3, \"fourth\": 4}\n state = State(state_dict)\n\n # state can be accessed using '.' 
notation\n assert state.fourth == 4\n del state.fourth\n\n # state implements the Mapping type:\n assert len(state) == 3\n assert \"first\" in state\n assert not \"fourth\" in state\n assert state[\"first\"] == 1\n assert [(k, v) for k, v in state.items()] == [(\"first\", 1), (\"second\", 2), (\"third\", 3)]\n\n state[\"fourth\"] = 4\n assert \"fourth\" in state\n del state[\"fourth\"]\n\n # state implements __bool__\n assert state # state is true when it has values.\n assert not State() # state is empty when it has no values.\n\n # it has shallow copy\n copied_state = state.copy()\n del copied_state.first\n assert state.first\n\n # it has a 'dict' method to retrieve a shallow copy of the underlying dict\n inner_dict = state.dict()\n assert inner_dict == state_dict\n\n # you can get an immutable copy of the state by calling 'immutable_immutable_copy'\n immutable_copy = state.immutable_copy()\n del immutable_copy.first # raises AttributeError\n\n \"\"\"\n\n super().__init__(state if state is not None else {}, deep_copy=deep_copy)\n super().__setattr__(\"_lock\", RLock())\n\n def __delitem__(self, key: str) -> None:\n \"\"\"Delete the value from the key from the wrapped state object using subscription notation.\n\n Args:\n key: Key to delete\n\n Raises:\n KeyError: if the given attribute is not set.\n\n Returns:\n None\n \"\"\"\n\n with self._lock:\n del self._state[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n \"\"\"Set an item in the state using subscription notation.\n\n Args:\n key: Key to set.\n value: Value to set.\n\n Returns:\n None\n \"\"\"\n\n with self._lock:\n self._state[key] = value\n\n def __setattr__(self, key: str, value: Any) -> None:\n \"\"\"Set an item in the state using attribute notation.\n\n Args:\n key: Key to set.\n value: Value to set.\n\n Returns:\n None\n \"\"\"\n\n with self._lock:\n self._state[key] = value\n\n def __delattr__(self, key: str) -> None:\n \"\"\"Delete the value from the key from the wrapped state object using attribute notation.\n\n Args:\n key: Key to delete\n\n Raises:\n AttributeError: if the given attribute is not set.\n\n Returns:\n None\n \"\"\"\n\n try:\n with self._lock:\n del self._state[key]\n except KeyError as e:\n raise AttributeError from e\n\n def copy(self) -> State:\n \"\"\"Return a shallow copy of the state object.\n\n Returns:\n A ``State``\n \"\"\"\n return self.__class__(self.dict())\n\n def immutable_copy(self) -> ImmutableState:\n \"\"\"Return a shallow copy of the state object, setting it to be frozen.\n\n Returns:\n A ``State``\n \"\"\"\n return ImmutableState(self)\n", "path": "litestar/datastructures/state.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom copy import copy, deepcopy\nfrom threading import RLock\nfrom typing import TYPE_CHECKING, Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping\n\nif TYPE_CHECKING:\n from typing_extensions import Self\n\n__all__ = (\"ImmutableState\", \"State\")\n\n\nclass ImmutableState(Mapping[str, Any]):\n \"\"\"An object meant to store arbitrary state.\n\n It can be accessed using dot notation while exposing dict like functionalities.\n \"\"\"\n\n __slots__ = (\n \"_state\",\n \"_deep_copy\",\n )\n\n _state: dict[str, Any]\n\n def __init__(\n self, state: ImmutableState | Mapping[str, Any] | Iterable[tuple[str, Any]], deep_copy: bool = True\n ) -> None:\n \"\"\"Initialize an ``ImmutableState`` instance.\n\n Args:\n state: An object to initialize the state from. 
Can be a dict, an instance of :class:`ImmutableState`, or a tuple\n of key value paris.\n deep_copy: Whether to 'deepcopy' the passed in state.\n\n Examples:\n .. code-block: python\n\n from litestar.datastructures import ImmutableState\n\n state_dict = {\"first\": 1, \"second\": 2, \"third\": 3, \"fourth\": 4}\n state = ImmutableState(state_dict)\n\n # state implements the Mapping type:\n assert len(state) == 3\n assert \"first\" in state\n assert not \"fourth\" in state\n assert state[\"first\"] == 1\n assert [(k, v) for k, v in state.items()] == [(\"first\", 1), (\"second\", 2), (\"third\", 3)]\n\n # state implements __bool__\n assert state # state is true when it has values.\n assert not State() # state is empty when it has no values.\n\n # it has a 'dict' method to retrieve a shallow copy of the underlying dict\n inner_dict = state.dict()\n assert inner_dict == state_dict\n\n # you can also retrieve a mutable State by calling 'mutable_copy'\n mutable_state = state.mutable_copy()\n del state[\"first\"]\n assert \"first\" not in state\n\n \"\"\"\n if isinstance(state, ImmutableState):\n state = state._state\n\n if not isinstance(state, dict) and isinstance(state, Iterable):\n state = dict(state)\n\n super().__setattr__(\"_deep_copy\", deep_copy)\n super().__setattr__(\"_state\", deepcopy(state) if deep_copy else state)\n\n def __bool__(self) -> bool:\n \"\"\"Return a boolean indicating whether the wrapped dict instance has values.\"\"\"\n return bool(self._state)\n\n def __getitem__(self, key: str) -> Any:\n \"\"\"Get the value for the corresponding key from the wrapped state object using subscription notation.\n\n Args:\n key: Key to access.\n\n Raises:\n KeyError\n\n Returns:\n A value from the wrapped state instance.\n \"\"\"\n return self._state[key]\n\n def __iter__(self) -> Iterator[str]:\n \"\"\"Return an iterator iterating the wrapped state dict.\n\n Returns:\n An iterator of strings\n \"\"\"\n return iter(self._state)\n\n def __len__(self) -> int:\n \"\"\"Return length of the wrapped state dict.\n\n Returns:\n An integer\n \"\"\"\n return len(self._state)\n\n def __getattr__(self, key: str) -> Any:\n \"\"\"Get the value for the corresponding key from the wrapped state object using attribute notation.\n\n Args:\n key: Key to retrieve\n\n Raises:\n AttributeError: if the given attribute is not set.\n\n Returns:\n The retrieved value\n \"\"\"\n try:\n return self._state[key]\n except KeyError as e:\n raise AttributeError from e\n\n def __copy__(self) -> Self:\n \"\"\"Return a shallow copy of the given state object.\n\n Customizes how the builtin \"copy\" function will work.\n \"\"\"\n return self.__class__(self._state, deep_copy=self._deep_copy)\n\n def mutable_copy(self) -> State:\n \"\"\"Return a mutable copy of the state object.\n\n Returns:\n A ``State``\n \"\"\"\n return State(self._state, deep_copy=self._deep_copy)\n\n def dict(self) -> dict[str, Any]:\n \"\"\"Return a shallow copy of the wrapped dict.\n\n Returns:\n A dict\n \"\"\"\n return copy(self._state)\n\n @classmethod\n def __get_validators__(\n cls,\n ) -> Generator[Callable[[ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]], ImmutableState], None, None]: # type: ignore[valid-type]\n \"\"\"Pydantic compatible method to allow custom parsing of state instances in a SignatureModel.\"\"\"\n yield cls.validate\n\n @classmethod\n def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> Self: # type: ignore[valid-type]\n \"\"\"Parse a value and instantiate state inside a 
SignatureModel. This allows us to use custom subclasses of\n state, as well as allows users to decide whether state is mutable or immutable.\n\n Args:\n value: The value from which to initialize the state instance.\n\n Returns:\n An ImmutableState instance\n \"\"\"\n deep_copy = value._deep_copy if isinstance(value, ImmutableState) else False\n return cls(value, deep_copy=deep_copy)\n\n\nclass State(ImmutableState, MutableMapping[str, Any]):\n \"\"\"An object meant to store arbitrary state.\n\n It can be accessed using dot notation while exposing dict like functionalities.\n \"\"\"\n\n __slots__ = (\"_lock\",)\n\n _lock: RLock\n\n def __init__(\n self,\n state: ImmutableState | Mapping[str, Any] | Iterable[tuple[str, Any]] | None = None,\n deep_copy: bool = False,\n ) -> None:\n \"\"\"Initialize a ``State`` instance with an optional value.\n\n Args:\n state: An object to initialize the state from. Can be a dict, an instance of 'ImmutableState', or a tuple of key value paris.\n deep_copy: Whether to 'deepcopy' the passed in state.\n\n Examples:\n .. code-block: python\n\n from litestar.datastructures import State\n\n state_dict = {\"first\": 1, \"second\": 2, \"third\": 3, \"fourth\": 4}\n state = State(state_dict)\n\n # state can be accessed using '.' notation\n assert state.fourth == 4\n del state.fourth\n\n # state implements the Mapping type:\n assert len(state) == 3\n assert \"first\" in state\n assert not \"fourth\" in state\n assert state[\"first\"] == 1\n assert [(k, v) for k, v in state.items()] == [(\"first\", 1), (\"second\", 2), (\"third\", 3)]\n\n state[\"fourth\"] = 4\n assert \"fourth\" in state\n del state[\"fourth\"]\n\n # state implements __bool__\n assert state # state is true when it has values.\n assert not State() # state is empty when it has no values.\n\n # it has shallow copy\n copied_state = state.copy()\n del copied_state.first\n assert state.first\n\n # it has a 'dict' method to retrieve a shallow copy of the underlying dict\n inner_dict = state.dict()\n assert inner_dict == state_dict\n\n # you can get an immutable copy of the state by calling 'immutable_immutable_copy'\n immutable_copy = state.immutable_copy()\n del immutable_copy.first # raises AttributeError\n\n \"\"\"\n\n super().__init__(state if state is not None else {}, deep_copy=deep_copy)\n super().__setattr__(\"_lock\", RLock())\n\n def __delitem__(self, key: str) -> None:\n \"\"\"Delete the value from the key from the wrapped state object using subscription notation.\n\n Args:\n key: Key to delete\n\n Raises:\n KeyError: if the given attribute is not set.\n\n Returns:\n None\n \"\"\"\n\n with self._lock:\n del self._state[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n \"\"\"Set an item in the state using subscription notation.\n\n Args:\n key: Key to set.\n value: Value to set.\n\n Returns:\n None\n \"\"\"\n\n with self._lock:\n self._state[key] = value\n\n def __setattr__(self, key: str, value: Any) -> None:\n \"\"\"Set an item in the state using attribute notation.\n\n Args:\n key: Key to set.\n value: Value to set.\n\n Returns:\n None\n \"\"\"\n\n with self._lock:\n self._state[key] = value\n\n def __delattr__(self, key: str) -> None:\n \"\"\"Delete the value from the key from the wrapped state object using attribute notation.\n\n Args:\n key: Key to delete\n\n Raises:\n AttributeError: if the given attribute is not set.\n\n Returns:\n None\n \"\"\"\n\n try:\n with self._lock:\n del self._state[key]\n except KeyError as e:\n raise AttributeError from e\n\n def copy(self) -> 
Self:\n \"\"\"Return a shallow copy of the state object.\n\n Returns:\n A ``State``\n \"\"\"\n return self.__class__(self.dict(), deep_copy=self._deep_copy)\n\n def immutable_copy(self) -> ImmutableState:\n \"\"\"Return a shallow copy of the state object, setting it to be frozen.\n\n Returns:\n A ``State``\n \"\"\"\n return ImmutableState(self, deep_copy=self._deep_copy)\n", "path": "litestar/datastructures/state.py"}]} | 3,310 | 889 |
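The golden diff in the record above modifies `litestar/datastructures/state.py` rather than the static-files config mentioned in the issue; what it shows is `ImmutableState`/`State` remembering the `deep_copy` flag chosen at construction (stored in a `_deep_copy` slot) and reusing it in `__copy__`, `mutable_copy`, `copy`, `immutable_copy` and `validate`. A small, hypothetical sketch of that flag-propagation pattern — not the actual litestar class, just the shape of the change:

```python
from copy import deepcopy

class StateSketch:
    """Illustrative only: remember the construction-time deep_copy choice
    so that copies behave the same way as the original object."""

    def __init__(self, state=None, deep_copy=False):
        state = dict(state) if state is not None else {}
        self._deep_copy = deep_copy                       # remembered, as in the diff
        self._state = deepcopy(state) if deep_copy else state

    def copy(self):
        # Propagate the stored flag instead of hard-coding a new behaviour.
        return self.__class__(self._state, deep_copy=self._deep_copy)

s = StateSketch({"first": 1}, deep_copy=True)
c = s.copy()
assert c._state == {"first": 1} and c._deep_copy is True
```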
gh_patches_debug_3901 | rasdani/github-patches | git_diff | carpentries__amy-646 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API: don't return todos with unknown start
This breaks the timeline.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/views.py`
Content:
```
1 import datetime
2
3 from django.db.models import Q
4 from rest_framework.generics import ListAPIView
5 from rest_framework.metadata import SimpleMetadata
6 from rest_framework.permissions import (
7 IsAuthenticatedOrReadOnly, IsAuthenticated
8 )
9 from rest_framework.response import Response
10 from rest_framework.reverse import reverse
11 from rest_framework.views import APIView
12
13 from workshops.models import Badge, Airport, Event, TodoItem, Tag
14 from workshops.util import get_members, default_membership_cutoff
15
16 from .serializers import (
17 PersonNameEmailSerializer,
18 ExportBadgesSerializer,
19 ExportInstructorLocationsSerializer,
20 EventSerializer,
21 TodoSerializer,
22 )
23
24
25 class QueryMetadata(SimpleMetadata):
26 """Additionally include info about query parameters."""
27
28 def determine_metadata(self, request, view):
29 data = super().determine_metadata(request, view)
30
31 try:
32 data['query_params'] = view.get_query_params_description()
33 except AttributeError:
34 pass
35
36 return data
37
38
39 class ApiRoot(APIView):
40 def get(self, request, format=None):
41 return Response({
42 'export-badges': reverse('api:export-badges', request=request,
43 format=format),
44 'export-instructors': reverse('api:export-instructors',
45 request=request, format=format),
46 'export-members': reverse('api:export-members', request=request,
47 format=format),
48 'events-published': reverse('api:events-published',
49 request=request, format=format),
50 'user-todos': reverse('api:user-todos',
51 request=request, format=format),
52 })
53
54
55 class ExportBadgesView(ListAPIView):
56 """List all badges and people who have them."""
57 permission_classes = (IsAuthenticatedOrReadOnly, )
58 paginator = None # disable pagination
59
60 queryset = Badge.objects.prefetch_related('person_set')
61 serializer_class = ExportBadgesSerializer
62
63
64 class ExportInstructorLocationsView(ListAPIView):
65 """List all airports and instructors located near them."""
66 permission_classes = (IsAuthenticatedOrReadOnly, )
67 paginator = None # disable pagination
68
69 queryset = Airport.objects.exclude(person=None) \
70 .prefetch_related('person_set')
71 serializer_class = ExportInstructorLocationsSerializer
72
73
74 class ExportMembersView(ListAPIView):
75 """Show everyone who qualifies as an SCF member."""
76 permission_classes = (IsAuthenticatedOrReadOnly, )
77 paginator = None # disable pagination
78
79 serializer_class = PersonNameEmailSerializer
80
81 def get_queryset(self):
82 earliest_default, latest_default = default_membership_cutoff()
83
84 earliest = self.request.query_params.get('earliest', None)
85 if earliest is not None:
86 try:
87 earliest = datetime.datetime.strptime(earliest, '%Y-%m-%d') \
88 .date()
89 except ValueError:
90 earliest = earliest_default
91 else:
92 earliest = earliest_default
93
94 latest = self.request.query_params.get('latest', None)
95 if latest is not None:
96 try:
97 latest = datetime.datetime.strptime(latest, '%Y-%m-%d').date()
98 except ValueError:
99 latest = latest_default
100 else:
101 latest = latest_default
102
103 return get_members(earliest, latest)
104
105 def get_query_params_description(self):
106 return {
107 'earliest': 'Date of earliest workshop someone taught at.'
108 ' Defaults to -2*365 days from current date.',
109 'latest': 'Date of latest workshop someone taught at.'
110 ' Defaults to current date.',
111 }
112
113
114 class PublishedEvents(ListAPIView):
115 """List published events."""
116
117 # only events that have both a starting date and a URL
118 permission_classes = (IsAuthenticatedOrReadOnly, )
119 paginator = None # disable pagination
120
121 serializer_class = EventSerializer
122
123 metadata_class = QueryMetadata
124
125 def get_queryset(self):
126 """Optionally restrict the returned event set to events hosted by
127 specific host or administered by specific admin."""
128 queryset = Event.objects.published_events()
129
130 administrator = self.request.query_params.get('administrator', None)
131 if administrator is not None:
132 queryset = queryset.filter(administrator__pk=administrator)
133
134 host = self.request.query_params.get('host', None)
135 if host is not None:
136 queryset = queryset.filter(host__pk=host)
137
138 tags = self.request.query_params.getlist('tag', None)
139 if tags:
140 tags = Tag.objects.filter(name__in=tags)
141 for tag in tags:
142 queryset = queryset.filter(tags=tag)
143
144 return queryset
145
146 def get_query_params_description(self):
147 return {
148 'administrator': 'ID of the organization responsible for admin '
149 'work on events.',
150 'host': 'ID of the organization hosting the event.',
151 'tag': "Events' tag(s). You can use this parameter multiple "
152 "times.",
153 }
154
155
156 class UserTodoItems(ListAPIView):
157 permission_classes = (IsAuthenticated, )
158 paginator = None
159 serializer_class = TodoSerializer
160
161 def get_queryset(self):
162 """Return current TODOs for currently logged in user."""
163 return TodoItem.objects.user(self.request.user) \
164 .incomplete() \
165 .select_related('event')
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/api/views.py b/api/views.py
--- a/api/views.py
+++ b/api/views.py
@@ -162,4 +162,5 @@
"""Return current TODOs for currently logged in user."""
return TodoItem.objects.user(self.request.user) \
.incomplete() \
+ .exclude(due=None) \
.select_related('event')
| {"golden_diff": "diff --git a/api/views.py b/api/views.py\n--- a/api/views.py\n+++ b/api/views.py\n@@ -162,4 +162,5 @@\n \"\"\"Return current TODOs for currently logged in user.\"\"\"\n return TodoItem.objects.user(self.request.user) \\\n .incomplete() \\\n+ .exclude(due=None) \\\n .select_related('event')\n", "issue": "API: don't return todos with unknown start\nThis breaks the timeline.\n\n", "before_files": [{"content": "import datetime\n\nfrom django.db.models import Q\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.metadata import SimpleMetadata\nfrom rest_framework.permissions import (\n IsAuthenticatedOrReadOnly, IsAuthenticated\n)\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework.views import APIView\n\nfrom workshops.models import Badge, Airport, Event, TodoItem, Tag\nfrom workshops.util import get_members, default_membership_cutoff\n\nfrom .serializers import (\n PersonNameEmailSerializer,\n ExportBadgesSerializer,\n ExportInstructorLocationsSerializer,\n EventSerializer,\n TodoSerializer,\n)\n\n\nclass QueryMetadata(SimpleMetadata):\n \"\"\"Additionally include info about query parameters.\"\"\"\n\n def determine_metadata(self, request, view):\n data = super().determine_metadata(request, view)\n\n try:\n data['query_params'] = view.get_query_params_description()\n except AttributeError:\n pass\n\n return data\n\n\nclass ApiRoot(APIView):\n def get(self, request, format=None):\n return Response({\n 'export-badges': reverse('api:export-badges', request=request,\n format=format),\n 'export-instructors': reverse('api:export-instructors',\n request=request, format=format),\n 'export-members': reverse('api:export-members', request=request,\n format=format),\n 'events-published': reverse('api:events-published',\n request=request, format=format),\n 'user-todos': reverse('api:user-todos',\n request=request, format=format),\n })\n\n\nclass ExportBadgesView(ListAPIView):\n \"\"\"List all badges and people who have them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Badge.objects.prefetch_related('person_set')\n serializer_class = ExportBadgesSerializer\n\n\nclass ExportInstructorLocationsView(ListAPIView):\n \"\"\"List all airports and instructors located near them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Airport.objects.exclude(person=None) \\\n .prefetch_related('person_set')\n serializer_class = ExportInstructorLocationsSerializer\n\n\nclass ExportMembersView(ListAPIView):\n \"\"\"Show everyone who qualifies as an SCF member.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = PersonNameEmailSerializer\n\n def get_queryset(self):\n earliest_default, latest_default = default_membership_cutoff()\n\n earliest = self.request.query_params.get('earliest', None)\n if earliest is not None:\n try:\n earliest = datetime.datetime.strptime(earliest, '%Y-%m-%d') \\\n .date()\n except ValueError:\n earliest = earliest_default\n else:\n earliest = earliest_default\n\n latest = self.request.query_params.get('latest', None)\n if latest is not None:\n try:\n latest = datetime.datetime.strptime(latest, '%Y-%m-%d').date()\n except ValueError:\n latest = latest_default\n else:\n latest = latest_default\n\n return get_members(earliest, latest)\n\n def get_query_params_description(self):\n return {\n 'earliest': 'Date 
of earliest workshop someone taught at.'\n ' Defaults to -2*365 days from current date.',\n 'latest': 'Date of latest workshop someone taught at.'\n ' Defaults to current date.',\n }\n\n\nclass PublishedEvents(ListAPIView):\n \"\"\"List published events.\"\"\"\n\n # only events that have both a starting date and a URL\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = EventSerializer\n\n metadata_class = QueryMetadata\n\n def get_queryset(self):\n \"\"\"Optionally restrict the returned event set to events hosted by\n specific host or administered by specific admin.\"\"\"\n queryset = Event.objects.published_events()\n\n administrator = self.request.query_params.get('administrator', None)\n if administrator is not None:\n queryset = queryset.filter(administrator__pk=administrator)\n\n host = self.request.query_params.get('host', None)\n if host is not None:\n queryset = queryset.filter(host__pk=host)\n\n tags = self.request.query_params.getlist('tag', None)\n if tags:\n tags = Tag.objects.filter(name__in=tags)\n for tag in tags:\n queryset = queryset.filter(tags=tag)\n\n return queryset\n\n def get_query_params_description(self):\n return {\n 'administrator': 'ID of the organization responsible for admin '\n 'work on events.',\n 'host': 'ID of the organization hosting the event.',\n 'tag': \"Events' tag(s). You can use this parameter multiple \"\n \"times.\",\n }\n\n\nclass UserTodoItems(ListAPIView):\n permission_classes = (IsAuthenticated, )\n paginator = None\n serializer_class = TodoSerializer\n\n def get_queryset(self):\n \"\"\"Return current TODOs for currently logged in user.\"\"\"\n return TodoItem.objects.user(self.request.user) \\\n .incomplete() \\\n .select_related('event')\n", "path": "api/views.py"}], "after_files": [{"content": "import datetime\n\nfrom django.db.models import Q\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.metadata import SimpleMetadata\nfrom rest_framework.permissions import (\n IsAuthenticatedOrReadOnly, IsAuthenticated\n)\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework.views import APIView\n\nfrom workshops.models import Badge, Airport, Event, TodoItem, Tag\nfrom workshops.util import get_members, default_membership_cutoff\n\nfrom .serializers import (\n PersonNameEmailSerializer,\n ExportBadgesSerializer,\n ExportInstructorLocationsSerializer,\n EventSerializer,\n TodoSerializer,\n)\n\n\nclass QueryMetadata(SimpleMetadata):\n \"\"\"Additionally include info about query parameters.\"\"\"\n\n def determine_metadata(self, request, view):\n data = super().determine_metadata(request, view)\n\n try:\n data['query_params'] = view.get_query_params_description()\n except AttributeError:\n pass\n\n return data\n\n\nclass ApiRoot(APIView):\n def get(self, request, format=None):\n return Response({\n 'export-badges': reverse('api:export-badges', request=request,\n format=format),\n 'export-instructors': reverse('api:export-instructors',\n request=request, format=format),\n 'export-members': reverse('api:export-members', request=request,\n format=format),\n 'events-published': reverse('api:events-published',\n request=request, format=format),\n 'user-todos': reverse('api:user-todos',\n request=request, format=format),\n })\n\n\nclass ExportBadgesView(ListAPIView):\n \"\"\"List all badges and people who have them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n 
queryset = Badge.objects.prefetch_related('person_set')\n serializer_class = ExportBadgesSerializer\n\n\nclass ExportInstructorLocationsView(ListAPIView):\n \"\"\"List all airports and instructors located near them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Airport.objects.exclude(person=None) \\\n .prefetch_related('person_set')\n serializer_class = ExportInstructorLocationsSerializer\n\n\nclass ExportMembersView(ListAPIView):\n \"\"\"Show everyone who qualifies as an SCF member.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = PersonNameEmailSerializer\n\n def get_queryset(self):\n earliest_default, latest_default = default_membership_cutoff()\n\n earliest = self.request.query_params.get('earliest', None)\n if earliest is not None:\n try:\n earliest = datetime.datetime.strptime(earliest, '%Y-%m-%d') \\\n .date()\n except ValueError:\n earliest = earliest_default\n else:\n earliest = earliest_default\n\n latest = self.request.query_params.get('latest', None)\n if latest is not None:\n try:\n latest = datetime.datetime.strptime(latest, '%Y-%m-%d').date()\n except ValueError:\n latest = latest_default\n else:\n latest = latest_default\n\n return get_members(earliest, latest)\n\n def get_query_params_description(self):\n return {\n 'earliest': 'Date of earliest workshop someone taught at.'\n ' Defaults to -2*365 days from current date.',\n 'latest': 'Date of latest workshop someone taught at.'\n ' Defaults to current date.',\n }\n\n\nclass PublishedEvents(ListAPIView):\n \"\"\"List published events.\"\"\"\n\n # only events that have both a starting date and a URL\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = EventSerializer\n\n metadata_class = QueryMetadata\n\n def get_queryset(self):\n \"\"\"Optionally restrict the returned event set to events hosted by\n specific host or administered by specific admin.\"\"\"\n queryset = Event.objects.published_events()\n\n administrator = self.request.query_params.get('administrator', None)\n if administrator is not None:\n queryset = queryset.filter(administrator__pk=administrator)\n\n host = self.request.query_params.get('host', None)\n if host is not None:\n queryset = queryset.filter(host__pk=host)\n\n tags = self.request.query_params.getlist('tag', None)\n if tags:\n tags = Tag.objects.filter(name__in=tags)\n for tag in tags:\n queryset = queryset.filter(tags=tag)\n\n return queryset\n\n def get_query_params_description(self):\n return {\n 'administrator': 'ID of the organization responsible for admin '\n 'work on events.',\n 'host': 'ID of the organization hosting the event.',\n 'tag': \"Events' tag(s). You can use this parameter multiple \"\n \"times.\",\n }\n\n\nclass UserTodoItems(ListAPIView):\n permission_classes = (IsAuthenticated, )\n paginator = None\n serializer_class = TodoSerializer\n\n def get_queryset(self):\n \"\"\"Return current TODOs for currently logged in user.\"\"\"\n return TodoItem.objects.user(self.request.user) \\\n .incomplete() \\\n .exclude(due=None) \\\n .select_related('event')\n", "path": "api/views.py"}]} | 1,754 | 83 |
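The fix in the record above is a one-line queryset change: `UserTodoItems.get_queryset` now chains `.exclude(due=None)`, so TODO items with no `due` date are never returned by the API — a reasonable reading of the "unknown start" items the issue says break the timeline. A sketch of the resulting query shape; the manager methods are the ones visible in `api/views.py`, everything else is assumed context:

```python
# Sketch only: assumes a Django TodoItem manager with the custom user()/incomplete()
# methods used in api/views.py above; not a standalone runnable project.
def user_todo_queryset(request_user, todo_items):
    return (
        todo_items.user(request_user)   # current user's items
        .incomplete()                   # drop completed ones
        .exclude(due=None)              # new in the fix: drop undated items
        .select_related("event")        # avoid per-item event queries
    )
```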
gh_patches_debug_9431 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1272 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`pwndbg.gdblib.symbol.address` fail to handle the thread-local variables
<img width="1289" alt="截圖 2022-10-11 下午4 46 56" src="https://user-images.githubusercontent.com/61896187/195043471-185d98a1-0df4-43af-ba68-54f8e0853e31.png">
`pwndbg.gdblib.symbol.address('tcache')` will get error instead of return `None`.
Seems like it's because the error starts with `Cannot find thread-local storage` instead of `Cannot find thread-local variables`, so the following line fails to skip it:
https://github.com/pwndbg/pwndbg/blob/478a569cb3bef11f7011cd0a2f374d53ce5997fd/pwndbg/gdblib/symbol.py#L214-L217
cc: @gsingh93
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/gdblib/symbol.py`
Content:
```
1 """
2 Looking up addresses for function names / symbols, and
3 vice-versa.
4
5 Uses IDA when available if there isn't sufficient symbol
6 information available.
7 """
8 import os
9 import re
10
11 import elftools.common.exceptions
12 import elftools.elf.constants
13 import elftools.elf.elffile
14 import elftools.elf.segments
15 import gdb
16
17 import pwndbg.gdblib.android
18 import pwndbg.gdblib.arch
19 import pwndbg.gdblib.elf
20 import pwndbg.gdblib.events
21 import pwndbg.gdblib.file
22 import pwndbg.gdblib.memory
23 import pwndbg.gdblib.qemu
24 import pwndbg.gdblib.remote
25 import pwndbg.gdblib.stack
26 import pwndbg.ida
27 import pwndbg.lib.memoize
28 import pwndbg.vmmap
29
30
31 def _get_debug_file_directory():
32 """
33 Retrieve the debug file directory path.
34
35 The debug file directory path ('show debug-file-directory') is a comma-
36 separated list of directories which GDB will look in to find the binaries
37 currently loaded.
38 """
39 result = gdb.execute("show debug-file-directory", to_string=True, from_tty=False)
40 expr = r'The directory where separate debug symbols are searched for is "(.*)".\n'
41
42 match = re.search(expr, result)
43
44 if match:
45 return match.group(1)
46 return ""
47
48
49 def _set_debug_file_directory(d):
50 gdb.execute("set debug-file-directory %s" % d, to_string=True, from_tty=False)
51
52
53 def _add_debug_file_directory(d):
54 current = _get_debug_file_directory()
55 if current:
56 _set_debug_file_directory("%s:%s" % (current, d))
57 else:
58 _set_debug_file_directory(d)
59
60
61 if "/usr/lib/debug" not in _get_debug_file_directory():
62 _add_debug_file_directory("/usr/lib/debug")
63
64
65 _remote_files = {}
66
67
68 @pwndbg.gdblib.events.exit
69 def _reset_remote_files():
70 global _remote_files
71 _remote_files = {}
72
73
74 @pwndbg.gdblib.events.new_objfile
75 def _autofetch():
76 """ """
77 if not pwndbg.gdblib.remote.is_remote():
78 return
79
80 if pwndbg.gdblib.qemu.is_qemu_usermode():
81 return
82
83 if pwndbg.gdblib.android.is_android():
84 return
85
86 remote_files_dir = pwndbg.gdblib.file.remote_files_dir()
87 if remote_files_dir not in _get_debug_file_directory().split(":"):
88 _add_debug_file_directory(remote_files_dir)
89
90 for mapping in pwndbg.vmmap.get():
91 objfile = mapping.objfile
92
93 # Don't attempt to download things like '[stack]' and '[heap]'
94 if not objfile.startswith("/"):
95 continue
96
97 # Don't re-download things that we have already downloaded
98 if not objfile or objfile in _remote_files:
99 continue
100
101 msg = "Downloading %r from the remote server" % objfile
102 print(msg, end="")
103
104 try:
105 data = pwndbg.gdblib.file.get(objfile)
106 print("\r" + msg + ": OK")
107 except OSError:
108 # The file could not be downloaded :(
109 print("\r" + msg + ": Failed")
110 return
111
112 filename = os.path.basename(objfile)
113 local_path = os.path.join(remote_files_dir, filename)
114
115 with open(local_path, "wb+") as f:
116 f.write(data)
117
118 _remote_files[objfile] = local_path
119
120 base = None
121 for mapping in pwndbg.vmmap.get():
122 if mapping.objfile != objfile:
123 continue
124
125 if base is None or mapping.vaddr < base.vaddr:
126 base = mapping
127
128 if not base:
129 continue
130
131 base = base.vaddr
132
133 try:
134 elf = elftools.elf.elffile.ELFFile(open(local_path, "rb"))
135 except elftools.common.exceptions.ELFError:
136 continue
137
138 gdb_command = ["add-symbol-file", local_path, hex(int(base))]
139 for section in elf.iter_sections():
140 name = section.name # .decode('latin-1')
141 section = section.header
142 if not section.sh_flags & elftools.elf.constants.SH_FLAGS.SHF_ALLOC:
143 continue
144 gdb_command += ["-s", name, hex(int(base + section.sh_addr))]
145
146 print(" ".join(gdb_command))
147 # gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True)
148
149
150 @pwndbg.lib.memoize.reset_on_objfile
151 def get(address: int, gdb_only=False) -> str:
152 """
153 Retrieve the name for the symbol located at `address`
154 """
155 # Fast path
156 if address < pwndbg.gdblib.memory.MMAP_MIN_ADDR or address >= ((1 << 64) - 1):
157 return ""
158
159 # Don't look up stack addresses
160 if pwndbg.gdblib.stack.find(address):
161 return ""
162
163 # This sucks, but there's not a GDB API for this.
164 result = gdb.execute("info symbol %#x" % int(address), to_string=True, from_tty=False)
165
166 if not gdb_only and result.startswith("No symbol"):
167 address = int(address)
168 exe = pwndbg.gdblib.elf.exe()
169 if exe:
170 exe_map = pwndbg.vmmap.find(exe.address)
171 if exe_map and address in exe_map:
172 res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)
173 return res or ""
174
175 # Expected format looks like this:
176 # main in section .text of /bin/bash
177 # main + 3 in section .text of /bin/bash
178 # system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6
179 # No symbol matches system-1.
180 a, b, c, _ = result.split(None, 3)
181
182 if b == "+":
183 return "%s+%s" % (a, c)
184 if b == "in":
185 return a
186
187 return ""
188
189
190 @pwndbg.lib.memoize.reset_on_objfile
191 def address(symbol: str) -> int:
192 """
193 Get the address for `symbol`
194 """
195 try:
196 symbol_obj = gdb.lookup_symbol(symbol)[0]
197 if symbol_obj:
198 return int(symbol_obj.value().address)
199 except gdb.error as e:
200 # Symbol lookup only throws exceptions on errors, not if it failed to
201 # lookup a symbol. We want to raise these errors so we can handle them
202 # properly, but there are some we haven't figured out how to fix yet, so
203 # we ignore those here
204 skipped_exceptions = []
205
206 # This is exception is being thrown by the Go typeinfo tests, we should
207 # investigate why this is happening and see if we can explicitly check
208 # for it with `gdb.selected_frame()`
209 skipped_exceptions.append("No frame selected")
210
211 # If we try to look up a TLS variable when there is no TLS, this
212 # exception occurs. Ideally we should come up with a way to check for
213 # this case before calling `gdb.lookup_symbol`
214 skipped_exceptions.append("Cannot find thread-local variables")
215
216 if all(x not in str(e) for x in skipped_exceptions):
217 raise e
218
219 try:
220 # TODO: We should properly check if we have a connection to the IDA server first
221 address = pwndbg.ida.LocByName(symbol)
222 if address:
223 return address
224 except Exception:
225 pass
226
227
228 @pwndbg.lib.memoize.reset_on_objfile
229 def static_linkage_symbol_address(symbol):
230 if isinstance(symbol, int):
231 return symbol
232
233 try:
234 return int(symbol, 0)
235 except Exception:
236 pass
237
238 try:
239 symbol_obj = gdb.lookup_static_symbol(symbol)
240 return int(symbol_obj.value().address) if symbol_obj else None
241 except gdb.error:
242 return None
243
244
245 @pwndbg.gdblib.events.stop
246 @pwndbg.lib.memoize.reset_on_start
247 def _add_main_exe_to_symbols():
248 if not pwndbg.gdblib.remote.is_remote():
249 return
250
251 if pwndbg.gdblib.android.is_android():
252 return
253
254 exe = pwndbg.gdblib.elf.exe()
255
256 if not exe:
257 return
258
259 addr = exe.address
260
261 if not addr:
262 return
263
264 addr = int(addr)
265
266 mmap = pwndbg.vmmap.find(addr)
267 if not mmap:
268 return
269
270 path = mmap.objfile
271 if path and (pwndbg.gdblib.arch.endian == pwndbg.gdblib.arch.native_endian):
272 try:
273 gdb.execute("add-symbol-file %s" % (path,), from_tty=False, to_string=True)
274 except gdb.error:
275 pass
276
277
278 @pwndbg.lib.memoize.reset_on_stop
279 @pwndbg.lib.memoize.reset_on_start
280 def selected_frame_source_absolute_filename():
281 """
282 Retrieve the symbol table’s source absolute file name from the selected frame.
283
284 In case of missing symbol table or frame information, None is returned.
285 """
286 try:
287 frame = gdb.selected_frame()
288 except gdb.error:
289 return None
290
291 if not frame:
292 return None
293
294 sal = frame.find_sal()
295 if not sal:
296 return None
297
298 symtab = sal.symtab
299 if not symtab:
300 return None
301
302 return symtab.fullname()
303
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/gdblib/symbol.py b/pwndbg/gdblib/symbol.py
--- a/pwndbg/gdblib/symbol.py
+++ b/pwndbg/gdblib/symbol.py
@@ -211,7 +211,7 @@
# If we try to look up a TLS variable when there is no TLS, this
# exception occurs. Ideally we should come up with a way to check for
# this case before calling `gdb.lookup_symbol`
- skipped_exceptions.append("Cannot find thread-local variables")
+ skipped_exceptions.append("Cannot find thread-local")
if all(x not in str(e) for x in skipped_exceptions):
raise e
| {"golden_diff": "diff --git a/pwndbg/gdblib/symbol.py b/pwndbg/gdblib/symbol.py\n--- a/pwndbg/gdblib/symbol.py\n+++ b/pwndbg/gdblib/symbol.py\n@@ -211,7 +211,7 @@\n # If we try to look up a TLS variable when there is no TLS, this\n # exception occurs. Ideally we should come up with a way to check for\n # this case before calling `gdb.lookup_symbol`\n- skipped_exceptions.append(\"Cannot find thread-local variables\")\n+ skipped_exceptions.append(\"Cannot find thread-local\")\n \n if all(x not in str(e) for x in skipped_exceptions):\n raise e\n", "issue": "`pwndbg.gdblib.symbol.address` fail to handle the thread-local variables\n<img width=\"1289\" alt=\"\u622a\u5716 2022-10-11 \u4e0b\u53484 46 56\" src=\"https://user-images.githubusercontent.com/61896187/195043471-185d98a1-0df4-43af-ba68-54f8e0853e31.png\">\r\n\r\n`pwndbg.gdblib.symbol.address('tcache')` will get error instead of return `None`.\r\n\r\nSeems like it's because the error starts with `Cannot find thread-local storage` instead of `Cannot find thread-local variables`, so the following line fails to skip it:\r\n\r\nhttps://github.com/pwndbg/pwndbg/blob/478a569cb3bef11f7011cd0a2f374d53ce5997fd/pwndbg/gdblib/symbol.py#L214-L217\r\n\r\ncc: @gsingh93 \n", "before_files": [{"content": "\"\"\"\nLooking up addresses for function names / symbols, and\nvice-versa.\n\nUses IDA when available if there isn't sufficient symbol\ninformation available.\n\"\"\"\nimport os\nimport re\n\nimport elftools.common.exceptions\nimport elftools.elf.constants\nimport elftools.elf.elffile\nimport elftools.elf.segments\nimport gdb\n\nimport pwndbg.gdblib.android\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.elf\nimport pwndbg.gdblib.events\nimport pwndbg.gdblib.file\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.qemu\nimport pwndbg.gdblib.remote\nimport pwndbg.gdblib.stack\nimport pwndbg.ida\nimport pwndbg.lib.memoize\nimport pwndbg.vmmap\n\n\ndef _get_debug_file_directory():\n \"\"\"\n Retrieve the debug file directory path.\n\n The debug file directory path ('show debug-file-directory') is a comma-\n separated list of directories which GDB will look in to find the binaries\n currently loaded.\n \"\"\"\n result = gdb.execute(\"show debug-file-directory\", to_string=True, from_tty=False)\n expr = r'The directory where separate debug symbols are searched for is \"(.*)\".\\n'\n\n match = re.search(expr, result)\n\n if match:\n return match.group(1)\n return \"\"\n\n\ndef _set_debug_file_directory(d):\n gdb.execute(\"set debug-file-directory %s\" % d, to_string=True, from_tty=False)\n\n\ndef _add_debug_file_directory(d):\n current = _get_debug_file_directory()\n if current:\n _set_debug_file_directory(\"%s:%s\" % (current, d))\n else:\n _set_debug_file_directory(d)\n\n\nif \"/usr/lib/debug\" not in _get_debug_file_directory():\n _add_debug_file_directory(\"/usr/lib/debug\")\n\n\n_remote_files = {}\n\n\[email protected]\ndef _reset_remote_files():\n global _remote_files\n _remote_files = {}\n\n\[email protected]_objfile\ndef _autofetch():\n \"\"\" \"\"\"\n if not pwndbg.gdblib.remote.is_remote():\n return\n\n if pwndbg.gdblib.qemu.is_qemu_usermode():\n return\n\n if pwndbg.gdblib.android.is_android():\n return\n\n remote_files_dir = pwndbg.gdblib.file.remote_files_dir()\n if remote_files_dir not in _get_debug_file_directory().split(\":\"):\n _add_debug_file_directory(remote_files_dir)\n\n for mapping in pwndbg.vmmap.get():\n objfile = mapping.objfile\n\n # Don't attempt to download things like '[stack]' and '[heap]'\n if not objfile.startswith(\"/\"):\n 
continue\n\n # Don't re-download things that we have already downloaded\n if not objfile or objfile in _remote_files:\n continue\n\n msg = \"Downloading %r from the remote server\" % objfile\n print(msg, end=\"\")\n\n try:\n data = pwndbg.gdblib.file.get(objfile)\n print(\"\\r\" + msg + \": OK\")\n except OSError:\n # The file could not be downloaded :(\n print(\"\\r\" + msg + \": Failed\")\n return\n\n filename = os.path.basename(objfile)\n local_path = os.path.join(remote_files_dir, filename)\n\n with open(local_path, \"wb+\") as f:\n f.write(data)\n\n _remote_files[objfile] = local_path\n\n base = None\n for mapping in pwndbg.vmmap.get():\n if mapping.objfile != objfile:\n continue\n\n if base is None or mapping.vaddr < base.vaddr:\n base = mapping\n\n if not base:\n continue\n\n base = base.vaddr\n\n try:\n elf = elftools.elf.elffile.ELFFile(open(local_path, \"rb\"))\n except elftools.common.exceptions.ELFError:\n continue\n\n gdb_command = [\"add-symbol-file\", local_path, hex(int(base))]\n for section in elf.iter_sections():\n name = section.name # .decode('latin-1')\n section = section.header\n if not section.sh_flags & elftools.elf.constants.SH_FLAGS.SHF_ALLOC:\n continue\n gdb_command += [\"-s\", name, hex(int(base + section.sh_addr))]\n\n print(\" \".join(gdb_command))\n # gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True)\n\n\[email protected]_on_objfile\ndef get(address: int, gdb_only=False) -> str:\n \"\"\"\n Retrieve the name for the symbol located at `address`\n \"\"\"\n # Fast path\n if address < pwndbg.gdblib.memory.MMAP_MIN_ADDR or address >= ((1 << 64) - 1):\n return \"\"\n\n # Don't look up stack addresses\n if pwndbg.gdblib.stack.find(address):\n return \"\"\n\n # This sucks, but there's not a GDB API for this.\n result = gdb.execute(\"info symbol %#x\" % int(address), to_string=True, from_tty=False)\n\n if not gdb_only and result.startswith(\"No symbol\"):\n address = int(address)\n exe = pwndbg.gdblib.elf.exe()\n if exe:\n exe_map = pwndbg.vmmap.find(exe.address)\n if exe_map and address in exe_map:\n res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)\n return res or \"\"\n\n # Expected format looks like this:\n # main in section .text of /bin/bash\n # main + 3 in section .text of /bin/bash\n # system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6\n # No symbol matches system-1.\n a, b, c, _ = result.split(None, 3)\n\n if b == \"+\":\n return \"%s+%s\" % (a, c)\n if b == \"in\":\n return a\n\n return \"\"\n\n\[email protected]_on_objfile\ndef address(symbol: str) -> int:\n \"\"\"\n Get the address for `symbol`\n \"\"\"\n try:\n symbol_obj = gdb.lookup_symbol(symbol)[0]\n if symbol_obj:\n return int(symbol_obj.value().address)\n except gdb.error as e:\n # Symbol lookup only throws exceptions on errors, not if it failed to\n # lookup a symbol. We want to raise these errors so we can handle them\n # properly, but there are some we haven't figured out how to fix yet, so\n # we ignore those here\n skipped_exceptions = []\n\n # This is exception is being thrown by the Go typeinfo tests, we should\n # investigate why this is happening and see if we can explicitly check\n # for it with `gdb.selected_frame()`\n skipped_exceptions.append(\"No frame selected\")\n\n # If we try to look up a TLS variable when there is no TLS, this\n # exception occurs. 
Ideally we should come up with a way to check for\n # this case before calling `gdb.lookup_symbol`\n skipped_exceptions.append(\"Cannot find thread-local variables\")\n\n if all(x not in str(e) for x in skipped_exceptions):\n raise e\n\n try:\n # TODO: We should properly check if we have a connection to the IDA server first\n address = pwndbg.ida.LocByName(symbol)\n if address:\n return address\n except Exception:\n pass\n\n\[email protected]_on_objfile\ndef static_linkage_symbol_address(symbol):\n if isinstance(symbol, int):\n return symbol\n\n try:\n return int(symbol, 0)\n except Exception:\n pass\n\n try:\n symbol_obj = gdb.lookup_static_symbol(symbol)\n return int(symbol_obj.value().address) if symbol_obj else None\n except gdb.error:\n return None\n\n\[email protected]\[email protected]_on_start\ndef _add_main_exe_to_symbols():\n if not pwndbg.gdblib.remote.is_remote():\n return\n\n if pwndbg.gdblib.android.is_android():\n return\n\n exe = pwndbg.gdblib.elf.exe()\n\n if not exe:\n return\n\n addr = exe.address\n\n if not addr:\n return\n\n addr = int(addr)\n\n mmap = pwndbg.vmmap.find(addr)\n if not mmap:\n return\n\n path = mmap.objfile\n if path and (pwndbg.gdblib.arch.endian == pwndbg.gdblib.arch.native_endian):\n try:\n gdb.execute(\"add-symbol-file %s\" % (path,), from_tty=False, to_string=True)\n except gdb.error:\n pass\n\n\[email protected]_on_stop\[email protected]_on_start\ndef selected_frame_source_absolute_filename():\n \"\"\"\n Retrieve the symbol table\u2019s source absolute file name from the selected frame.\n\n In case of missing symbol table or frame information, None is returned.\n \"\"\"\n try:\n frame = gdb.selected_frame()\n except gdb.error:\n return None\n\n if not frame:\n return None\n\n sal = frame.find_sal()\n if not sal:\n return None\n\n symtab = sal.symtab\n if not symtab:\n return None\n\n return symtab.fullname()\n", "path": "pwndbg/gdblib/symbol.py"}], "after_files": [{"content": "\"\"\"\nLooking up addresses for function names / symbols, and\nvice-versa.\n\nUses IDA when available if there isn't sufficient symbol\ninformation available.\n\"\"\"\nimport os\nimport re\n\nimport elftools.common.exceptions\nimport elftools.elf.constants\nimport elftools.elf.elffile\nimport elftools.elf.segments\nimport gdb\n\nimport pwndbg.gdblib.android\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.elf\nimport pwndbg.gdblib.events\nimport pwndbg.gdblib.file\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.qemu\nimport pwndbg.gdblib.remote\nimport pwndbg.gdblib.stack\nimport pwndbg.ida\nimport pwndbg.lib.memoize\nimport pwndbg.vmmap\n\n\ndef _get_debug_file_directory():\n \"\"\"\n Retrieve the debug file directory path.\n\n The debug file directory path ('show debug-file-directory') is a comma-\n separated list of directories which GDB will look in to find the binaries\n currently loaded.\n \"\"\"\n result = gdb.execute(\"show debug-file-directory\", to_string=True, from_tty=False)\n expr = r'The directory where separate debug symbols are searched for is \"(.*)\".\\n'\n\n match = re.search(expr, result)\n\n if match:\n return match.group(1)\n return \"\"\n\n\ndef _set_debug_file_directory(d):\n gdb.execute(\"set debug-file-directory %s\" % d, to_string=True, from_tty=False)\n\n\ndef _add_debug_file_directory(d):\n current = _get_debug_file_directory()\n if current:\n _set_debug_file_directory(\"%s:%s\" % (current, d))\n else:\n _set_debug_file_directory(d)\n\n\nif \"/usr/lib/debug\" not in _get_debug_file_directory():\n 
_add_debug_file_directory(\"/usr/lib/debug\")\n\n\n_remote_files = {}\n\n\[email protected]\ndef _reset_remote_files():\n global _remote_files\n _remote_files = {}\n\n\[email protected]_objfile\ndef _autofetch():\n \"\"\" \"\"\"\n if not pwndbg.gdblib.remote.is_remote():\n return\n\n if pwndbg.gdblib.qemu.is_qemu_usermode():\n return\n\n if pwndbg.gdblib.android.is_android():\n return\n\n remote_files_dir = pwndbg.gdblib.file.remote_files_dir()\n if remote_files_dir not in _get_debug_file_directory().split(\":\"):\n _add_debug_file_directory(remote_files_dir)\n\n for mapping in pwndbg.vmmap.get():\n objfile = mapping.objfile\n\n # Don't attempt to download things like '[stack]' and '[heap]'\n if not objfile.startswith(\"/\"):\n continue\n\n # Don't re-download things that we have already downloaded\n if not objfile or objfile in _remote_files:\n continue\n\n msg = \"Downloading %r from the remote server\" % objfile\n print(msg, end=\"\")\n\n try:\n data = pwndbg.gdblib.file.get(objfile)\n print(\"\\r\" + msg + \": OK\")\n except OSError:\n # The file could not be downloaded :(\n print(\"\\r\" + msg + \": Failed\")\n return\n\n filename = os.path.basename(objfile)\n local_path = os.path.join(remote_files_dir, filename)\n\n with open(local_path, \"wb+\") as f:\n f.write(data)\n\n _remote_files[objfile] = local_path\n\n base = None\n for mapping in pwndbg.vmmap.get():\n if mapping.objfile != objfile:\n continue\n\n if base is None or mapping.vaddr < base.vaddr:\n base = mapping\n\n if not base:\n continue\n\n base = base.vaddr\n\n try:\n elf = elftools.elf.elffile.ELFFile(open(local_path, \"rb\"))\n except elftools.common.exceptions.ELFError:\n continue\n\n gdb_command = [\"add-symbol-file\", local_path, hex(int(base))]\n for section in elf.iter_sections():\n name = section.name # .decode('latin-1')\n section = section.header\n if not section.sh_flags & elftools.elf.constants.SH_FLAGS.SHF_ALLOC:\n continue\n gdb_command += [\"-s\", name, hex(int(base + section.sh_addr))]\n\n print(\" \".join(gdb_command))\n # gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True)\n\n\[email protected]_on_objfile\ndef get(address: int, gdb_only=False) -> str:\n \"\"\"\n Retrieve the name for the symbol located at `address`\n \"\"\"\n # Fast path\n if address < pwndbg.gdblib.memory.MMAP_MIN_ADDR or address >= ((1 << 64) - 1):\n return \"\"\n\n # Don't look up stack addresses\n if pwndbg.gdblib.stack.find(address):\n return \"\"\n\n # This sucks, but there's not a GDB API for this.\n result = gdb.execute(\"info symbol %#x\" % int(address), to_string=True, from_tty=False)\n\n if not gdb_only and result.startswith(\"No symbol\"):\n address = int(address)\n exe = pwndbg.gdblib.elf.exe()\n if exe:\n exe_map = pwndbg.vmmap.find(exe.address)\n if exe_map and address in exe_map:\n res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)\n return res or \"\"\n\n # Expected format looks like this:\n # main in section .text of /bin/bash\n # main + 3 in section .text of /bin/bash\n # system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6\n # No symbol matches system-1.\n a, b, c, _ = result.split(None, 3)\n\n if b == \"+\":\n return \"%s+%s\" % (a, c)\n if b == \"in\":\n return a\n\n return \"\"\n\n\[email protected]_on_objfile\ndef address(symbol: str) -> int:\n \"\"\"\n Get the address for `symbol`\n \"\"\"\n try:\n symbol_obj = gdb.lookup_symbol(symbol)[0]\n if symbol_obj:\n return int(symbol_obj.value().address)\n except gdb.error as e:\n # Symbol lookup only throws exceptions on errors, 
not if it failed to\n # lookup a symbol. We want to raise these errors so we can handle them\n # properly, but there are some we haven't figured out how to fix yet, so\n # we ignore those here\n skipped_exceptions = []\n\n # This is exception is being thrown by the Go typeinfo tests, we should\n # investigate why this is happening and see if we can explicitly check\n # for it with `gdb.selected_frame()`\n skipped_exceptions.append(\"No frame selected\")\n\n # If we try to look up a TLS variable when there is no TLS, this\n # exception occurs. Ideally we should come up with a way to check for\n # this case before calling `gdb.lookup_symbol`\n skipped_exceptions.append(\"Cannot find thread-local\")\n\n if all(x not in str(e) for x in skipped_exceptions):\n raise e\n\n try:\n # TODO: We should properly check if we have a connection to the IDA server first\n address = pwndbg.ida.LocByName(symbol)\n if address:\n return address\n except Exception:\n pass\n\n\[email protected]_on_objfile\ndef static_linkage_symbol_address(symbol):\n if isinstance(symbol, int):\n return symbol\n\n try:\n return int(symbol, 0)\n except Exception:\n pass\n\n try:\n symbol_obj = gdb.lookup_static_symbol(symbol)\n return int(symbol_obj.value().address) if symbol_obj else None\n except gdb.error:\n return None\n\n\[email protected]\[email protected]_on_start\ndef _add_main_exe_to_symbols():\n if not pwndbg.gdblib.remote.is_remote():\n return\n\n if pwndbg.gdblib.android.is_android():\n return\n\n exe = pwndbg.gdblib.elf.exe()\n\n if not exe:\n return\n\n addr = exe.address\n\n if not addr:\n return\n\n addr = int(addr)\n\n mmap = pwndbg.vmmap.find(addr)\n if not mmap:\n return\n\n path = mmap.objfile\n if path and (pwndbg.gdblib.arch.endian == pwndbg.gdblib.arch.native_endian):\n try:\n gdb.execute(\"add-symbol-file %s\" % (path,), from_tty=False, to_string=True)\n except gdb.error:\n pass\n\n\[email protected]_on_stop\[email protected]_on_start\ndef selected_frame_source_absolute_filename():\n \"\"\"\n Retrieve the symbol table\u2019s source absolute file name from the selected frame.\n\n In case of missing symbol table or frame information, None is returned.\n \"\"\"\n try:\n frame = gdb.selected_frame()\n except gdb.error:\n return None\n\n if not frame:\n return None\n\n sal = frame.find_sal()\n if not sal:\n return None\n\n symtab = sal.symtab\n if not symtab:\n return None\n\n return symtab.fullname()\n", "path": "pwndbg/gdblib/symbol.py"}]} | 3,412 | 153 |
gh_patches_debug_3378 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-2055 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WandbLogger warning not logging logs.
<!--
### Common bugs:
1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79).
2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq)
-->
## 🐛 Bug
WandbLogger giving warning: `WARNING Adding to old History rows isn't currently supported. Step 25 < 38` and not logging when I try to use the WandbLogger with k-fold cross-validation because there I am using the same instance of `wandb_logger` but using `trainer.fit` multiple times for different train_dl and valid_dl. Since the step gets repeated in each case, it's not logging anything after the 1st fold is complete even though the log keys are completely different. It was working perfectly with pytorch-lightning v-0.7.4. For now, I have to create separate experiments for each fold which are hard to analyze on wandb.
<!-- A clear and concise description of what the bug is. -->
### To Reproduce
#### Code sample
[Colab Notebook](https://colab.research.google.com/drive/1fg-VVeSB71dWEH1qDvvdXTyfxeU--vsM?usp=sharing)
<!-- Ideally attach a minimal code sample to reproduce the described issue.
Minimal means having the shortest code but still preserving the bug. -->
### Expected behavior
It should log even when the global_step is repeated in case if the logs keys are different.
<!-- A clear and concise description of what you expected to happen. -->
### Environment
- PyTorch Version (e.g., 1.0): 1.5.0
- OS (e.g., Linux): Linux
- How you installed PyTorch (`conda`, `pip`, source): pip
- Build command you used (if compiling from source):
- Python version: 3.7
- CUDA/cuDNN version: 10.1
- GPU models and configuration: Tesla P100-PCIE-16GB
- Any other relevant information:
### Additional context
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/loggers/wandb.py`
Content:
```
1 """
2 Weights and Biases
3 ------------------
4 """
5 import os
6 from argparse import Namespace
7 from typing import Optional, List, Dict, Union, Any
8
9 import torch.nn as nn
10
11 try:
12 import wandb
13 from wandb.wandb_run import Run
14 _WANDB_AVAILABLE = True
15 except ImportError: # pragma: no-cover
16 wandb = None
17 Run = None
18 _WANDB_AVAILABLE = False
19
20 from pytorch_lightning.loggers.base import LightningLoggerBase
21 from pytorch_lightning.utilities import rank_zero_only
22
23
24 class WandbLogger(LightningLoggerBase):
25 """
26 Log using `Weights and Biases <https://www.wandb.com/>`_. Install it with pip:
27
28 .. code-block:: bash
29
30 pip install wandb
31
32 Args:
33 name: Display name for the run.
34 save_dir: Path where data is saved.
35 offline: Run offline (data can be streamed later to wandb servers).
36 id: Sets the version, mainly used to resume a previous run.
37 anonymous: Enables or explicitly disables anonymous logging.
38 version: Sets the version, mainly used to resume a previous run.
39 project: The name of the project to which this run will belong.
40 tags: Tags associated with this run.
41 log_model: Save checkpoints in wandb dir to upload on W&B servers.
42 experiment: WandB experiment object
43 entity: The team posting this run (default: your username or your default team)
44 group: A unique string shared by all runs in a given group
45
46 Example:
47 >>> from pytorch_lightning.loggers import WandbLogger
48 >>> from pytorch_lightning import Trainer
49 >>> wandb_logger = WandbLogger()
50 >>> trainer = Trainer(logger=wandb_logger)
51
52 See Also:
53 - `Tutorial <https://app.wandb.ai/cayush/pytorchlightning/reports/
54 Use-Pytorch-Lightning-with-Weights-%26-Biases--Vmlldzo2NjQ1Mw>`__
55 on how to use W&B with Pytorch Lightning.
56
57 """
58
59 def __init__(self,
60 name: Optional[str] = None,
61 save_dir: Optional[str] = None,
62 offline: bool = False,
63 id: Optional[str] = None,
64 anonymous: bool = False,
65 version: Optional[str] = None,
66 project: Optional[str] = None,
67 tags: Optional[List[str]] = None,
68 log_model: bool = False,
69 experiment=None,
70 entity=None,
71 group: Optional[str] = None):
72 if not _WANDB_AVAILABLE:
73 raise ImportError('You want to use `wandb` logger which is not installed yet,' # pragma: no-cover
74 ' install it with `pip install wandb`.')
75 super().__init__()
76 self._name = name
77 self._save_dir = save_dir
78 self._anonymous = 'allow' if anonymous else None
79 self._id = version or id
80 self._tags = tags
81 self._project = project
82 self._experiment = experiment
83 self._offline = offline
84 self._entity = entity
85 self._log_model = log_model
86 self._group = group
87
88 def __getstate__(self):
89 state = self.__dict__.copy()
90 # args needed to reload correct experiment
91 state['_id'] = self._experiment.id if self._experiment is not None else None
92
93 # cannot be pickled
94 state['_experiment'] = None
95 return state
96
97 @property
98 def experiment(self) -> Run:
99 r"""
100
101 Actual wandb object. To use wandb features in your
102 :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
103
104 Example::
105
106 self.logger.experiment.some_wandb_function()
107
108 """
109 if self._experiment is None:
110 if self._offline:
111 os.environ['WANDB_MODE'] = 'dryrun'
112 self._experiment = wandb.init(
113 name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,
114 reinit=True, id=self._id, resume='allow', tags=self._tags, entity=self._entity,
115 group=self._group)
116 # save checkpoints in wandb dir to upload on W&B servers
117 if self._log_model:
118 self.save_dir = self._experiment.dir
119 return self._experiment
120
121 def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):
122 self.experiment.watch(model, log=log, log_freq=log_freq)
123
124 @rank_zero_only
125 def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
126 params = self._convert_params(params)
127 self.experiment.config.update(params, allow_val_change=True)
128
129 @rank_zero_only
130 def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
131 self.experiment.log(metrics, step=step)
132
133 @property
134 def name(self) -> str:
135 # don't create an experiment if we don't have one
136 name = self._experiment.project_name() if self._experiment else None
137 return name
138
139 @property
140 def version(self) -> str:
141 # don't create an experiment if we don't have one
142 return self._experiment.id if self._experiment else None
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py
--- a/pytorch_lightning/loggers/wandb.py
+++ b/pytorch_lightning/loggers/wandb.py
@@ -128,7 +128,7 @@
@rank_zero_only
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
- self.experiment.log(metrics, step=step)
+ self.experiment.log({'global_step': step, **metrics} if step is not None else metrics)
@property
def name(self) -> str:
| {"golden_diff": "diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py\n--- a/pytorch_lightning/loggers/wandb.py\n+++ b/pytorch_lightning/loggers/wandb.py\n@@ -128,7 +128,7 @@\n \n @rank_zero_only\n def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:\n- self.experiment.log(metrics, step=step)\n+ self.experiment.log({'global_step': step, **metrics} if step is not None else metrics)\n \n @property\n def name(self) -> str:\n", "issue": "WandbLogger warning not logging logs.\n<!-- \r\n### Common bugs:\r\n1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). \r\n2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) \r\n-->\r\n\r\n## \ud83d\udc1b Bug\r\nWandbLogger giving warning: `WARNING Adding to old History rows isn't currently supported. Step 25 < 38` and not logging when I try to use the WandbLogger with k-fold cross-validation because there I am using the same instance of `wandb_logger` but using `trainer.fit` multiple times for different train_dl and valid_dl. Since the step gets repeated in each case, it's not logging anything after the 1st fold is complete even though the log keys are completely different. It was working perfectly with pytorch-lightning v-0.7.4. For now, I have to create separate experiments for each fold which are hard to analyze on wandb.\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n### To Reproduce\r\n#### Code sample\r\n[Colab Notebook](https://colab.research.google.com/drive/1fg-VVeSB71dWEH1qDvvdXTyfxeU--vsM?usp=sharing)\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue. \r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n### Expected behavior\r\nIt should log even when the global_step is repeated in case if the logs keys are different.\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n - PyTorch Version (e.g., 1.0): 1.5.0\r\n - OS (e.g., Linux): Linux\r\n - How you installed PyTorch (`conda`, `pip`, source): pip\r\n - Build command you used (if compiling from source):\r\n - Python version: 3.7\r\n - CUDA/cuDNN version: 10.1\r\n - GPU models and configuration: Tesla P100-PCIE-16GB\r\n - Any other relevant information:\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\nWandbLogger warning not logging logs.\n<!-- \r\n### Common bugs:\r\n1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). \r\n2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) \r\n-->\r\n\r\n## \ud83d\udc1b Bug\r\nWandbLogger giving warning: `WARNING Adding to old History rows isn't currently supported. Step 25 < 38` and not logging when I try to use the WandbLogger with k-fold cross-validation because there I am using the same instance of `wandb_logger` but using `trainer.fit` multiple times for different train_dl and valid_dl. Since the step gets repeated in each case, it's not logging anything after the 1st fold is complete even though the log keys are completely different. It was working perfectly with pytorch-lightning v-0.7.4. For now, I have to create separate experiments for each fold which are hard to analyze on wandb.\r\n<!-- A clear and concise description of what the bug is. 
-->\r\n\r\n### To Reproduce\r\n#### Code sample\r\n[Colab Notebook](https://colab.research.google.com/drive/1fg-VVeSB71dWEH1qDvvdXTyfxeU--vsM?usp=sharing)\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue. \r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n### Expected behavior\r\nIt should log even when the global_step is repeated in case if the logs keys are different.\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n - PyTorch Version (e.g., 1.0): 1.5.0\r\n - OS (e.g., Linux): Linux\r\n - How you installed PyTorch (`conda`, `pip`, source): pip\r\n - Build command you used (if compiling from source):\r\n - Python version: 3.7\r\n - CUDA/cuDNN version: 10.1\r\n - GPU models and configuration: Tesla P100-PCIE-16GB\r\n - Any other relevant information:\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "\"\"\"\nWeights and Biases\n------------------\n\"\"\"\nimport os\nfrom argparse import Namespace\nfrom typing import Optional, List, Dict, Union, Any\n\nimport torch.nn as nn\n\ntry:\n import wandb\n from wandb.wandb_run import Run\n _WANDB_AVAILABLE = True\nexcept ImportError: # pragma: no-cover\n wandb = None\n Run = None\n _WANDB_AVAILABLE = False\n\nfrom pytorch_lightning.loggers.base import LightningLoggerBase\nfrom pytorch_lightning.utilities import rank_zero_only\n\n\nclass WandbLogger(LightningLoggerBase):\n \"\"\"\n Log using `Weights and Biases <https://www.wandb.com/>`_. Install it with pip:\n\n .. code-block:: bash\n\n pip install wandb\n\n Args:\n name: Display name for the run.\n save_dir: Path where data is saved.\n offline: Run offline (data can be streamed later to wandb servers).\n id: Sets the version, mainly used to resume a previous run.\n anonymous: Enables or explicitly disables anonymous logging.\n version: Sets the version, mainly used to resume a previous run.\n project: The name of the project to which this run will belong.\n tags: Tags associated with this run.\n log_model: Save checkpoints in wandb dir to upload on W&B servers.\n experiment: WandB experiment object\n entity: The team posting this run (default: your username or your default team)\n group: A unique string shared by all runs in a given group\n\n Example:\n >>> from pytorch_lightning.loggers import WandbLogger\n >>> from pytorch_lightning import Trainer\n >>> wandb_logger = WandbLogger()\n >>> trainer = Trainer(logger=wandb_logger)\n\n See Also:\n - `Tutorial <https://app.wandb.ai/cayush/pytorchlightning/reports/\n Use-Pytorch-Lightning-with-Weights-%26-Biases--Vmlldzo2NjQ1Mw>`__\n on how to use W&B with Pytorch Lightning.\n\n \"\"\"\n\n def __init__(self,\n name: Optional[str] = None,\n save_dir: Optional[str] = None,\n offline: bool = False,\n id: Optional[str] = None,\n anonymous: bool = False,\n version: Optional[str] = None,\n project: Optional[str] = None,\n tags: Optional[List[str]] = None,\n log_model: bool = False,\n experiment=None,\n entity=None,\n group: Optional[str] = None):\n if not _WANDB_AVAILABLE:\n raise ImportError('You want to use `wandb` logger which is not installed yet,' # pragma: no-cover\n ' install it with `pip install wandb`.')\n super().__init__()\n self._name = name\n self._save_dir = save_dir\n self._anonymous = 'allow' if anonymous else None\n self._id = version or id\n self._tags = tags\n self._project = project\n self._experiment = experiment\n self._offline = offline\n 
self._entity = entity\n self._log_model = log_model\n self._group = group\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # args needed to reload correct experiment\n state['_id'] = self._experiment.id if self._experiment is not None else None\n\n # cannot be pickled\n state['_experiment'] = None\n return state\n\n @property\n def experiment(self) -> Run:\n r\"\"\"\n\n Actual wandb object. To use wandb features in your\n :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.\n\n Example::\n\n self.logger.experiment.some_wandb_function()\n\n \"\"\"\n if self._experiment is None:\n if self._offline:\n os.environ['WANDB_MODE'] = 'dryrun'\n self._experiment = wandb.init(\n name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,\n reinit=True, id=self._id, resume='allow', tags=self._tags, entity=self._entity,\n group=self._group)\n # save checkpoints in wandb dir to upload on W&B servers\n if self._log_model:\n self.save_dir = self._experiment.dir\n return self._experiment\n\n def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):\n self.experiment.watch(model, log=log, log_freq=log_freq)\n\n @rank_zero_only\n def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:\n params = self._convert_params(params)\n self.experiment.config.update(params, allow_val_change=True)\n\n @rank_zero_only\n def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:\n self.experiment.log(metrics, step=step)\n\n @property\n def name(self) -> str:\n # don't create an experiment if we don't have one\n name = self._experiment.project_name() if self._experiment else None\n return name\n\n @property\n def version(self) -> str:\n # don't create an experiment if we don't have one\n return self._experiment.id if self._experiment else None\n", "path": "pytorch_lightning/loggers/wandb.py"}], "after_files": [{"content": "\"\"\"\nWeights and Biases\n------------------\n\"\"\"\nimport os\nfrom argparse import Namespace\nfrom typing import Optional, List, Dict, Union, Any\n\nimport torch.nn as nn\n\ntry:\n import wandb\n from wandb.wandb_run import Run\n _WANDB_AVAILABLE = True\nexcept ImportError: # pragma: no-cover\n wandb = None\n Run = None\n _WANDB_AVAILABLE = False\n\nfrom pytorch_lightning.loggers.base import LightningLoggerBase\nfrom pytorch_lightning.utilities import rank_zero_only\n\n\nclass WandbLogger(LightningLoggerBase):\n \"\"\"\n Log using `Weights and Biases <https://www.wandb.com/>`_. Install it with pip:\n\n .. 
code-block:: bash\n\n pip install wandb\n\n Args:\n name: Display name for the run.\n save_dir: Path where data is saved.\n offline: Run offline (data can be streamed later to wandb servers).\n id: Sets the version, mainly used to resume a previous run.\n anonymous: Enables or explicitly disables anonymous logging.\n version: Sets the version, mainly used to resume a previous run.\n project: The name of the project to which this run will belong.\n tags: Tags associated with this run.\n log_model: Save checkpoints in wandb dir to upload on W&B servers.\n experiment: WandB experiment object\n entity: The team posting this run (default: your username or your default team)\n group: A unique string shared by all runs in a given group\n\n Example:\n >>> from pytorch_lightning.loggers import WandbLogger\n >>> from pytorch_lightning import Trainer\n >>> wandb_logger = WandbLogger()\n >>> trainer = Trainer(logger=wandb_logger)\n\n See Also:\n - `Tutorial <https://app.wandb.ai/cayush/pytorchlightning/reports/\n Use-Pytorch-Lightning-with-Weights-%26-Biases--Vmlldzo2NjQ1Mw>`__\n on how to use W&B with Pytorch Lightning.\n\n \"\"\"\n\n def __init__(self,\n name: Optional[str] = None,\n save_dir: Optional[str] = None,\n offline: bool = False,\n id: Optional[str] = None,\n anonymous: bool = False,\n version: Optional[str] = None,\n project: Optional[str] = None,\n tags: Optional[List[str]] = None,\n log_model: bool = False,\n experiment=None,\n entity=None,\n group: Optional[str] = None):\n if not _WANDB_AVAILABLE:\n raise ImportError('You want to use `wandb` logger which is not installed yet,' # pragma: no-cover\n ' install it with `pip install wandb`.')\n super().__init__()\n self._name = name\n self._save_dir = save_dir\n self._anonymous = 'allow' if anonymous else None\n self._id = version or id\n self._tags = tags\n self._project = project\n self._experiment = experiment\n self._offline = offline\n self._entity = entity\n self._log_model = log_model\n self._group = group\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # args needed to reload correct experiment\n state['_id'] = self._experiment.id if self._experiment is not None else None\n\n # cannot be pickled\n state['_experiment'] = None\n return state\n\n @property\n def experiment(self) -> Run:\n r\"\"\"\n\n Actual wandb object. 
To use wandb features in your\n :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.\n\n Example::\n\n self.logger.experiment.some_wandb_function()\n\n \"\"\"\n if self._experiment is None:\n if self._offline:\n os.environ['WANDB_MODE'] = 'dryrun'\n self._experiment = wandb.init(\n name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,\n reinit=True, id=self._id, resume='allow', tags=self._tags, entity=self._entity,\n group=self._group)\n # save checkpoints in wandb dir to upload on W&B servers\n if self._log_model:\n self.save_dir = self._experiment.dir\n return self._experiment\n\n def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):\n self.experiment.watch(model, log=log, log_freq=log_freq)\n\n @rank_zero_only\n def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:\n params = self._convert_params(params)\n self.experiment.config.update(params, allow_val_change=True)\n\n @rank_zero_only\n def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:\n self.experiment.log({'global_step': step, **metrics} if step is not None else metrics)\n\n @property\n def name(self) -> str:\n # don't create an experiment if we don't have one\n name = self._experiment.project_name() if self._experiment else None\n return name\n\n @property\n def version(self) -> str:\n # don't create an experiment if we don't have one\n return self._experiment.id if self._experiment else None\n", "path": "pytorch_lightning/loggers/wandb.py"}]} | 2,779 | 145 |
gh_patches_debug_19841 | rasdani/github-patches | git_diff | svthalia__concrexit-3648 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rate-limit and stricter validation on member registration view
See responsible disclosure email. https://staging.thalia.nu/association/register/member/ triggers sending an email. We should ratelimit it quite strictly.
This email also contains information (name, more?) provided by the user. While that information is properly HTML-escaped, some mail clients do render urls in it as clickable. And for example inserting newlines may be possible, so that someone could at least make the emails ugly.
So it wouldn't hurt to perform some extra validation of names.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/registrations/views.py`
Content:
```
1 """Views provided by the registrations package."""
2 from django.conf import settings
3 from django.contrib import messages
4 from django.contrib.admin.views.decorators import staff_member_required
5 from django.contrib.auth.decorators import login_required, permission_required
6 from django.db.models import Q
7 from django.http import Http404
8 from django.shortcuts import get_object_or_404, redirect
9 from django.template.defaultfilters import floatformat
10 from django.urls import reverse
11 from django.utils import timezone
12 from django.utils.decorators import method_decorator
13 from django.views import View
14 from django.views.generic import CreateView, FormView
15 from django.views.generic.base import TemplateResponseMixin, TemplateView
16
17 from members.decorators import membership_required
18 from members.models import Membership
19
20 from . import emails, forms, services
21 from .models import Entry, Reference, Registration, Renewal
22
23
24 class BecomeAMemberView(TemplateView):
25 """View that render a HTML template with context data."""
26
27 template_name = "registrations/become_a_member.html"
28
29 def get_context_data(self, **kwargs):
30 context = super().get_context_data(**kwargs)
31 context["year_fees"] = floatformat(
32 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2
33 )
34 context["study_fees"] = floatformat(
35 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2
36 )
37 return context
38
39
40 @method_decorator(staff_member_required, name="dispatch")
41 @method_decorator(
42 permission_required("registrations.review_entries"),
43 name="dispatch",
44 )
45 class EntryAdminView(View):
46 """View that handles the processing of entries."""
47
48 def post(self, request, *args, **kwargs):
49 action = request.POST.get("action")
50 entry = get_object_or_404(Entry, pk=kwargs["pk"])
51
52 registration = getattr(entry, "registration", None)
53 renewal = getattr(entry, "renewal", None)
54
55 if action == "accept":
56 if registration is not None:
57 if not registration.check_user_is_unique():
58 messages.error(
59 request,
60 f"Could not accept {registration}. Username or email is not unique.",
61 )
62 else:
63 services.accept_registration(registration, actor=request.user)
64 messages.success(request, f"Successfully accepted {registration}.")
65 elif renewal is not None: # pragma: no cover
66 services.accept_renewal(renewal, actor=request.user)
67 messages.success(request, f"Successfully accepted {renewal}.")
68 elif action == "reject":
69 if registration is not None:
70 services.reject_registration(registration, actor=request.user)
71 messages.success(request, f"Successfully rejected {registration}.")
72 elif renewal is not None: # pragma: no cover
73 services.reject_renewal(renewal, actor=request.user)
74 messages.success(request, f"Successfully rejected {renewal}.")
75 elif action == "resend":
76 if registration is not None:
77 emails.send_registration_email_confirmation(entry.registration)
78 messages.success(
79 request, f"Resent registration email of {registration}."
80 )
81 else:
82 messages.error(request, "Cannot resend renewal.")
83 elif action == "revert": # pragma: no cover
84 if registration is not None:
85 services.revert_registration(registration, actor=request.user)
86 messages.success(
87 request, f"Successfully reverted registration {registration}."
88 )
89 elif renewal is not None:
90 services.revert_renewal(renewal, actor=request.user)
91 messages.success(request, f"Successfully reverted renewal {renewal}.")
92
93 redirect_model = "registration" if registration is not None else "renewal"
94 return redirect(f"admin:registrations_{redirect_model}_change", kwargs["pk"])
95
96
97 class ConfirmEmailView(View, TemplateResponseMixin):
98 """View that confirms the email address of the provided registration."""
99
100 template_name = "registrations/confirm_email.html"
101
102 def get(self, request, *args, **kwargs):
103 registration = get_object_or_404(Registration, pk=kwargs["pk"])
104
105 if registration.status == Registration.STATUS_CONFIRM:
106 services.confirm_registration(registration)
107
108 if registration.status != Registration.STATUS_REVIEW:
109 raise Http404
110
111 return self.render_to_response({})
112
113
114 class BaseRegistrationFormView(FormView):
115 """View that renders a membership registration form."""
116
117 form_class = forms.MemberRegistrationForm
118 template_name = "registrations/register_member.html"
119
120 def get_context_data(self, **kwargs):
121 context = super().get_context_data(**kwargs)
122 context["google_api_key"] = settings.GOOGLE_PLACES_API_KEY
123 context["year_fees"] = floatformat(
124 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2
125 )
126 context["study_fees"] = floatformat(
127 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2
128 )
129 return context
130
131 def get(self, request, *args, **kwargs):
132 if request.user.is_authenticated:
133 return redirect("registrations:renew")
134 return super().get(request, args, kwargs)
135
136 def form_valid(self, form):
137 form.save()
138 emails.send_registration_email_confirmation(form.instance)
139 return redirect("registrations:register-success")
140
141
142 class MemberRegistrationFormView(BaseRegistrationFormView):
143 """View that renders the `member` membership registration form."""
144
145 form_class = forms.MemberRegistrationForm
146 template_name = "registrations/register_member.html"
147
148 def get_context_data(self, **kwargs):
149 context = super().get_context_data(**kwargs)
150 context["tpay_enabled"] = (
151 settings.THALIA_PAY_ENABLED_PAYMENT_METHOD
152 and settings.THALIA_PAY_FOR_NEW_MEMBERS
153 )
154 return context
155
156 def post(self, request, *args, **kwargs):
157 request.POST = request.POST.dict()
158 request.POST["language"] = request.LANGUAGE_CODE
159 request.POST["membership_type"] = Membership.MEMBER
160 return super().post(request, *args, **kwargs)
161
162
163 class BenefactorRegistrationFormView(BaseRegistrationFormView):
164 """View that renders the `benefactor` membership registration form."""
165
166 form_class = forms.BenefactorRegistrationForm
167 template_name = "registrations/register_benefactor.html"
168
169 def get_context_data(self, **kwargs):
170 context = super().get_context_data(**kwargs)
171 context["tpay_enabled"] = (
172 settings.THALIA_PAY_ENABLED_PAYMENT_METHOD
173 and settings.THALIA_PAY_FOR_NEW_MEMBERS
174 )
175 return context
176
177 def post(self, request, *args, **kwargs):
178 request.POST = request.POST.dict()
179 request.POST["language"] = request.LANGUAGE_CODE
180 request.POST["membership_type"] = Membership.BENEFACTOR
181 request.POST["length"] = Entry.MEMBERSHIP_YEAR
182 request.POST["remarks"] = (
183 "Registered as iCIS employee" if "icis_employee" in request.POST else ""
184 )
185 request.POST["no_references"] = "icis_employee" in request.POST
186 return super().post(request, *args, **kwargs)
187
188
189 @method_decorator(login_required, name="dispatch")
190 class RenewalFormView(FormView):
191 """View that renders the membership renewal form."""
192
193 form_class = forms.RenewalForm
194 template_name = "registrations/renewal.html"
195
196 def get_context_data(self, **kwargs):
197 context = super().get_context_data(**kwargs)
198 context["year_fees"] = floatformat(
199 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2
200 )
201 context["study_fees"] = floatformat(
202 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2
203 )
204 context["latest_membership"] = self.request.member.latest_membership
205 context["latest_renewal"] = Renewal.objects.filter(
206 Q(member=self.request.member)
207 & (
208 Q(status=Registration.STATUS_ACCEPTED)
209 | Q(status=Registration.STATUS_REVIEW)
210 )
211 ).last()
212 context["was_member"] = Membership.objects.filter(
213 user=self.request.member, type=Membership.MEMBER
214 ).exists()
215
216 context["benefactor_type"] = Membership.BENEFACTOR
217 return context
218
219 def get_form(self, form_class=None):
220 form = super().get_form(form_class)
221 member = self.request.member
222 if member is not None and member.latest_membership is not None:
223 latest_membership = member.latest_membership
224 # If latest membership has not ended or does not ends
225 # within 1 month: do not show 'year' length and disable benefactor option
226 hide_year_choice = not (
227 latest_membership is not None
228 and latest_membership.until is not None
229 and (latest_membership.until - timezone.now().date()).days <= 31
230 )
231
232 if hide_year_choice:
233 form.fields["length"].choices = [
234 c
235 for c in form.fields["length"].choices
236 if c[0] != Entry.MEMBERSHIP_YEAR
237 ]
238 form.fields["membership_type"].choices = [
239 c
240 for c in form.fields["membership_type"].choices
241 if c[0] != Membership.BENEFACTOR
242 ]
243
244 return form
245
246 def post(self, request, *args, **kwargs):
247 request.POST = request.POST.dict()
248 if request.member.latest_membership.type == Membership.BENEFACTOR:
249 request.POST["membership_type"] = Membership.BENEFACTOR
250 request.POST["length"] = Entry.MEMBERSHIP_YEAR
251 request.POST["member"] = request.member.pk
252 request.POST["remarks"] = ""
253 request.POST["no_references"] = True
254
255 if request.POST["membership_type"] == Membership.BENEFACTOR:
256 request.POST["no_references"] = False
257 if Membership.objects.filter(
258 user=request.member, type=Membership.MEMBER
259 ).exists():
260 request.POST["remarks"] = "Was a Thalia member in the past."
261 request.POST["no_references"] = True
262 if "icis_employee" in request.POST:
263 request.POST["remarks"] = "Registered as iCIS employee."
264 request.POST["no_references"] = True
265
266 return super().post(request, *args, **kwargs)
267
268 def form_valid(self, form):
269 renewal = form.save()
270 if not renewal.no_references:
271 emails.send_references_information_message(renewal)
272 emails.send_new_renewal_board_message(renewal)
273 return redirect("registrations:renew-success")
274
275
276 @method_decorator(login_required, name="dispatch")
277 @method_decorator(membership_required, name="dispatch")
278 class ReferenceCreateView(CreateView):
279 """View that renders a reference creation form."""
280
281 model = Reference
282 form_class = forms.ReferenceForm
283 template_name = "registrations/reference.html"
284 entry = None
285 success = False
286
287 def get_success_url(self):
288 return reverse("registrations:reference-success", args=(self.entry.pk,))
289
290 def get_context_data(self, **kwargs):
291 context = super().get_context_data(**kwargs)
292
293 context["success"] = self.success
294 try:
295 context["name"] = self.entry.registration.get_full_name()
296 except Registration.DoesNotExist:
297 context["name"] = self.entry.renewal.member.get_full_name()
298
299 return context
300
301 def dispatch(self, request, *args, **kwargs):
302 self.entry = get_object_or_404(Entry, pk=kwargs.get("pk"))
303
304 if (
305 self.entry.no_references
306 or self.entry.membership_type != Membership.BENEFACTOR
307 ):
308 raise Http404
309
310 return super().dispatch(request, *args, **kwargs)
311
312 def post(self, request, *args, **kwargs):
313 request.POST = request.POST.dict()
314 request.POST["member"] = request.member.pk
315 request.POST["entry"] = kwargs["pk"]
316 return super().post(request, *args, **kwargs)
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/registrations/views.py b/website/registrations/views.py
--- a/website/registrations/views.py
+++ b/website/registrations/views.py
@@ -14,6 +14,8 @@
from django.views.generic import CreateView, FormView
from django.views.generic.base import TemplateResponseMixin, TemplateView
+from django_ratelimit.decorators import ratelimit
+
from members.decorators import membership_required
from members.models import Membership
@@ -138,6 +140,10 @@
emails.send_registration_email_confirmation(form.instance)
return redirect("registrations:register-success")
+ @method_decorator(ratelimit(key="ip", rate="10/d"))
+ def post(self, request, *args, **kwargs):
+ return super().post(request, *args, **kwargs)
+
class MemberRegistrationFormView(BaseRegistrationFormView):
"""View that renders the `member` membership registration form."""
| {"golden_diff": "diff --git a/website/registrations/views.py b/website/registrations/views.py\n--- a/website/registrations/views.py\n+++ b/website/registrations/views.py\n@@ -14,6 +14,8 @@\n from django.views.generic import CreateView, FormView\n from django.views.generic.base import TemplateResponseMixin, TemplateView\n \n+from django_ratelimit.decorators import ratelimit\n+\n from members.decorators import membership_required\n from members.models import Membership\n \n@@ -138,6 +140,10 @@\n emails.send_registration_email_confirmation(form.instance)\n return redirect(\"registrations:register-success\")\n \n+ @method_decorator(ratelimit(key=\"ip\", rate=\"10/d\"))\n+ def post(self, request, *args, **kwargs):\n+ return super().post(request, *args, **kwargs)\n+\n \n class MemberRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `member` membership registration form.\"\"\"\n", "issue": "Rate-limit and stricter validation on member registration view\nSee responsible disclosure email. https://staging.thalia.nu/association/register/member/ triggers sending an email. We should ratelimit it quite strictly. \r\n\r\nThis email also contains information (name, more?) provided by the user. While that information is properly HTML-escaped, some mail clients do render urls in it as clickable. And for example inserting newlines may be possible, so that someone could at least make the emails ugly. \r\nSo it wouldn't hurt to perform some extra validation of names. \n", "before_files": [{"content": "\"\"\"Views provided by the registrations package.\"\"\"\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.generic import CreateView, FormView\nfrom django.views.generic.base import TemplateResponseMixin, TemplateView\n\nfrom members.decorators import membership_required\nfrom members.models import Membership\n\nfrom . 
import emails, forms, services\nfrom .models import Entry, Reference, Registration, Renewal\n\n\nclass BecomeAMemberView(TemplateView):\n \"\"\"View that render a HTML template with context data.\"\"\"\n\n template_name = \"registrations/become_a_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"registrations.review_entries\"),\n name=\"dispatch\",\n)\nclass EntryAdminView(View):\n \"\"\"View that handles the processing of entries.\"\"\"\n\n def post(self, request, *args, **kwargs):\n action = request.POST.get(\"action\")\n entry = get_object_or_404(Entry, pk=kwargs[\"pk\"])\n\n registration = getattr(entry, \"registration\", None)\n renewal = getattr(entry, \"renewal\", None)\n\n if action == \"accept\":\n if registration is not None:\n if not registration.check_user_is_unique():\n messages.error(\n request,\n f\"Could not accept {registration}. Username or email is not unique.\",\n )\n else:\n services.accept_registration(registration, actor=request.user)\n messages.success(request, f\"Successfully accepted {registration}.\")\n elif renewal is not None: # pragma: no cover\n services.accept_renewal(renewal, actor=request.user)\n messages.success(request, f\"Successfully accepted {renewal}.\")\n elif action == \"reject\":\n if registration is not None:\n services.reject_registration(registration, actor=request.user)\n messages.success(request, f\"Successfully rejected {registration}.\")\n elif renewal is not None: # pragma: no cover\n services.reject_renewal(renewal, actor=request.user)\n messages.success(request, f\"Successfully rejected {renewal}.\")\n elif action == \"resend\":\n if registration is not None:\n emails.send_registration_email_confirmation(entry.registration)\n messages.success(\n request, f\"Resent registration email of {registration}.\"\n )\n else:\n messages.error(request, \"Cannot resend renewal.\")\n elif action == \"revert\": # pragma: no cover\n if registration is not None:\n services.revert_registration(registration, actor=request.user)\n messages.success(\n request, f\"Successfully reverted registration {registration}.\"\n )\n elif renewal is not None:\n services.revert_renewal(renewal, actor=request.user)\n messages.success(request, f\"Successfully reverted renewal {renewal}.\")\n\n redirect_model = \"registration\" if registration is not None else \"renewal\"\n return redirect(f\"admin:registrations_{redirect_model}_change\", kwargs[\"pk\"])\n\n\nclass ConfirmEmailView(View, TemplateResponseMixin):\n \"\"\"View that confirms the email address of the provided registration.\"\"\"\n\n template_name = \"registrations/confirm_email.html\"\n\n def get(self, request, *args, **kwargs):\n registration = get_object_or_404(Registration, pk=kwargs[\"pk\"])\n\n if registration.status == Registration.STATUS_CONFIRM:\n services.confirm_registration(registration)\n\n if registration.status != Registration.STATUS_REVIEW:\n raise Http404\n\n return self.render_to_response({})\n\n\nclass BaseRegistrationFormView(FormView):\n \"\"\"View that renders a membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def 
get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"google_api_key\"] = settings.GOOGLE_PLACES_API_KEY\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(\"registrations:renew\")\n return super().get(request, args, kwargs)\n\n def form_valid(self, form):\n form.save()\n emails.send_registration_email_confirmation(form.instance)\n return redirect(\"registrations:register-success\")\n\n\nclass MemberRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `member` membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.MEMBER\n return super().post(request, *args, **kwargs)\n\n\nclass BenefactorRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `benefactor` membership registration form.\"\"\"\n\n form_class = forms.BenefactorRegistrationForm\n template_name = \"registrations/register_benefactor.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"remarks\"] = (\n \"Registered as iCIS employee\" if \"icis_employee\" in request.POST else \"\"\n )\n request.POST[\"no_references\"] = \"icis_employee\" in request.POST\n return super().post(request, *args, **kwargs)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass RenewalFormView(FormView):\n \"\"\"View that renders the membership renewal form.\"\"\"\n\n form_class = forms.RenewalForm\n template_name = \"registrations/renewal.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n context[\"latest_membership\"] = self.request.member.latest_membership\n context[\"latest_renewal\"] = Renewal.objects.filter(\n Q(member=self.request.member)\n & (\n Q(status=Registration.STATUS_ACCEPTED)\n | Q(status=Registration.STATUS_REVIEW)\n )\n ).last()\n context[\"was_member\"] = Membership.objects.filter(\n user=self.request.member, type=Membership.MEMBER\n ).exists()\n\n context[\"benefactor_type\"] = Membership.BENEFACTOR\n return context\n\n def get_form(self, form_class=None):\n form = super().get_form(form_class)\n member = self.request.member\n if member is not None and member.latest_membership is not None:\n latest_membership = 
member.latest_membership\n # If latest membership has not ended or does not ends\n # within 1 month: do not show 'year' length and disable benefactor option\n hide_year_choice = not (\n latest_membership is not None\n and latest_membership.until is not None\n and (latest_membership.until - timezone.now().date()).days <= 31\n )\n\n if hide_year_choice:\n form.fields[\"length\"].choices = [\n c\n for c in form.fields[\"length\"].choices\n if c[0] != Entry.MEMBERSHIP_YEAR\n ]\n form.fields[\"membership_type\"].choices = [\n c\n for c in form.fields[\"membership_type\"].choices\n if c[0] != Membership.BENEFACTOR\n ]\n\n return form\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n if request.member.latest_membership.type == Membership.BENEFACTOR:\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"member\"] = request.member.pk\n request.POST[\"remarks\"] = \"\"\n request.POST[\"no_references\"] = True\n\n if request.POST[\"membership_type\"] == Membership.BENEFACTOR:\n request.POST[\"no_references\"] = False\n if Membership.objects.filter(\n user=request.member, type=Membership.MEMBER\n ).exists():\n request.POST[\"remarks\"] = \"Was a Thalia member in the past.\"\n request.POST[\"no_references\"] = True\n if \"icis_employee\" in request.POST:\n request.POST[\"remarks\"] = \"Registered as iCIS employee.\"\n request.POST[\"no_references\"] = True\n\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form):\n renewal = form.save()\n if not renewal.no_references:\n emails.send_references_information_message(renewal)\n emails.send_new_renewal_board_message(renewal)\n return redirect(\"registrations:renew-success\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(membership_required, name=\"dispatch\")\nclass ReferenceCreateView(CreateView):\n \"\"\"View that renders a reference creation form.\"\"\"\n\n model = Reference\n form_class = forms.ReferenceForm\n template_name = \"registrations/reference.html\"\n entry = None\n success = False\n\n def get_success_url(self):\n return reverse(\"registrations:reference-success\", args=(self.entry.pk,))\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\"success\"] = self.success\n try:\n context[\"name\"] = self.entry.registration.get_full_name()\n except Registration.DoesNotExist:\n context[\"name\"] = self.entry.renewal.member.get_full_name()\n\n return context\n\n def dispatch(self, request, *args, **kwargs):\n self.entry = get_object_or_404(Entry, pk=kwargs.get(\"pk\"))\n\n if (\n self.entry.no_references\n or self.entry.membership_type != Membership.BENEFACTOR\n ):\n raise Http404\n\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"member\"] = request.member.pk\n request.POST[\"entry\"] = kwargs[\"pk\"]\n return super().post(request, *args, **kwargs)\n", "path": "website/registrations/views.py"}], "after_files": [{"content": "\"\"\"Views provided by the registrations package.\"\"\"\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom 
django.template.defaultfilters import floatformat\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.generic import CreateView, FormView\nfrom django.views.generic.base import TemplateResponseMixin, TemplateView\n\nfrom django_ratelimit.decorators import ratelimit\n\nfrom members.decorators import membership_required\nfrom members.models import Membership\n\nfrom . import emails, forms, services\nfrom .models import Entry, Reference, Registration, Renewal\n\n\nclass BecomeAMemberView(TemplateView):\n \"\"\"View that render a HTML template with context data.\"\"\"\n\n template_name = \"registrations/become_a_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"registrations.review_entries\"),\n name=\"dispatch\",\n)\nclass EntryAdminView(View):\n \"\"\"View that handles the processing of entries.\"\"\"\n\n def post(self, request, *args, **kwargs):\n action = request.POST.get(\"action\")\n entry = get_object_or_404(Entry, pk=kwargs[\"pk\"])\n\n registration = getattr(entry, \"registration\", None)\n renewal = getattr(entry, \"renewal\", None)\n\n if action == \"accept\":\n if registration is not None:\n if not registration.check_user_is_unique():\n messages.error(\n request,\n f\"Could not accept {registration}. Username or email is not unique.\",\n )\n else:\n services.accept_registration(registration, actor=request.user)\n messages.success(request, f\"Successfully accepted {registration}.\")\n elif renewal is not None: # pragma: no cover\n services.accept_renewal(renewal, actor=request.user)\n messages.success(request, f\"Successfully accepted {renewal}.\")\n elif action == \"reject\":\n if registration is not None:\n services.reject_registration(registration, actor=request.user)\n messages.success(request, f\"Successfully rejected {registration}.\")\n elif renewal is not None: # pragma: no cover\n services.reject_renewal(renewal, actor=request.user)\n messages.success(request, f\"Successfully rejected {renewal}.\")\n elif action == \"resend\":\n if registration is not None:\n emails.send_registration_email_confirmation(entry.registration)\n messages.success(\n request, f\"Resent registration email of {registration}.\"\n )\n else:\n messages.error(request, \"Cannot resend renewal.\")\n elif action == \"revert\": # pragma: no cover\n if registration is not None:\n services.revert_registration(registration, actor=request.user)\n messages.success(\n request, f\"Successfully reverted registration {registration}.\"\n )\n elif renewal is not None:\n services.revert_renewal(renewal, actor=request.user)\n messages.success(request, f\"Successfully reverted renewal {renewal}.\")\n\n redirect_model = \"registration\" if registration is not None else \"renewal\"\n return redirect(f\"admin:registrations_{redirect_model}_change\", kwargs[\"pk\"])\n\n\nclass ConfirmEmailView(View, TemplateResponseMixin):\n \"\"\"View that confirms the email address of the provided registration.\"\"\"\n\n template_name = \"registrations/confirm_email.html\"\n\n def get(self, request, *args, **kwargs):\n registration = 
get_object_or_404(Registration, pk=kwargs[\"pk\"])\n\n if registration.status == Registration.STATUS_CONFIRM:\n services.confirm_registration(registration)\n\n if registration.status != Registration.STATUS_REVIEW:\n raise Http404\n\n return self.render_to_response({})\n\n\nclass BaseRegistrationFormView(FormView):\n \"\"\"View that renders a membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"google_api_key\"] = settings.GOOGLE_PLACES_API_KEY\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(\"registrations:renew\")\n return super().get(request, args, kwargs)\n\n def form_valid(self, form):\n form.save()\n emails.send_registration_email_confirmation(form.instance)\n return redirect(\"registrations:register-success\")\n\n @method_decorator(ratelimit(key=\"ip\", rate=\"10/d\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass MemberRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `member` membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.MEMBER\n return super().post(request, *args, **kwargs)\n\n\nclass BenefactorRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `benefactor` membership registration form.\"\"\"\n\n form_class = forms.BenefactorRegistrationForm\n template_name = \"registrations/register_benefactor.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"remarks\"] = (\n \"Registered as iCIS employee\" if \"icis_employee\" in request.POST else \"\"\n )\n request.POST[\"no_references\"] = \"icis_employee\" in request.POST\n return super().post(request, *args, **kwargs)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass RenewalFormView(FormView):\n \"\"\"View that renders the membership renewal form.\"\"\"\n\n form_class = forms.RenewalForm\n template_name = \"registrations/renewal.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n 
context[\"latest_membership\"] = self.request.member.latest_membership\n context[\"latest_renewal\"] = Renewal.objects.filter(\n Q(member=self.request.member)\n & (\n Q(status=Registration.STATUS_ACCEPTED)\n | Q(status=Registration.STATUS_REVIEW)\n )\n ).last()\n context[\"was_member\"] = Membership.objects.filter(\n user=self.request.member, type=Membership.MEMBER\n ).exists()\n\n context[\"benefactor_type\"] = Membership.BENEFACTOR\n return context\n\n def get_form(self, form_class=None):\n form = super().get_form(form_class)\n member = self.request.member\n if member is not None and member.latest_membership is not None:\n latest_membership = member.latest_membership\n # If latest membership has not ended or does not ends\n # within 1 month: do not show 'year' length and disable benefactor option\n hide_year_choice = not (\n latest_membership is not None\n and latest_membership.until is not None\n and (latest_membership.until - timezone.now().date()).days <= 31\n )\n\n if hide_year_choice:\n form.fields[\"length\"].choices = [\n c\n for c in form.fields[\"length\"].choices\n if c[0] != Entry.MEMBERSHIP_YEAR\n ]\n form.fields[\"membership_type\"].choices = [\n c\n for c in form.fields[\"membership_type\"].choices\n if c[0] != Membership.BENEFACTOR\n ]\n\n return form\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n if request.member.latest_membership.type == Membership.BENEFACTOR:\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"member\"] = request.member.pk\n request.POST[\"remarks\"] = \"\"\n request.POST[\"no_references\"] = True\n\n if request.POST[\"membership_type\"] == Membership.BENEFACTOR:\n request.POST[\"no_references\"] = False\n if Membership.objects.filter(\n user=request.member, type=Membership.MEMBER\n ).exists():\n request.POST[\"remarks\"] = \"Was a Thalia member in the past.\"\n request.POST[\"no_references\"] = True\n if \"icis_employee\" in request.POST:\n request.POST[\"remarks\"] = \"Registered as iCIS employee.\"\n request.POST[\"no_references\"] = True\n\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form):\n renewal = form.save()\n if not renewal.no_references:\n emails.send_references_information_message(renewal)\n emails.send_new_renewal_board_message(renewal)\n return redirect(\"registrations:renew-success\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(membership_required, name=\"dispatch\")\nclass ReferenceCreateView(CreateView):\n \"\"\"View that renders a reference creation form.\"\"\"\n\n model = Reference\n form_class = forms.ReferenceForm\n template_name = \"registrations/reference.html\"\n entry = None\n success = False\n\n def get_success_url(self):\n return reverse(\"registrations:reference-success\", args=(self.entry.pk,))\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\"success\"] = self.success\n try:\n context[\"name\"] = self.entry.registration.get_full_name()\n except Registration.DoesNotExist:\n context[\"name\"] = self.entry.renewal.member.get_full_name()\n\n return context\n\n def dispatch(self, request, *args, **kwargs):\n self.entry = get_object_or_404(Entry, pk=kwargs.get(\"pk\"))\n\n if (\n self.entry.no_references\n or self.entry.membership_type != Membership.BENEFACTOR\n ):\n raise Http404\n\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n request.POST = 
request.POST.dict()\n request.POST[\"member\"] = request.member.pk\n request.POST[\"entry\"] = kwargs[\"pk\"]\n return super().post(request, *args, **kwargs)\n", "path": "website/registrations/views.py"}]} | 3,800 | 208 |
gh_patches_debug_6554 | rasdani/github-patches | git_diff | open-mmlab__mmdetection3d-709 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Possible bug with 'rt_mat (np.ndarray)' in coord_3d_mode
https://github.com/open-mmlab/mmdetection3d/blob/a1e04538295b02d42fc59bb7ed58c59c98220174/mmdet3d/core/bbox/structures/coord_3d_mode.py#L246
Should it be `if rt_mat.shape[1] == 4:` to take into account both (np.ndarray | torch.Tensor) types of `rt_mat` as input
or
convert `rt_mat` from np.ndarray to torch.Tensor before using `if rt_mat.size(1) == 4` ?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmdet3d/core/bbox/structures/coord_3d_mode.py`
Content:
```
1 import numpy as np
2 import torch
3 from enum import IntEnum, unique
4
5 from mmdet3d.core.points import (BasePoints, CameraPoints, DepthPoints,
6 LiDARPoints)
7 from .base_box3d import BaseInstance3DBoxes
8 from .cam_box3d import CameraInstance3DBoxes
9 from .depth_box3d import DepthInstance3DBoxes
10 from .lidar_box3d import LiDARInstance3DBoxes
11
12
13 @unique
14 class Coord3DMode(IntEnum):
15 r"""Enum of different ways to represent a box
16 and point cloud.
17
18 Coordinates in LiDAR:
19
20 .. code-block:: none
21
22 up z
23 ^ x front
24 | /
25 | /
26 left y <------ 0
27
28 The relative coordinate of bottom center in a LiDAR box is (0.5, 0.5, 0),
29 and the yaw is around the z axis, thus the rotation axis=2.
30
31 Coordinates in camera:
32
33 .. code-block:: none
34
35 z front
36 /
37 /
38 0 ------> x right
39 |
40 |
41 v
42 down y
43
44 The relative coordinate of bottom center in a CAM box is [0.5, 1.0, 0.5],
45 and the yaw is around the y axis, thus the rotation axis=1.
46
47 Coordinates in Depth mode:
48
49 .. code-block:: none
50
51 up z
52 ^ y front
53 | /
54 | /
55 0 ------> x right
56
57 The relative coordinate of bottom center in a DEPTH box is (0.5, 0.5, 0),
58 and the yaw is around the z axis, thus the rotation axis=2.
59 """
60
61 LIDAR = 0
62 CAM = 1
63 DEPTH = 2
64
65 @staticmethod
66 def convert(input, src, dst, rt_mat=None):
67 """Convert boxes or points from `src` mode to `dst` mode."""
68 if isinstance(input, BaseInstance3DBoxes):
69 return Coord3DMode.convert_box(input, src, dst, rt_mat=rt_mat)
70 elif isinstance(input, BasePoints):
71 return Coord3DMode.convert_point(input, src, dst, rt_mat=rt_mat)
72 else:
73 raise NotImplementedError
74
75 @staticmethod
76 def convert_box(box, src, dst, rt_mat=None):
77 """Convert boxes from `src` mode to `dst` mode.
78
79 Args:
80 box (tuple | list | np.ndarray |
81 torch.Tensor | BaseInstance3DBoxes):
82 Can be a k-tuple, k-list or an Nxk array/tensor, where k = 7.
83 src (:obj:`CoordMode`): The src Box mode.
84 dst (:obj:`CoordMode`): The target Box mode.
85 rt_mat (np.ndarray | torch.Tensor): The rotation and translation
86 matrix between different coordinates. Defaults to None.
87 The conversion from `src` coordinates to `dst` coordinates
88 usually comes along the change of sensors, e.g., from camera
89 to LiDAR. This requires a transformation matrix.
90
91 Returns:
92 (tuple | list | np.ndarray | torch.Tensor | BaseInstance3DBoxes): \
93 The converted box of the same type.
94 """
95 if src == dst:
96 return box
97
98 is_numpy = isinstance(box, np.ndarray)
99 is_Instance3DBoxes = isinstance(box, BaseInstance3DBoxes)
100 single_box = isinstance(box, (list, tuple))
101 if single_box:
102 assert len(box) >= 7, (
103 'CoordMode.convert takes either a k-tuple/list or '
104 'an Nxk array/tensor, where k >= 7')
105 arr = torch.tensor(box)[None, :]
106 else:
107 # avoid modifying the input box
108 if is_numpy:
109 arr = torch.from_numpy(np.asarray(box)).clone()
110 elif is_Instance3DBoxes:
111 arr = box.tensor.clone()
112 else:
113 arr = box.clone()
114
115 # convert box from `src` mode to `dst` mode.
116 x_size, y_size, z_size = arr[..., 3:4], arr[..., 4:5], arr[..., 5:6]
117 if src == Coord3DMode.LIDAR and dst == Coord3DMode.CAM:
118 if rt_mat is None:
119 rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]])
120 xyz_size = torch.cat([y_size, z_size, x_size], dim=-1)
121 elif src == Coord3DMode.CAM and dst == Coord3DMode.LIDAR:
122 if rt_mat is None:
123 rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])
124 xyz_size = torch.cat([z_size, x_size, y_size], dim=-1)
125 elif src == Coord3DMode.DEPTH and dst == Coord3DMode.CAM:
126 if rt_mat is None:
127 rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]])
128 xyz_size = torch.cat([x_size, z_size, y_size], dim=-1)
129 elif src == Coord3DMode.CAM and dst == Coord3DMode.DEPTH:
130 if rt_mat is None:
131 rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
132 xyz_size = torch.cat([x_size, z_size, y_size], dim=-1)
133 elif src == Coord3DMode.LIDAR and dst == Coord3DMode.DEPTH:
134 if rt_mat is None:
135 rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
136 xyz_size = torch.cat([y_size, x_size, z_size], dim=-1)
137 elif src == Coord3DMode.DEPTH and dst == Coord3DMode.LIDAR:
138 if rt_mat is None:
139 rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
140 xyz_size = torch.cat([y_size, x_size, z_size], dim=-1)
141 else:
142 raise NotImplementedError(
143 f'Conversion from Coord3DMode {src} to {dst} '
144 'is not supported yet')
145
146 if not isinstance(rt_mat, torch.Tensor):
147 rt_mat = arr.new_tensor(rt_mat)
148 if rt_mat.size(1) == 4:
149 extended_xyz = torch.cat(
150 [arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1)
151 xyz = extended_xyz @ rt_mat.t()
152 else:
153 xyz = arr[:, :3] @ rt_mat.t()
154
155 remains = arr[..., 6:]
156 arr = torch.cat([xyz[:, :3], xyz_size, remains], dim=-1)
157
158 # convert arr to the original type
159 original_type = type(box)
160 if single_box:
161 return original_type(arr.flatten().tolist())
162 if is_numpy:
163 return arr.numpy()
164 elif is_Instance3DBoxes:
165 if dst == Coord3DMode.CAM:
166 target_type = CameraInstance3DBoxes
167 elif dst == Coord3DMode.LIDAR:
168 target_type = LiDARInstance3DBoxes
169 elif dst == Coord3DMode.DEPTH:
170 target_type = DepthInstance3DBoxes
171 else:
172 raise NotImplementedError(
173 f'Conversion to {dst} through {original_type}'
174 ' is not supported yet')
175 return target_type(
176 arr, box_dim=arr.size(-1), with_yaw=box.with_yaw)
177 else:
178 return arr
179
180 @staticmethod
181 def convert_point(point, src, dst, rt_mat=None):
182 """Convert points from `src` mode to `dst` mode.
183
184 Args:
185 point (tuple | list | np.ndarray |
186 torch.Tensor | BasePoints):
187 Can be a k-tuple, k-list or an Nxk array/tensor.
188 src (:obj:`CoordMode`): The src Point mode.
189 dst (:obj:`CoordMode`): The target Point mode.
190 rt_mat (np.ndarray | torch.Tensor): The rotation and translation
191 matrix between different coordinates. Defaults to None.
192 The conversion from `src` coordinates to `dst` coordinates
193 usually comes along the change of sensors, e.g., from camera
194 to LiDAR. This requires a transformation matrix.
195
196 Returns:
197 (tuple | list | np.ndarray | torch.Tensor | BasePoints): \
198 The converted point of the same type.
199 """
200 if src == dst:
201 return point
202
203 is_numpy = isinstance(point, np.ndarray)
204 is_InstancePoints = isinstance(point, BasePoints)
205 single_point = isinstance(point, (list, tuple))
206 if single_point:
207 assert len(point) >= 3, (
208 'CoordMode.convert takes either a k-tuple/list or '
209 'an Nxk array/tensor, where k >= 3')
210 arr = torch.tensor(point)[None, :]
211 else:
212 # avoid modifying the input point
213 if is_numpy:
214 arr = torch.from_numpy(np.asarray(point)).clone()
215 elif is_InstancePoints:
216 arr = point.tensor.clone()
217 else:
218 arr = point.clone()
219
220 # convert point from `src` mode to `dst` mode.
221 # TODO: LIDAR
222 # only implemented provided Rt matrix in cam-depth conversion
223 if src == Coord3DMode.LIDAR and dst == Coord3DMode.CAM:
224 if rt_mat is None:
225 rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]])
226 elif src == Coord3DMode.CAM and dst == Coord3DMode.LIDAR:
227 if rt_mat is None:
228 rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])
229 elif src == Coord3DMode.DEPTH and dst == Coord3DMode.CAM:
230 if rt_mat is None:
231 rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
232 elif src == Coord3DMode.CAM and dst == Coord3DMode.DEPTH:
233 if rt_mat is None:
234 rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]])
235 elif src == Coord3DMode.LIDAR and dst == Coord3DMode.DEPTH:
236 if rt_mat is None:
237 rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
238 elif src == Coord3DMode.DEPTH and dst == Coord3DMode.LIDAR:
239 if rt_mat is None:
240 rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
241 else:
242 raise NotImplementedError(
243 f'Conversion from Coord3DMode {src} to {dst} '
244 'is not supported yet')
245
246 if rt_mat.size(1) == 4:
247 extended_xyz = torch.cat(
248 [arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1)
249 xyz = extended_xyz @ rt_mat.t()
250 else:
251 xyz = arr[:, :3] @ rt_mat.t()
252
253 remains = arr[:, 3:]
254 arr = torch.cat([xyz[:, :3], remains], dim=-1)
255
256 # convert arr to the original type
257 original_type = type(point)
258 if single_point:
259 return original_type(arr.flatten().tolist())
260 if is_numpy:
261 return arr.numpy()
262 elif is_InstancePoints:
263 if dst == Coord3DMode.CAM:
264 target_type = CameraPoints
265 elif dst == Coord3DMode.LIDAR:
266 target_type = LiDARPoints
267 elif dst == Coord3DMode.DEPTH:
268 target_type = DepthPoints
269 else:
270 raise NotImplementedError(
271 f'Conversion to {dst} through {original_type}'
272 ' is not supported yet')
273 return target_type(
274 arr,
275 points_dim=arr.size(-1),
276 attribute_dims=point.attribute_dims)
277 else:
278 return arr
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmdet3d/core/bbox/structures/coord_3d_mode.py b/mmdet3d/core/bbox/structures/coord_3d_mode.py
--- a/mmdet3d/core/bbox/structures/coord_3d_mode.py
+++ b/mmdet3d/core/bbox/structures/coord_3d_mode.py
@@ -243,6 +243,8 @@
f'Conversion from Coord3DMode {src} to {dst} '
'is not supported yet')
+ if not isinstance(rt_mat, torch.Tensor):
+ rt_mat = arr.new_tensor(rt_mat)
if rt_mat.size(1) == 4:
extended_xyz = torch.cat(
[arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1)
| {"golden_diff": "diff --git a/mmdet3d/core/bbox/structures/coord_3d_mode.py b/mmdet3d/core/bbox/structures/coord_3d_mode.py\n--- a/mmdet3d/core/bbox/structures/coord_3d_mode.py\n+++ b/mmdet3d/core/bbox/structures/coord_3d_mode.py\n@@ -243,6 +243,8 @@\n f'Conversion from Coord3DMode {src} to {dst} '\n 'is not supported yet')\n \n+ if not isinstance(rt_mat, torch.Tensor):\n+ rt_mat = arr.new_tensor(rt_mat)\n if rt_mat.size(1) == 4:\n extended_xyz = torch.cat(\n [arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1)\n", "issue": "Possible bug with 'rt_mat (np.ndarray)' in coord_3d_mode\nhttps://github.com/open-mmlab/mmdetection3d/blob/a1e04538295b02d42fc59bb7ed58c59c98220174/mmdet3d/core/bbox/structures/coord_3d_mode.py#L246\r\n\r\nShould it be `if rt_mat.shape[1] == 4:` to take into account both (np.ndarray | torch.Tensor) types of `rt_mat` as input \r\nor\r\nconvert `rt_mat` from np.ndarray to torch.Tensor before using `if rt_mat.size(1) == 4` ?\n", "before_files": [{"content": "import numpy as np\nimport torch\nfrom enum import IntEnum, unique\n\nfrom mmdet3d.core.points import (BasePoints, CameraPoints, DepthPoints,\n LiDARPoints)\nfrom .base_box3d import BaseInstance3DBoxes\nfrom .cam_box3d import CameraInstance3DBoxes\nfrom .depth_box3d import DepthInstance3DBoxes\nfrom .lidar_box3d import LiDARInstance3DBoxes\n\n\n@unique\nclass Coord3DMode(IntEnum):\n r\"\"\"Enum of different ways to represent a box\n and point cloud.\n\n Coordinates in LiDAR:\n\n .. code-block:: none\n\n up z\n ^ x front\n | /\n | /\n left y <------ 0\n\n The relative coordinate of bottom center in a LiDAR box is (0.5, 0.5, 0),\n and the yaw is around the z axis, thus the rotation axis=2.\n\n Coordinates in camera:\n\n .. code-block:: none\n\n z front\n /\n /\n 0 ------> x right\n |\n |\n v\n down y\n\n The relative coordinate of bottom center in a CAM box is [0.5, 1.0, 0.5],\n and the yaw is around the y axis, thus the rotation axis=1.\n\n Coordinates in Depth mode:\n\n .. code-block:: none\n\n up z\n ^ y front\n | /\n | /\n 0 ------> x right\n\n The relative coordinate of bottom center in a DEPTH box is (0.5, 0.5, 0),\n and the yaw is around the z axis, thus the rotation axis=2.\n \"\"\"\n\n LIDAR = 0\n CAM = 1\n DEPTH = 2\n\n @staticmethod\n def convert(input, src, dst, rt_mat=None):\n \"\"\"Convert boxes or points from `src` mode to `dst` mode.\"\"\"\n if isinstance(input, BaseInstance3DBoxes):\n return Coord3DMode.convert_box(input, src, dst, rt_mat=rt_mat)\n elif isinstance(input, BasePoints):\n return Coord3DMode.convert_point(input, src, dst, rt_mat=rt_mat)\n else:\n raise NotImplementedError\n\n @staticmethod\n def convert_box(box, src, dst, rt_mat=None):\n \"\"\"Convert boxes from `src` mode to `dst` mode.\n\n Args:\n box (tuple | list | np.ndarray |\n torch.Tensor | BaseInstance3DBoxes):\n Can be a k-tuple, k-list or an Nxk array/tensor, where k = 7.\n src (:obj:`CoordMode`): The src Box mode.\n dst (:obj:`CoordMode`): The target Box mode.\n rt_mat (np.ndarray | torch.Tensor): The rotation and translation\n matrix between different coordinates. Defaults to None.\n The conversion from `src` coordinates to `dst` coordinates\n usually comes along the change of sensors, e.g., from camera\n to LiDAR. 
This requires a transformation matrix.\n\n Returns:\n (tuple | list | np.ndarray | torch.Tensor | BaseInstance3DBoxes): \\\n The converted box of the same type.\n \"\"\"\n if src == dst:\n return box\n\n is_numpy = isinstance(box, np.ndarray)\n is_Instance3DBoxes = isinstance(box, BaseInstance3DBoxes)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) >= 7, (\n 'CoordMode.convert takes either a k-tuple/list or '\n 'an Nxk array/tensor, where k >= 7')\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n elif is_Instance3DBoxes:\n arr = box.tensor.clone()\n else:\n arr = box.clone()\n\n # convert box from `src` mode to `dst` mode.\n x_size, y_size, z_size = arr[..., 3:4], arr[..., 4:5], arr[..., 5:6]\n if src == Coord3DMode.LIDAR and dst == Coord3DMode.CAM:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]])\n xyz_size = torch.cat([y_size, z_size, x_size], dim=-1)\n elif src == Coord3DMode.CAM and dst == Coord3DMode.LIDAR:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])\n xyz_size = torch.cat([z_size, x_size, y_size], dim=-1)\n elif src == Coord3DMode.DEPTH and dst == Coord3DMode.CAM:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]])\n xyz_size = torch.cat([x_size, z_size, y_size], dim=-1)\n elif src == Coord3DMode.CAM and dst == Coord3DMode.DEPTH:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]])\n xyz_size = torch.cat([x_size, z_size, y_size], dim=-1)\n elif src == Coord3DMode.LIDAR and dst == Coord3DMode.DEPTH:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\n xyz_size = torch.cat([y_size, x_size, z_size], dim=-1)\n elif src == Coord3DMode.DEPTH and dst == Coord3DMode.LIDAR:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])\n xyz_size = torch.cat([y_size, x_size, z_size], dim=-1)\n else:\n raise NotImplementedError(\n f'Conversion from Coord3DMode {src} to {dst} '\n 'is not supported yet')\n\n if not isinstance(rt_mat, torch.Tensor):\n rt_mat = arr.new_tensor(rt_mat)\n if rt_mat.size(1) == 4:\n extended_xyz = torch.cat(\n [arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1)\n xyz = extended_xyz @ rt_mat.t()\n else:\n xyz = arr[:, :3] @ rt_mat.t()\n\n remains = arr[..., 6:]\n arr = torch.cat([xyz[:, :3], xyz_size, remains], dim=-1)\n\n # convert arr to the original type\n original_type = type(box)\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n elif is_Instance3DBoxes:\n if dst == Coord3DMode.CAM:\n target_type = CameraInstance3DBoxes\n elif dst == Coord3DMode.LIDAR:\n target_type = LiDARInstance3DBoxes\n elif dst == Coord3DMode.DEPTH:\n target_type = DepthInstance3DBoxes\n else:\n raise NotImplementedError(\n f'Conversion to {dst} through {original_type}'\n ' is not supported yet')\n return target_type(\n arr, box_dim=arr.size(-1), with_yaw=box.with_yaw)\n else:\n return arr\n\n @staticmethod\n def convert_point(point, src, dst, rt_mat=None):\n \"\"\"Convert points from `src` mode to `dst` mode.\n\n Args:\n point (tuple | list | np.ndarray |\n torch.Tensor | BasePoints):\n Can be a k-tuple, k-list or an Nxk array/tensor.\n src (:obj:`CoordMode`): The src Point mode.\n dst (:obj:`CoordMode`): The target Point mode.\n rt_mat (np.ndarray | torch.Tensor): The rotation and translation\n matrix between different coordinates. 
Defaults to None.\n The conversion from `src` coordinates to `dst` coordinates\n usually comes along the change of sensors, e.g., from camera\n to LiDAR. This requires a transformation matrix.\n\n Returns:\n (tuple | list | np.ndarray | torch.Tensor | BasePoints): \\\n The converted point of the same type.\n \"\"\"\n if src == dst:\n return point\n\n is_numpy = isinstance(point, np.ndarray)\n is_InstancePoints = isinstance(point, BasePoints)\n single_point = isinstance(point, (list, tuple))\n if single_point:\n assert len(point) >= 3, (\n 'CoordMode.convert takes either a k-tuple/list or '\n 'an Nxk array/tensor, where k >= 3')\n arr = torch.tensor(point)[None, :]\n else:\n # avoid modifying the input point\n if is_numpy:\n arr = torch.from_numpy(np.asarray(point)).clone()\n elif is_InstancePoints:\n arr = point.tensor.clone()\n else:\n arr = point.clone()\n\n # convert point from `src` mode to `dst` mode.\n # TODO: LIDAR\n # only implemented provided Rt matrix in cam-depth conversion\n if src == Coord3DMode.LIDAR and dst == Coord3DMode.CAM:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]])\n elif src == Coord3DMode.CAM and dst == Coord3DMode.LIDAR:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])\n elif src == Coord3DMode.DEPTH and dst == Coord3DMode.CAM:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]])\n elif src == Coord3DMode.CAM and dst == Coord3DMode.DEPTH:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]])\n elif src == Coord3DMode.LIDAR and dst == Coord3DMode.DEPTH:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\n elif src == Coord3DMode.DEPTH and dst == Coord3DMode.LIDAR:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])\n else:\n raise NotImplementedError(\n f'Conversion from Coord3DMode {src} to {dst} '\n 'is not supported yet')\n\n if rt_mat.size(1) == 4:\n extended_xyz = torch.cat(\n [arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1)\n xyz = extended_xyz @ rt_mat.t()\n else:\n xyz = arr[:, :3] @ rt_mat.t()\n\n remains = arr[:, 3:]\n arr = torch.cat([xyz[:, :3], remains], dim=-1)\n\n # convert arr to the original type\n original_type = type(point)\n if single_point:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n elif is_InstancePoints:\n if dst == Coord3DMode.CAM:\n target_type = CameraPoints\n elif dst == Coord3DMode.LIDAR:\n target_type = LiDARPoints\n elif dst == Coord3DMode.DEPTH:\n target_type = DepthPoints\n else:\n raise NotImplementedError(\n f'Conversion to {dst} through {original_type}'\n ' is not supported yet')\n return target_type(\n arr,\n points_dim=arr.size(-1),\n attribute_dims=point.attribute_dims)\n else:\n return arr\n", "path": "mmdet3d/core/bbox/structures/coord_3d_mode.py"}], "after_files": [{"content": "import numpy as np\nimport torch\nfrom enum import IntEnum, unique\n\nfrom mmdet3d.core.points import (BasePoints, CameraPoints, DepthPoints,\n LiDARPoints)\nfrom .base_box3d import BaseInstance3DBoxes\nfrom .cam_box3d import CameraInstance3DBoxes\nfrom .depth_box3d import DepthInstance3DBoxes\nfrom .lidar_box3d import LiDARInstance3DBoxes\n\n\n@unique\nclass Coord3DMode(IntEnum):\n r\"\"\"Enum of different ways to represent a box\n and point cloud.\n\n Coordinates in LiDAR:\n\n .. 
code-block:: none\n\n up z\n ^ x front\n | /\n | /\n left y <------ 0\n\n The relative coordinate of bottom center in a LiDAR box is (0.5, 0.5, 0),\n and the yaw is around the z axis, thus the rotation axis=2.\n\n Coordinates in camera:\n\n .. code-block:: none\n\n z front\n /\n /\n 0 ------> x right\n |\n |\n v\n down y\n\n The relative coordinate of bottom center in a CAM box is [0.5, 1.0, 0.5],\n and the yaw is around the y axis, thus the rotation axis=1.\n\n Coordinates in Depth mode:\n\n .. code-block:: none\n\n up z\n ^ y front\n | /\n | /\n 0 ------> x right\n\n The relative coordinate of bottom center in a DEPTH box is (0.5, 0.5, 0),\n and the yaw is around the z axis, thus the rotation axis=2.\n \"\"\"\n\n LIDAR = 0\n CAM = 1\n DEPTH = 2\n\n @staticmethod\n def convert(input, src, dst, rt_mat=None):\n \"\"\"Convert boxes or points from `src` mode to `dst` mode.\"\"\"\n if isinstance(input, BaseInstance3DBoxes):\n return Coord3DMode.convert_box(input, src, dst, rt_mat=rt_mat)\n elif isinstance(input, BasePoints):\n return Coord3DMode.convert_point(input, src, dst, rt_mat=rt_mat)\n else:\n raise NotImplementedError\n\n @staticmethod\n def convert_box(box, src, dst, rt_mat=None):\n \"\"\"Convert boxes from `src` mode to `dst` mode.\n\n Args:\n box (tuple | list | np.ndarray |\n torch.Tensor | BaseInstance3DBoxes):\n Can be a k-tuple, k-list or an Nxk array/tensor, where k = 7.\n src (:obj:`CoordMode`): The src Box mode.\n dst (:obj:`CoordMode`): The target Box mode.\n rt_mat (np.ndarray | torch.Tensor): The rotation and translation\n matrix between different coordinates. Defaults to None.\n The conversion from `src` coordinates to `dst` coordinates\n usually comes along the change of sensors, e.g., from camera\n to LiDAR. This requires a transformation matrix.\n\n Returns:\n (tuple | list | np.ndarray | torch.Tensor | BaseInstance3DBoxes): \\\n The converted box of the same type.\n \"\"\"\n if src == dst:\n return box\n\n is_numpy = isinstance(box, np.ndarray)\n is_Instance3DBoxes = isinstance(box, BaseInstance3DBoxes)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) >= 7, (\n 'CoordMode.convert takes either a k-tuple/list or '\n 'an Nxk array/tensor, where k >= 7')\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n elif is_Instance3DBoxes:\n arr = box.tensor.clone()\n else:\n arr = box.clone()\n\n # convert box from `src` mode to `dst` mode.\n x_size, y_size, z_size = arr[..., 3:4], arr[..., 4:5], arr[..., 5:6]\n if src == Coord3DMode.LIDAR and dst == Coord3DMode.CAM:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]])\n xyz_size = torch.cat([y_size, z_size, x_size], dim=-1)\n elif src == Coord3DMode.CAM and dst == Coord3DMode.LIDAR:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])\n xyz_size = torch.cat([z_size, x_size, y_size], dim=-1)\n elif src == Coord3DMode.DEPTH and dst == Coord3DMode.CAM:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]])\n xyz_size = torch.cat([x_size, z_size, y_size], dim=-1)\n elif src == Coord3DMode.CAM and dst == Coord3DMode.DEPTH:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]])\n xyz_size = torch.cat([x_size, z_size, y_size], dim=-1)\n elif src == Coord3DMode.LIDAR and dst == Coord3DMode.DEPTH:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\n xyz_size = 
torch.cat([y_size, x_size, z_size], dim=-1)\n elif src == Coord3DMode.DEPTH and dst == Coord3DMode.LIDAR:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])\n xyz_size = torch.cat([y_size, x_size, z_size], dim=-1)\n else:\n raise NotImplementedError(\n f'Conversion from Coord3DMode {src} to {dst} '\n 'is not supported yet')\n\n if not isinstance(rt_mat, torch.Tensor):\n rt_mat = arr.new_tensor(rt_mat)\n if rt_mat.size(1) == 4:\n extended_xyz = torch.cat(\n [arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1)\n xyz = extended_xyz @ rt_mat.t()\n else:\n xyz = arr[:, :3] @ rt_mat.t()\n\n remains = arr[..., 6:]\n arr = torch.cat([xyz[:, :3], xyz_size, remains], dim=-1)\n\n # convert arr to the original type\n original_type = type(box)\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n elif is_Instance3DBoxes:\n if dst == Coord3DMode.CAM:\n target_type = CameraInstance3DBoxes\n elif dst == Coord3DMode.LIDAR:\n target_type = LiDARInstance3DBoxes\n elif dst == Coord3DMode.DEPTH:\n target_type = DepthInstance3DBoxes\n else:\n raise NotImplementedError(\n f'Conversion to {dst} through {original_type}'\n ' is not supported yet')\n return target_type(\n arr, box_dim=arr.size(-1), with_yaw=box.with_yaw)\n else:\n return arr\n\n @staticmethod\n def convert_point(point, src, dst, rt_mat=None):\n \"\"\"Convert points from `src` mode to `dst` mode.\n\n Args:\n point (tuple | list | np.ndarray |\n torch.Tensor | BasePoints):\n Can be a k-tuple, k-list or an Nxk array/tensor.\n src (:obj:`CoordMode`): The src Point mode.\n dst (:obj:`CoordMode`): The target Point mode.\n rt_mat (np.ndarray | torch.Tensor): The rotation and translation\n matrix between different coordinates. Defaults to None.\n The conversion from `src` coordinates to `dst` coordinates\n usually comes along the change of sensors, e.g., from camera\n to LiDAR. 
This requires a transformation matrix.\n\n Returns:\n (tuple | list | np.ndarray | torch.Tensor | BasePoints): \\\n The converted point of the same type.\n \"\"\"\n if src == dst:\n return point\n\n is_numpy = isinstance(point, np.ndarray)\n is_InstancePoints = isinstance(point, BasePoints)\n single_point = isinstance(point, (list, tuple))\n if single_point:\n assert len(point) >= 3, (\n 'CoordMode.convert takes either a k-tuple/list or '\n 'an Nxk array/tensor, where k >= 3')\n arr = torch.tensor(point)[None, :]\n else:\n # avoid modifying the input point\n if is_numpy:\n arr = torch.from_numpy(np.asarray(point)).clone()\n elif is_InstancePoints:\n arr = point.tensor.clone()\n else:\n arr = point.clone()\n\n # convert point from `src` mode to `dst` mode.\n # TODO: LIDAR\n # only implemented provided Rt matrix in cam-depth conversion\n if src == Coord3DMode.LIDAR and dst == Coord3DMode.CAM:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]])\n elif src == Coord3DMode.CAM and dst == Coord3DMode.LIDAR:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])\n elif src == Coord3DMode.DEPTH and dst == Coord3DMode.CAM:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]])\n elif src == Coord3DMode.CAM and dst == Coord3DMode.DEPTH:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]])\n elif src == Coord3DMode.LIDAR and dst == Coord3DMode.DEPTH:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\n elif src == Coord3DMode.DEPTH and dst == Coord3DMode.LIDAR:\n if rt_mat is None:\n rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])\n else:\n raise NotImplementedError(\n f'Conversion from Coord3DMode {src} to {dst} '\n 'is not supported yet')\n\n if not isinstance(rt_mat, torch.Tensor):\n rt_mat = arr.new_tensor(rt_mat)\n if rt_mat.size(1) == 4:\n extended_xyz = torch.cat(\n [arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1)\n xyz = extended_xyz @ rt_mat.t()\n else:\n xyz = arr[:, :3] @ rt_mat.t()\n\n remains = arr[:, 3:]\n arr = torch.cat([xyz[:, :3], remains], dim=-1)\n\n # convert arr to the original type\n original_type = type(point)\n if single_point:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n elif is_InstancePoints:\n if dst == Coord3DMode.CAM:\n target_type = CameraPoints\n elif dst == Coord3DMode.LIDAR:\n target_type = LiDARPoints\n elif dst == Coord3DMode.DEPTH:\n target_type = DepthPoints\n else:\n raise NotImplementedError(\n f'Conversion to {dst} through {original_type}'\n ' is not supported yet')\n return target_type(\n arr,\n points_dim=arr.size(-1),\n attribute_dims=point.attribute_dims)\n else:\n return arr\n", "path": "mmdet3d/core/bbox/structures/coord_3d_mode.py"}]} | 3,928 | 183 |
gh_patches_debug_10225 | rasdani/github-patches | git_diff | wagtail__wagtail-822 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unicode content in rich text causes error
Steps to reproduce:
1. Using the wagtail interface, create a new instance of a page derived model which has a rich text field
2. In the rich text field, include unicode characters such as: `©` or `’`
3. Publish the page
The page will be published fine, and probably rendered normally through a template.
4. Return to the wagtail admin, and edit the newly created page
The rich text field is not rendered in the editing page. Inspecting the html shows that wagtail is attempting to render the field, its heading is there. But the field is not rendered.
If you attempt to publish this new page, the form will submit a page with no content in the rich text field.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/utils/widgets.py`
Content:
```
1 from django.forms.widgets import Widget
2 from django.utils.safestring import mark_safe
3
4
5 class WidgetWithScript(Widget):
6 def render(self, name, value, attrs=None):
7 widget = super(WidgetWithScript, self).render(name, value, attrs)
8
9 final_attrs = self.build_attrs(attrs, name=name)
10 id_ = final_attrs.get('id', None)
11 if 'id_' is None:
12 return widget
13
14 js = self.render_js_init(id_, name, value)
15 out = '{0}<script>{1}</script>'.format(widget, js)
16 return mark_safe(out)
17
18 def render_js_init(self, id_, name, value):
19 return ''
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/utils/widgets.py b/wagtail/utils/widgets.py
--- a/wagtail/utils/widgets.py
+++ b/wagtail/utils/widgets.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import, unicode_literals
+
from django.forms.widgets import Widget
from django.utils.safestring import mark_safe
@@ -8,7 +10,7 @@
final_attrs = self.build_attrs(attrs, name=name)
id_ = final_attrs.get('id', None)
- if 'id_' is None:
+ if id_ is None:
return widget
js = self.render_js_init(id_, name, value)
| {"golden_diff": "diff --git a/wagtail/utils/widgets.py b/wagtail/utils/widgets.py\n--- a/wagtail/utils/widgets.py\n+++ b/wagtail/utils/widgets.py\n@@ -1,3 +1,5 @@\n+from __future__ import absolute_import, unicode_literals\n+\n from django.forms.widgets import Widget\n from django.utils.safestring import mark_safe\n \n@@ -8,7 +10,7 @@\n \n final_attrs = self.build_attrs(attrs, name=name)\n id_ = final_attrs.get('id', None)\n- if 'id_' is None:\n+ if id_ is None:\n return widget\n \n js = self.render_js_init(id_, name, value)\n", "issue": "Unicode content in rich text causes error\nSteps to reproduce:\n1. Using the wagtail interface, create a new instance of a page derived model which has a rich text field\n2. In the rich text field, include unicode characters such as: `\u00a9` or `\u2019`\n3. Publish the page\n \n The page will be published fine, and probably rendered normally through a template.\n4. Return to the wagtail admin, and edit the newly created page\n \n The rich text field is not rendered in the editing page. Inspecting the html shows that wagtail is attempting to render the field, its heading is there. But the field is not rendered.\n \n If you attempt to publish this new page, the form will submit a page with no content in the rich text field.\n\n", "before_files": [{"content": "from django.forms.widgets import Widget\nfrom django.utils.safestring import mark_safe\n\n\nclass WidgetWithScript(Widget):\n def render(self, name, value, attrs=None):\n widget = super(WidgetWithScript, self).render(name, value, attrs)\n\n final_attrs = self.build_attrs(attrs, name=name)\n id_ = final_attrs.get('id', None)\n if 'id_' is None:\n return widget\n\n js = self.render_js_init(id_, name, value)\n out = '{0}<script>{1}</script>'.format(widget, js)\n return mark_safe(out)\n\n def render_js_init(self, id_, name, value):\n return ''\n", "path": "wagtail/utils/widgets.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django.forms.widgets import Widget\nfrom django.utils.safestring import mark_safe\n\n\nclass WidgetWithScript(Widget):\n def render(self, name, value, attrs=None):\n widget = super(WidgetWithScript, self).render(name, value, attrs)\n\n final_attrs = self.build_attrs(attrs, name=name)\n id_ = final_attrs.get('id', None)\n if id_ is None:\n return widget\n\n js = self.render_js_init(id_, name, value)\n out = '{0}<script>{1}</script>'.format(widget, js)\n return mark_safe(out)\n\n def render_js_init(self, id_, name, value):\n return ''\n", "path": "wagtail/utils/widgets.py"}]} | 600 | 146 |
gh_patches_debug_4403 | rasdani/github-patches | git_diff | learningequality__kolibri-5037 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Facing error while kolibri 0.12.0 deb file installation
### Observed behavior
After running below command it shows error:
**sudo dpkg -i kolibri_0.12.0b4-0ubuntu1_all.deb**
File downloaded from https://github.com/learningequality/kolibri/releases.

### Context
Kolibri version : Kolibri 0.12.0
Operating system : Ubuntu 14.04
### Screenshots:


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/__init__.py`
Content:
```
1 """
2 CAUTION! Keep everything here at at minimum. Do not import stuff.
3 This module is imported in setup.py, so you cannot for instance
4 import a dependency.
5 """
6 from __future__ import absolute_import
7 from __future__ import print_function
8 from __future__ import unicode_literals
9
10 from .utils import env
11 from .utils.version import get_version
12
13 # Setup the environment before loading anything else from the application
14 env.set_env()
15
16 #: This may not be the exact version as it's subject to modification with
17 #: get_version() - use ``kolibri.__version__`` for the exact version string.
18 VERSION = (0, 12, 0, 'alpha', 0)
19
20 __author__ = 'Learning Equality'
21 __email__ = '[email protected]'
22 __version__ = str(get_version(VERSION))
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/__init__.py b/kolibri/__init__.py
--- a/kolibri/__init__.py
+++ b/kolibri/__init__.py
@@ -15,7 +15,7 @@
#: This may not be the exact version as it's subject to modification with
#: get_version() - use ``kolibri.__version__`` for the exact version string.
-VERSION = (0, 12, 0, 'alpha', 0)
+VERSION = (0, 12, 0, 'beta', 0)
__author__ = 'Learning Equality'
__email__ = '[email protected]'
| {"golden_diff": "diff --git a/kolibri/__init__.py b/kolibri/__init__.py\n--- a/kolibri/__init__.py\n+++ b/kolibri/__init__.py\n@@ -15,7 +15,7 @@\n \n #: This may not be the exact version as it's subject to modification with\n #: get_version() - use ``kolibri.__version__`` for the exact version string.\n-VERSION = (0, 12, 0, 'alpha', 0)\n+VERSION = (0, 12, 0, 'beta', 0)\n \n __author__ = 'Learning Equality'\n __email__ = '[email protected]'\n", "issue": "Facing error while kolibri 0.12.0 deb file installation\n### Observed behavior\r\nAfter running below command it shows error:\r\n**sudo dpkg -i kolibri_0.12.0b4-0ubuntu1_all.deb**\r\n\r\nFile downloaded from https://github.com/learningequality/kolibri/releases.\r\n\r\n\r\n\r\n### Context\r\nKolibri version : Kolibri 0.12.0\r\nOperating system : Ubuntu 14.04\r\n\r\n### Screenshots:\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nCAUTION! Keep everything here at at minimum. Do not import stuff.\nThis module is imported in setup.py, so you cannot for instance\nimport a dependency.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom .utils import env\nfrom .utils.version import get_version\n\n# Setup the environment before loading anything else from the application\nenv.set_env()\n\n#: This may not be the exact version as it's subject to modification with\n#: get_version() - use ``kolibri.__version__`` for the exact version string.\nVERSION = (0, 12, 0, 'alpha', 0)\n\n__author__ = 'Learning Equality'\n__email__ = '[email protected]'\n__version__ = str(get_version(VERSION))\n", "path": "kolibri/__init__.py"}], "after_files": [{"content": "\"\"\"\nCAUTION! Keep everything here at at minimum. Do not import stuff.\nThis module is imported in setup.py, so you cannot for instance\nimport a dependency.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom .utils import env\nfrom .utils.version import get_version\n\n# Setup the environment before loading anything else from the application\nenv.set_env()\n\n#: This may not be the exact version as it's subject to modification with\n#: get_version() - use ``kolibri.__version__`` for the exact version string.\nVERSION = (0, 12, 0, 'beta', 0)\n\n__author__ = 'Learning Equality'\n__email__ = '[email protected]'\n__version__ = str(get_version(VERSION))\n", "path": "kolibri/__init__.py"}]} | 766 | 146 |
gh_patches_debug_37466 | rasdani/github-patches | git_diff | holoviz__panel-5427 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Serialization Error When Visualizing PyDeck TextLayer
#### ALL software version info
PyDeck 0.8, Panel 1.2
#### Description of expected behavior and the observed behavior
I recently coded up a variation of the PyDeck TextLayer example [from the PyDeck website](https://deckgl.readthedocs.io/en/latest/gallery/text_layer.html) and then attempted to integrate that into a Panel application. Panel is having a serialization issue with the PyDeck String type.
#### Complete, minimal, self-contained example code that reproduces the issue
[This Google Colab notebook](https://colab.research.google.com/drive/13ziK2OrM7n6zmqQDKJQ1_fvrFCCj2oGf?usp=sharing) shows the visualization running without issue in PyDeck and then shows my attempt to render that same visualization via Panel.
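A rough, self-contained sketch along the same lines (an approximation, not copied from the notebook) that exercises the failing path:

```python
import pandas as pd
import panel as pn
import pydeck as pdk
from pydeck.types import String

pn.extension("deckgl")

# Hypothetical one-row dataset standing in for the gallery's text data.
df = pd.DataFrame([{"name": "Sample label", "coordinates": [-122.45, 37.78]}])

text_layer = pdk.Layer(
    "TextLayer",
    df,
    get_position="coordinates",
    get_text="name",
    get_size=16,
    # pydeck String constants are the values Panel fails to serialize.
    get_text_anchor=String("middle"),
    get_alignment_baseline=String("center"),
)

deck = pdk.Deck(
    layers=[text_layer],
    initial_view_state=pdk.ViewState(latitude=37.78, longitude=-122.45, zoom=10),
)

# Plain pydeck renders this Deck fine; wrapping it in Panel raises the error.
pn.pane.DeckGL(deck, height=400).servable()
```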
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/pane/deckgl.py`
Content:
```
1 """
2 Defines a PyDeck Pane which renders a PyDeck plot using a PyDeckPlot
3 bokeh model.
4 """
5 from __future__ import annotations
6
7 import json
8 import sys
9
10 from collections import defaultdict
11 from typing import (
12 TYPE_CHECKING, Any, ClassVar, Dict, Mapping, Optional,
13 )
14
15 import numpy as np
16 import param
17
18 from bokeh.models import ColumnDataSource
19 from pyviz_comms import JupyterComm
20
21 from ..util import is_dataframe, lazy_load
22 from .base import ModelPane
23
24 if TYPE_CHECKING:
25 from bokeh.document import Document
26 from bokeh.model import Model
27 from pyviz_comms import Comm
28
29
30 def lower_camel_case_keys(attrs):
31 """
32 Makes all the keys in a dictionary camel-cased and lower-case
33
34 Parameters
35 ----------
36 attrs : dict
37 Dictionary for which all the keys should be converted to camel-case
38 """
39 for snake_key in list(attrs.keys()):
40 if '_' not in snake_key:
41 continue
42 camel_key = lower_first_letter(to_camel_case(snake_key))
43 attrs[camel_key] = attrs.pop(snake_key)
44
45
46 def to_camel_case(snake_case: str) -> str:
47 """
48 Makes a snake case string into a camel case one
49
50 Parameters
51 -----------
52 snake_case : str
53 Snake-cased string (e.g., "snake_cased") to be converted to camel-case (e.g., "camelCase")
54 """
55 output_str = ''
56 should_upper_case = False
57 for c in snake_case:
58 if c == '_':
59 should_upper_case = True
60 continue
61 output_str = output_str + c.upper() if should_upper_case else output_str + c
62 should_upper_case = False
63 return output_str
64
65
66 def lower_first_letter(s: str) -> str:
67 return s[:1].lower() + s[1:] if s else ''
68
69
70 def recurse_data(data):
71 if hasattr(data, 'to_json'):
72 data = data.__dict__
73 if isinstance(data, dict):
74 data = dict(data)
75 lower_camel_case_keys(data)
76 data = {k: recurse_data(v) if k != 'data' else v
77 for k, v in data.items()}
78 elif isinstance(data, list):
79 data = [recurse_data(d) for d in data]
80 return data
81
82
83 class DeckGL(ModelPane):
84 """
85 The `DeckGL` pane renders the Deck.gl
86 JSON specification as well as PyDeck plots inside a panel.
87
88 Deck.gl is a very powerful WebGL-powered framework for visual exploratory
89 data analysis of large datasets.
90
91 Reference: https://panel.holoviz.org/reference/panes/DeckGL.html
92
93 :Example:
94
95 >>> pn.extension('deckgl')
96 >>> DeckGL(
97 ... some_deckgl_dict_or_pydeck_object,
98 ... mapbox_api_key=MAPBOX_KEY, height=600
99 ... )
100 """
101
102 mapbox_api_key = param.String(default=None, doc="""
103 The MapBox API key if not supplied by a PyDeck object.""")
104
105 tooltips = param.ClassSelector(default=True, class_=(bool, dict), doc="""
106 Whether to enable tooltips""")
107
108 click_state = param.Dict(default={}, doc="""
109 Contains the last click event on the DeckGL plot.""")
110
111 hover_state = param.Dict(default={}, doc="""
112 The current hover state of the DeckGL plot.""")
113
114 view_state = param.Dict(default={}, doc="""
115 The current view state of the DeckGL plot.""")
116
117 throttle = param.Dict(default={'view': 200, 'hover': 200}, doc="""
118 Throttling timeout (in milliseconds) for view state and hover
119 events sent from the frontend.""")
120
121 _rename: ClassVar[Mapping[str, str | None]] = {
122 'click_state': 'clickState', 'hover_state': 'hoverState',
123 'view_state': 'viewState', 'tooltips': 'tooltip'
124 }
125
126 _updates: ClassVar[bool] = True
127
128 priority: ClassVar[float | bool | None] = None
129
130 @classmethod
131 def applies(cls, obj: Any) -> float | bool | None:
132 if cls.is_pydeck(obj):
133 return 0.8
134 elif isinstance(obj, (dict, str)):
135 return 0
136 return False
137
138 @classmethod
139 def is_pydeck(cls, obj):
140 if 'pydeck' in sys.modules:
141 import pydeck
142 return isinstance(obj, pydeck.bindings.deck.Deck)
143 return False
144
145 @classmethod
146 def _process_data(cls, data):
147 columns = defaultdict(list)
148 for d in data:
149 for col, val in d.items():
150 columns[col].append(val)
151 return {col: np.asarray(vals) for col, vals in columns.items()}
152
153 @classmethod
154 def _update_sources(cls, json_data, sources):
155 layers = json_data.get('layers', [])
156
157 # Create index of sources by columns
158 source_columns = defaultdict(list)
159 for i, source in enumerate(sources):
160 key = tuple(sorted(source.data.keys()))
161 source_columns[key].append((i, source))
162
163 # Process
164 unprocessed, unused = [], list(sources)
165 for layer in layers:
166 data = layer.get('data')
167 if is_dataframe(data):
168 data = ColumnDataSource.from_df(data)
169 elif (isinstance(data, list) and data
170 and isinstance(data[0], dict)):
171 data = cls._process_data(data)
172 else:
173 continue
174
175 key = tuple(sorted(data.keys()))
176 existing = source_columns.get(key)
177 if existing:
178 index, cds = existing.pop()
179 layer['data'] = index
180 updates = {}
181 for col, values in data.items():
182 if not np.array_equal(data[col], cds.data[col]):
183 updates[col] = values
184 if updates:
185 cds.data.update(updates)
186 unused.remove(cds)
187 else:
188 unprocessed.append((layer, data))
189
190 for layer, data in unprocessed:
191 if unused:
192 cds = unused.pop()
193 cds.data = data
194 else:
195 cds = ColumnDataSource(data)
196 sources.append(cds)
197 layer['data'] = sources.index(cds)
198
199 def _transform_object(self, obj) -> Dict[str, Any]:
200 if self.object is None:
201 data, mapbox_api_key, tooltip = {}, self.mapbox_api_key, self.tooltips
202 elif isinstance(self.object, (str, dict)):
203 if isinstance(self.object, str):
204 data = json.loads(self.object)
205 else:
206 data = dict(self.object)
207 data['layers'] = [dict(layer) for layer in data.get('layers', [])]
208 mapbox_api_key = self.mapbox_api_key
209 tooltip = self.tooltips
210 else:
211 data = dict(self.object.__dict__)
212 mapbox_api_key = data.pop('mapbox_key', self.mapbox_api_key)
213 deck_widget = data.pop('deck_widget', None)
214 if isinstance(self.tooltips, dict) or deck_widget is None:
215 tooltip = self.tooltips
216 else:
217 tooltip = deck_widget.tooltip
218 data = {k: v for k, v in recurse_data(data).items() if v is not None}
219
220 # Delete undefined width and height
221 for view in data.get('views', []):
222 if view.get('width', False) is None:
223 view.pop('width')
224 if view.get('height', False) is None:
225 view.pop('height')
226
227 return dict(data=data, tooltip=tooltip, mapbox_api_key=mapbox_api_key or "")
228
229 def _get_model(
230 self, doc: Document, root: Optional[Model] = None,
231 parent: Optional[Model] = None, comm: Optional[Comm] = None
232 ) -> Model:
233 self._bokeh_model = DeckGLPlot = lazy_load(
234 'panel.models.deckgl', 'DeckGLPlot', isinstance(comm, JupyterComm), root
235 )
236 properties = self._get_properties(doc)
237 data = properties.pop('data')
238 properties['data_sources'] = sources = []
239 self._update_sources(data, sources)
240 properties['layers'] = data.pop('layers', [])
241 properties['initialViewState'] = data.pop('initialViewState', {})
242 model = DeckGLPlot(data=data, **properties)
243 root = root or model
244 self._link_props(model, ['clickState', 'hoverState', 'viewState'], doc, root, comm)
245 self._models[root.ref["id"]] = (model, parent)
246 return model
247
248 def _update(self, ref: str, model: Model) -> None:
249 properties = self._get_properties(model.document)
250 data = properties.pop('data')
251 self._update_sources(data, model.data_sources)
252 properties['data'] = data
253 properties['layers'] = data.pop('layers', [])
254 properties['initialViewState'] = data.pop('initialViewState', {})
255 model.update(**properties)
256
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/pane/deckgl.py b/panel/pane/deckgl.py
--- a/panel/pane/deckgl.py
+++ b/panel/pane/deckgl.py
@@ -15,6 +15,7 @@
import numpy as np
import param
+from bokeh.core.serialization import Serializer
from bokeh.models import ColumnDataSource
from pyviz_comms import JupyterComm
@@ -123,6 +124,8 @@
'view_state': 'viewState', 'tooltips': 'tooltip'
}
+ _pydeck_encoders_are_added: ClassVar[bool] = False
+
_updates: ClassVar[bool] = True
priority: ClassVar[float | bool | None] = None
@@ -196,6 +199,36 @@
sources.append(cds)
layer['data'] = sources.index(cds)
+ @classmethod
+ def _add_pydeck_encoders(cls):
+ if cls._pydeck_encoders_are_added or 'pydeck' not in sys.modules:
+ return
+
+ from pydeck.types import String
+ def pydeck_string_encoder(obj, serializer):
+ return obj.value
+
+ Serializer._encoders[String] = pydeck_string_encoder
+
+ def _transform_deck_object(self, obj):
+ data = dict(obj.__dict__)
+ mapbox_api_key = data.pop('mapbox_key', "") or self.mapbox_api_key
+ deck_widget = data.pop('deck_widget', None)
+ if isinstance(self.tooltips, dict) or deck_widget is None:
+ tooltip = self.tooltips
+ else:
+ tooltip = deck_widget.tooltip
+ data = {k: v for k, v in recurse_data(data).items() if v is not None}
+
+ if "initialViewState" in data:
+ data["initialViewState"]={
+ k:v for k, v in data["initialViewState"].items() if v is not None
+ }
+
+ self._add_pydeck_encoders()
+
+ return data, tooltip, mapbox_api_key
+
def _transform_object(self, obj) -> Dict[str, Any]:
if self.object is None:
data, mapbox_api_key, tooltip = {}, self.mapbox_api_key, self.tooltips
@@ -208,14 +241,7 @@
mapbox_api_key = self.mapbox_api_key
tooltip = self.tooltips
else:
- data = dict(self.object.__dict__)
- mapbox_api_key = data.pop('mapbox_key', self.mapbox_api_key)
- deck_widget = data.pop('deck_widget', None)
- if isinstance(self.tooltips, dict) or deck_widget is None:
- tooltip = self.tooltips
- else:
- tooltip = deck_widget.tooltip
- data = {k: v for k, v in recurse_data(data).items() if v is not None}
+ data, tooltip, mapbox_api_key = self._transform_deck_object(self.object)
# Delete undefined width and height
for view in data.get('views', []):
| {"golden_diff": "diff --git a/panel/pane/deckgl.py b/panel/pane/deckgl.py\n--- a/panel/pane/deckgl.py\n+++ b/panel/pane/deckgl.py\n@@ -15,6 +15,7 @@\n import numpy as np\n import param\n \n+from bokeh.core.serialization import Serializer\n from bokeh.models import ColumnDataSource\n from pyviz_comms import JupyterComm\n \n@@ -123,6 +124,8 @@\n 'view_state': 'viewState', 'tooltips': 'tooltip'\n }\n \n+ _pydeck_encoders_are_added: ClassVar[bool] = False\n+\n _updates: ClassVar[bool] = True\n \n priority: ClassVar[float | bool | None] = None\n@@ -196,6 +199,36 @@\n sources.append(cds)\n layer['data'] = sources.index(cds)\n \n+ @classmethod\n+ def _add_pydeck_encoders(cls):\n+ if cls._pydeck_encoders_are_added or 'pydeck' not in sys.modules:\n+ return\n+\n+ from pydeck.types import String\n+ def pydeck_string_encoder(obj, serializer):\n+ return obj.value\n+\n+ Serializer._encoders[String] = pydeck_string_encoder\n+\n+ def _transform_deck_object(self, obj):\n+ data = dict(obj.__dict__)\n+ mapbox_api_key = data.pop('mapbox_key', \"\") or self.mapbox_api_key\n+ deck_widget = data.pop('deck_widget', None)\n+ if isinstance(self.tooltips, dict) or deck_widget is None:\n+ tooltip = self.tooltips\n+ else:\n+ tooltip = deck_widget.tooltip\n+ data = {k: v for k, v in recurse_data(data).items() if v is not None}\n+\n+ if \"initialViewState\" in data:\n+ data[\"initialViewState\"]={\n+ k:v for k, v in data[\"initialViewState\"].items() if v is not None\n+ }\n+\n+ self._add_pydeck_encoders()\n+\n+ return data, tooltip, mapbox_api_key\n+\n def _transform_object(self, obj) -> Dict[str, Any]:\n if self.object is None:\n data, mapbox_api_key, tooltip = {}, self.mapbox_api_key, self.tooltips\n@@ -208,14 +241,7 @@\n mapbox_api_key = self.mapbox_api_key\n tooltip = self.tooltips\n else:\n- data = dict(self.object.__dict__)\n- mapbox_api_key = data.pop('mapbox_key', self.mapbox_api_key)\n- deck_widget = data.pop('deck_widget', None)\n- if isinstance(self.tooltips, dict) or deck_widget is None:\n- tooltip = self.tooltips\n- else:\n- tooltip = deck_widget.tooltip\n- data = {k: v for k, v in recurse_data(data).items() if v is not None}\n+ data, tooltip, mapbox_api_key = self._transform_deck_object(self.object)\n \n # Delete undefined width and height\n for view in data.get('views', []):\n", "issue": "Serialization Error When Visualizing PyDeck TextLayer\n#### ALL software version info\r\nPyDeck 0.8, Panel 1.2\r\n\r\n#### Description of expected behavior and the observed behavior\r\nI recently coded up a variation of the PyDeck TextLayer example [from the PyDeck website](https://deckgl.readthedocs.io/en/latest/gallery/text_layer.html) and then attempted to integrate that into a Panel application. Panel is having a serialization issue with the PyDeck String type. 
\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n[This Google Colab notebook](https://colab.research.google.com/drive/13ziK2OrM7n6zmqQDKJQ1_fvrFCCj2oGf?usp=sharing) shows the visualization running without issue in PyDeck and then shows my attempt to render that same visualization via Panel.\r\n\n", "before_files": [{"content": "\"\"\"\nDefines a PyDeck Pane which renders a PyDeck plot using a PyDeckPlot\nbokeh model.\n\"\"\"\nfrom __future__ import annotations\n\nimport json\nimport sys\n\nfrom collections import defaultdict\nfrom typing import (\n TYPE_CHECKING, Any, ClassVar, Dict, Mapping, Optional,\n)\n\nimport numpy as np\nimport param\n\nfrom bokeh.models import ColumnDataSource\nfrom pyviz_comms import JupyterComm\n\nfrom ..util import is_dataframe, lazy_load\nfrom .base import ModelPane\n\nif TYPE_CHECKING:\n from bokeh.document import Document\n from bokeh.model import Model\n from pyviz_comms import Comm\n\n\ndef lower_camel_case_keys(attrs):\n \"\"\"\n Makes all the keys in a dictionary camel-cased and lower-case\n\n Parameters\n ----------\n attrs : dict\n Dictionary for which all the keys should be converted to camel-case\n \"\"\"\n for snake_key in list(attrs.keys()):\n if '_' not in snake_key:\n continue\n camel_key = lower_first_letter(to_camel_case(snake_key))\n attrs[camel_key] = attrs.pop(snake_key)\n\n\ndef to_camel_case(snake_case: str) -> str:\n \"\"\"\n Makes a snake case string into a camel case one\n\n Parameters\n -----------\n snake_case : str\n Snake-cased string (e.g., \"snake_cased\") to be converted to camel-case (e.g., \"camelCase\")\n \"\"\"\n output_str = ''\n should_upper_case = False\n for c in snake_case:\n if c == '_':\n should_upper_case = True\n continue\n output_str = output_str + c.upper() if should_upper_case else output_str + c\n should_upper_case = False\n return output_str\n\n\ndef lower_first_letter(s: str) -> str:\n return s[:1].lower() + s[1:] if s else ''\n\n\ndef recurse_data(data):\n if hasattr(data, 'to_json'):\n data = data.__dict__\n if isinstance(data, dict):\n data = dict(data)\n lower_camel_case_keys(data)\n data = {k: recurse_data(v) if k != 'data' else v\n for k, v in data.items()}\n elif isinstance(data, list):\n data = [recurse_data(d) for d in data]\n return data\n\n\nclass DeckGL(ModelPane):\n \"\"\"\n The `DeckGL` pane renders the Deck.gl\n JSON specification as well as PyDeck plots inside a panel.\n\n Deck.gl is a very powerful WebGL-powered framework for visual exploratory\n data analysis of large datasets.\n\n Reference: https://panel.holoviz.org/reference/panes/DeckGL.html\n\n :Example:\n\n >>> pn.extension('deckgl')\n >>> DeckGL(\n ... some_deckgl_dict_or_pydeck_object,\n ... mapbox_api_key=MAPBOX_KEY, height=600\n ... 
)\n \"\"\"\n\n mapbox_api_key = param.String(default=None, doc=\"\"\"\n The MapBox API key if not supplied by a PyDeck object.\"\"\")\n\n tooltips = param.ClassSelector(default=True, class_=(bool, dict), doc=\"\"\"\n Whether to enable tooltips\"\"\")\n\n click_state = param.Dict(default={}, doc=\"\"\"\n Contains the last click event on the DeckGL plot.\"\"\")\n\n hover_state = param.Dict(default={}, doc=\"\"\"\n The current hover state of the DeckGL plot.\"\"\")\n\n view_state = param.Dict(default={}, doc=\"\"\"\n The current view state of the DeckGL plot.\"\"\")\n\n throttle = param.Dict(default={'view': 200, 'hover': 200}, doc=\"\"\"\n Throttling timeout (in milliseconds) for view state and hover\n events sent from the frontend.\"\"\")\n\n _rename: ClassVar[Mapping[str, str | None]] = {\n 'click_state': 'clickState', 'hover_state': 'hoverState',\n 'view_state': 'viewState', 'tooltips': 'tooltip'\n }\n\n _updates: ClassVar[bool] = True\n\n priority: ClassVar[float | bool | None] = None\n\n @classmethod\n def applies(cls, obj: Any) -> float | bool | None:\n if cls.is_pydeck(obj):\n return 0.8\n elif isinstance(obj, (dict, str)):\n return 0\n return False\n\n @classmethod\n def is_pydeck(cls, obj):\n if 'pydeck' in sys.modules:\n import pydeck\n return isinstance(obj, pydeck.bindings.deck.Deck)\n return False\n\n @classmethod\n def _process_data(cls, data):\n columns = defaultdict(list)\n for d in data:\n for col, val in d.items():\n columns[col].append(val)\n return {col: np.asarray(vals) for col, vals in columns.items()}\n\n @classmethod\n def _update_sources(cls, json_data, sources):\n layers = json_data.get('layers', [])\n\n # Create index of sources by columns\n source_columns = defaultdict(list)\n for i, source in enumerate(sources):\n key = tuple(sorted(source.data.keys()))\n source_columns[key].append((i, source))\n\n # Process\n unprocessed, unused = [], list(sources)\n for layer in layers:\n data = layer.get('data')\n if is_dataframe(data):\n data = ColumnDataSource.from_df(data)\n elif (isinstance(data, list) and data\n and isinstance(data[0], dict)):\n data = cls._process_data(data)\n else:\n continue\n\n key = tuple(sorted(data.keys()))\n existing = source_columns.get(key)\n if existing:\n index, cds = existing.pop()\n layer['data'] = index\n updates = {}\n for col, values in data.items():\n if not np.array_equal(data[col], cds.data[col]):\n updates[col] = values\n if updates:\n cds.data.update(updates)\n unused.remove(cds)\n else:\n unprocessed.append((layer, data))\n\n for layer, data in unprocessed:\n if unused:\n cds = unused.pop()\n cds.data = data\n else:\n cds = ColumnDataSource(data)\n sources.append(cds)\n layer['data'] = sources.index(cds)\n\n def _transform_object(self, obj) -> Dict[str, Any]:\n if self.object is None:\n data, mapbox_api_key, tooltip = {}, self.mapbox_api_key, self.tooltips\n elif isinstance(self.object, (str, dict)):\n if isinstance(self.object, str):\n data = json.loads(self.object)\n else:\n data = dict(self.object)\n data['layers'] = [dict(layer) for layer in data.get('layers', [])]\n mapbox_api_key = self.mapbox_api_key\n tooltip = self.tooltips\n else:\n data = dict(self.object.__dict__)\n mapbox_api_key = data.pop('mapbox_key', self.mapbox_api_key)\n deck_widget = data.pop('deck_widget', None)\n if isinstance(self.tooltips, dict) or deck_widget is None:\n tooltip = self.tooltips\n else:\n tooltip = deck_widget.tooltip\n data = {k: v for k, v in recurse_data(data).items() if v is not None}\n\n # Delete undefined width and height\n for view in 
data.get('views', []):\n if view.get('width', False) is None:\n view.pop('width')\n if view.get('height', False) is None:\n view.pop('height')\n\n return dict(data=data, tooltip=tooltip, mapbox_api_key=mapbox_api_key or \"\")\n\n def _get_model(\n self, doc: Document, root: Optional[Model] = None,\n parent: Optional[Model] = None, comm: Optional[Comm] = None\n ) -> Model:\n self._bokeh_model = DeckGLPlot = lazy_load(\n 'panel.models.deckgl', 'DeckGLPlot', isinstance(comm, JupyterComm), root\n )\n properties = self._get_properties(doc)\n data = properties.pop('data')\n properties['data_sources'] = sources = []\n self._update_sources(data, sources)\n properties['layers'] = data.pop('layers', [])\n properties['initialViewState'] = data.pop('initialViewState', {})\n model = DeckGLPlot(data=data, **properties)\n root = root or model\n self._link_props(model, ['clickState', 'hoverState', 'viewState'], doc, root, comm)\n self._models[root.ref[\"id\"]] = (model, parent)\n return model\n\n def _update(self, ref: str, model: Model) -> None:\n properties = self._get_properties(model.document)\n data = properties.pop('data')\n self._update_sources(data, model.data_sources)\n properties['data'] = data\n properties['layers'] = data.pop('layers', [])\n properties['initialViewState'] = data.pop('initialViewState', {})\n model.update(**properties)\n", "path": "panel/pane/deckgl.py"}], "after_files": [{"content": "\"\"\"\nDefines a PyDeck Pane which renders a PyDeck plot using a PyDeckPlot\nbokeh model.\n\"\"\"\nfrom __future__ import annotations\n\nimport json\nimport sys\n\nfrom collections import defaultdict\nfrom typing import (\n TYPE_CHECKING, Any, ClassVar, Dict, Mapping, Optional,\n)\n\nimport numpy as np\nimport param\n\nfrom bokeh.core.serialization import Serializer\nfrom bokeh.models import ColumnDataSource\nfrom pyviz_comms import JupyterComm\n\nfrom ..util import is_dataframe, lazy_load\nfrom .base import ModelPane\n\nif TYPE_CHECKING:\n from bokeh.document import Document\n from bokeh.model import Model\n from pyviz_comms import Comm\n\n\ndef lower_camel_case_keys(attrs):\n \"\"\"\n Makes all the keys in a dictionary camel-cased and lower-case\n\n Parameters\n ----------\n attrs : dict\n Dictionary for which all the keys should be converted to camel-case\n \"\"\"\n for snake_key in list(attrs.keys()):\n if '_' not in snake_key:\n continue\n camel_key = lower_first_letter(to_camel_case(snake_key))\n attrs[camel_key] = attrs.pop(snake_key)\n\n\ndef to_camel_case(snake_case: str) -> str:\n \"\"\"\n Makes a snake case string into a camel case one\n\n Parameters\n -----------\n snake_case : str\n Snake-cased string (e.g., \"snake_cased\") to be converted to camel-case (e.g., \"camelCase\")\n \"\"\"\n output_str = ''\n should_upper_case = False\n for c in snake_case:\n if c == '_':\n should_upper_case = True\n continue\n output_str = output_str + c.upper() if should_upper_case else output_str + c\n should_upper_case = False\n return output_str\n\n\ndef lower_first_letter(s: str) -> str:\n return s[:1].lower() + s[1:] if s else ''\n\n\ndef recurse_data(data):\n if hasattr(data, 'to_json'):\n data = data.__dict__\n if isinstance(data, dict):\n data = dict(data)\n lower_camel_case_keys(data)\n data = {k: recurse_data(v) if k != 'data' else v\n for k, v in data.items()}\n elif isinstance(data, list):\n data = [recurse_data(d) for d in data]\n return data\n\n\nclass DeckGL(ModelPane):\n \"\"\"\n The `DeckGL` pane renders the Deck.gl\n JSON specification as well as PyDeck plots inside a panel.\n\n 
Deck.gl is a very powerful WebGL-powered framework for visual exploratory\n data analysis of large datasets.\n\n Reference: https://panel.holoviz.org/reference/panes/DeckGL.html\n\n :Example:\n\n >>> pn.extension('deckgl')\n >>> DeckGL(\n ... some_deckgl_dict_or_pydeck_object,\n ... mapbox_api_key=MAPBOX_KEY, height=600\n ... )\n \"\"\"\n\n mapbox_api_key = param.String(default=None, doc=\"\"\"\n The MapBox API key if not supplied by a PyDeck object.\"\"\")\n\n tooltips = param.ClassSelector(default=True, class_=(bool, dict), doc=\"\"\"\n Whether to enable tooltips\"\"\")\n\n click_state = param.Dict(default={}, doc=\"\"\"\n Contains the last click event on the DeckGL plot.\"\"\")\n\n hover_state = param.Dict(default={}, doc=\"\"\"\n The current hover state of the DeckGL plot.\"\"\")\n\n view_state = param.Dict(default={}, doc=\"\"\"\n The current view state of the DeckGL plot.\"\"\")\n\n throttle = param.Dict(default={'view': 200, 'hover': 200}, doc=\"\"\"\n Throttling timeout (in milliseconds) for view state and hover\n events sent from the frontend.\"\"\")\n\n _rename: ClassVar[Mapping[str, str | None]] = {\n 'click_state': 'clickState', 'hover_state': 'hoverState',\n 'view_state': 'viewState', 'tooltips': 'tooltip'\n }\n\n _pydeck_encoders_are_added: ClassVar[bool] = False\n\n _updates: ClassVar[bool] = True\n\n priority: ClassVar[float | bool | None] = None\n\n @classmethod\n def applies(cls, obj: Any) -> float | bool | None:\n if cls.is_pydeck(obj):\n return 0.8\n elif isinstance(obj, (dict, str)):\n return 0\n return False\n\n @classmethod\n def is_pydeck(cls, obj):\n if 'pydeck' in sys.modules:\n import pydeck\n return isinstance(obj, pydeck.bindings.deck.Deck)\n return False\n\n @classmethod\n def _process_data(cls, data):\n columns = defaultdict(list)\n for d in data:\n for col, val in d.items():\n columns[col].append(val)\n return {col: np.asarray(vals) for col, vals in columns.items()}\n\n @classmethod\n def _update_sources(cls, json_data, sources):\n layers = json_data.get('layers', [])\n\n # Create index of sources by columns\n source_columns = defaultdict(list)\n for i, source in enumerate(sources):\n key = tuple(sorted(source.data.keys()))\n source_columns[key].append((i, source))\n\n # Process\n unprocessed, unused = [], list(sources)\n for layer in layers:\n data = layer.get('data')\n if is_dataframe(data):\n data = ColumnDataSource.from_df(data)\n elif (isinstance(data, list) and data\n and isinstance(data[0], dict)):\n data = cls._process_data(data)\n else:\n continue\n\n key = tuple(sorted(data.keys()))\n existing = source_columns.get(key)\n if existing:\n index, cds = existing.pop()\n layer['data'] = index\n updates = {}\n for col, values in data.items():\n if not np.array_equal(data[col], cds.data[col]):\n updates[col] = values\n if updates:\n cds.data.update(updates)\n unused.remove(cds)\n else:\n unprocessed.append((layer, data))\n\n for layer, data in unprocessed:\n if unused:\n cds = unused.pop()\n cds.data = data\n else:\n cds = ColumnDataSource(data)\n sources.append(cds)\n layer['data'] = sources.index(cds)\n\n @classmethod\n def _add_pydeck_encoders(cls):\n if cls._pydeck_encoders_are_added or 'pydeck' not in sys.modules:\n return\n\n from pydeck.types import String\n def pydeck_string_encoder(obj, serializer):\n return obj.value\n\n Serializer._encoders[String] = pydeck_string_encoder\n\n def _transform_deck_object(self, obj):\n data = dict(obj.__dict__)\n mapbox_api_key = data.pop('mapbox_key', \"\") or self.mapbox_api_key\n deck_widget = 
data.pop('deck_widget', None)\n if isinstance(self.tooltips, dict) or deck_widget is None:\n tooltip = self.tooltips\n else:\n tooltip = deck_widget.tooltip\n data = {k: v for k, v in recurse_data(data).items() if v is not None}\n\n if \"initialViewState\" in data:\n data[\"initialViewState\"]={\n k:v for k, v in data[\"initialViewState\"].items() if v is not None\n }\n\n self._add_pydeck_encoders()\n\n return data, tooltip, mapbox_api_key\n\n def _transform_object(self, obj) -> Dict[str, Any]:\n if self.object is None:\n data, mapbox_api_key, tooltip = {}, self.mapbox_api_key, self.tooltips\n elif isinstance(self.object, (str, dict)):\n if isinstance(self.object, str):\n data = json.loads(self.object)\n else:\n data = dict(self.object)\n data['layers'] = [dict(layer) for layer in data.get('layers', [])]\n mapbox_api_key = self.mapbox_api_key\n tooltip = self.tooltips\n else:\n data, tooltip, mapbox_api_key = self._transform_deck_object(self.object)\n\n # Delete undefined width and height\n for view in data.get('views', []):\n if view.get('width', False) is None:\n view.pop('width')\n if view.get('height', False) is None:\n view.pop('height')\n\n return dict(data=data, tooltip=tooltip, mapbox_api_key=mapbox_api_key or \"\")\n\n def _get_model(\n self, doc: Document, root: Optional[Model] = None,\n parent: Optional[Model] = None, comm: Optional[Comm] = None\n ) -> Model:\n self._bokeh_model = DeckGLPlot = lazy_load(\n 'panel.models.deckgl', 'DeckGLPlot', isinstance(comm, JupyterComm), root\n )\n properties = self._get_properties(doc)\n data = properties.pop('data')\n properties['data_sources'] = sources = []\n self._update_sources(data, sources)\n properties['layers'] = data.pop('layers', [])\n properties['initialViewState'] = data.pop('initialViewState', {})\n model = DeckGLPlot(data=data, **properties)\n root = root or model\n self._link_props(model, ['clickState', 'hoverState', 'viewState'], doc, root, comm)\n self._models[root.ref[\"id\"]] = (model, parent)\n return model\n\n def _update(self, ref: str, model: Model) -> None:\n properties = self._get_properties(model.document)\n data = properties.pop('data')\n self._update_sources(data, model.data_sources)\n properties['data'] = data\n properties['layers'] = data.pop('layers', [])\n properties['initialViewState'] = data.pop('initialViewState', {})\n model.update(**properties)\n", "path": "panel/pane/deckgl.py"}]} | 3,068 | 691 |
gh_patches_debug_21066 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-1428 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deprecation notice for 3.7 modifies global state (user warning filters) as import side-effect
It is impossible to filter Python37DeprecationWarning after PR https://github.com/googleapis/google-auth-library-python/pull/1371.
Custom libraries should not configure warning filters, because the warning filter list is the user project's global state. Most of the time you cannot change import order or insert new warning filters after your library has modified them; the sketch at the end of this issue shows why the library's filter takes precedence.
#### Environment details
- OS: Ubuntu 22.04.3 LTS linux 5.15.0-89-generic
- Python version: 3.7.17
- pip version: 23.3.1
- `google-auth` version: 2.24.0
#### Steps to reproduce
1. install google-auth into your python3.7 project
2. configure filterwarning rule `ignore::DeprecationWarning` in pytest.ini
3. use google.auth or google.oauth2 somewhere in your project
4. run pytest
 5. get a Python37DeprecationWarning that you cannot filter
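
For illustration, a minimal standalone sketch (simplified, independent of pytest) of why the `ignore::DeprecationWarning` rule loses to the library's import-time call:

```python
import warnings


class Python37DeprecationWarning(DeprecationWarning):
    pass


# The user's configuration, e.g. pytest's `filterwarnings = ignore::DeprecationWarning`.
warnings.simplefilter("ignore", DeprecationWarning)

# What the library does at import time: simplefilter() prepends its filter,
# so it matches before the user's "ignore" rule for this warning subclass.
warnings.simplefilter("once", Python37DeprecationWarning)

warnings.warn("Python 3.7 support ends soon", Python37DeprecationWarning)  # still emitted
```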
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/oauth2/__init__.py`
Content:
```
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Google OAuth 2.0 Library for Python."""
16
17 import sys
18 import warnings
19
20
21 class Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER
22 """
23 Deprecation warning raised when Python 3.7 runtime is detected.
24 Python 3.7 support will be dropped after January 1, 2024. See
25 https://cloud.google.com/python/docs/python37-sunset/ for more information.
26 """
27
28 pass
29
30
31 # Checks if the current runtime is Python 3.7.
32 if sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER
33 message = (
34 "After January 1, 2024, new releases of this library will drop support "
35 "for Python 3.7. More details about Python 3.7 support "
36 "can be found at https://cloud.google.com/python/docs/python37-sunset/"
37 )
38 # Configure the Python37DeprecationWarning warning so that it is only emitted once.
39 warnings.simplefilter("once", Python37DeprecationWarning)
40 warnings.warn(message, Python37DeprecationWarning)
41
```
Path: `google/auth/__init__.py`
Content:
```
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Google Auth Library for Python."""
16
17 import logging
18 import sys
19 import warnings
20
21 from google.auth import version as google_auth_version
22 from google.auth._default import (
23 default,
24 load_credentials_from_dict,
25 load_credentials_from_file,
26 )
27
28
29 __version__ = google_auth_version.__version__
30
31
32 __all__ = ["default", "load_credentials_from_file", "load_credentials_from_dict"]
33
34
35 class Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER
36 """
37 Deprecation warning raised when Python 3.7 runtime is detected.
38 Python 3.7 support will be dropped after January 1, 2024. See
39 https://cloud.google.com/python/docs/python37-sunset/ for more information.
40 """
41
42 pass
43
44
45 # Checks if the current runtime is Python 3.7.
46 if sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER
47 message = (
48 "After January 1, 2024, new releases of this library will drop support "
49 "for Python 3.7. More details about Python 3.7 support "
50 "can be found at https://cloud.google.com/python/docs/python37-sunset/"
51 )
52
53 # Configure the Python37DeprecationWarning warning so that it is only emitted once.
54 warnings.simplefilter("once", Python37DeprecationWarning)
55 warnings.warn(message, Python37DeprecationWarning)
56
57 # Set default logging handler to avoid "No handler found" warnings.
58 logging.getLogger(__name__).addHandler(logging.NullHandler())
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/google/auth/__init__.py b/google/auth/__init__.py
--- a/google/auth/__init__.py
+++ b/google/auth/__init__.py
@@ -49,9 +49,6 @@
"for Python 3.7. More details about Python 3.7 support "
"can be found at https://cloud.google.com/python/docs/python37-sunset/"
)
-
- # Configure the Python37DeprecationWarning warning so that it is only emitted once.
- warnings.simplefilter("once", Python37DeprecationWarning)
warnings.warn(message, Python37DeprecationWarning)
# Set default logging handler to avoid "No handler found" warnings.
diff --git a/google/oauth2/__init__.py b/google/oauth2/__init__.py
--- a/google/oauth2/__init__.py
+++ b/google/oauth2/__init__.py
@@ -35,6 +35,4 @@
"for Python 3.7. More details about Python 3.7 support "
"can be found at https://cloud.google.com/python/docs/python37-sunset/"
)
- # Configure the Python37DeprecationWarning warning so that it is only emitted once.
- warnings.simplefilter("once", Python37DeprecationWarning)
warnings.warn(message, Python37DeprecationWarning)
| {"golden_diff": "diff --git a/google/auth/__init__.py b/google/auth/__init__.py\n--- a/google/auth/__init__.py\n+++ b/google/auth/__init__.py\n@@ -49,9 +49,6 @@\n \"for Python 3.7. More details about Python 3.7 support \"\n \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n )\n-\n- # Configure the Python37DeprecationWarning warning so that it is only emitted once.\n- warnings.simplefilter(\"once\", Python37DeprecationWarning)\n warnings.warn(message, Python37DeprecationWarning)\n \n # Set default logging handler to avoid \"No handler found\" warnings.\ndiff --git a/google/oauth2/__init__.py b/google/oauth2/__init__.py\n--- a/google/oauth2/__init__.py\n+++ b/google/oauth2/__init__.py\n@@ -35,6 +35,4 @@\n \"for Python 3.7. More details about Python 3.7 support \"\n \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n )\n- # Configure the Python37DeprecationWarning warning so that it is only emitted once.\n- warnings.simplefilter(\"once\", Python37DeprecationWarning)\n warnings.warn(message, Python37DeprecationWarning)\n", "issue": "deprecation notice for 3.7 modifies global state (user warning filters) as import side-effect\nIt is impossible to filter Python37DeprecationWarning after PR https://github.com/googleapis/google-auth-library-python/pull/1371.\r\n\r\nCustom libraries should not configure warning filters, because it is user project's global state. Most of the times you cannot modify import order and insert new warning filters after your library modifies them.\r\n\r\n#### Environment details\r\n\r\n - OS: Ubuntu 22.04.3 LTS linux 5.15.0-89-generic\r\n - Python version: 3.7.17\r\n - pip version: 23.3.1\r\n - `google-auth` version: 2.24.0\r\n\r\n#### Steps to reproduce\r\n\r\n 1. install google-auth into your python3.7 project\r\n 2. configure filterwarning rule `ignore::DeprecationWarning` in pytest.ini\r\n 3. use google.auth or google.oauth2 somewhere in your project\r\n 4. run pytest\r\n 5. get Python37DeprecationWarning that you cannot filter \n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google OAuth 2.0 Library for Python.\"\"\"\n\nimport sys\nimport warnings\n\n\nclass Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER\n \"\"\"\n Deprecation warning raised when Python 3.7 runtime is detected.\n Python 3.7 support will be dropped after January 1, 2024. See\n https://cloud.google.com/python/docs/python37-sunset/ for more information.\n \"\"\"\n\n pass\n\n\n# Checks if the current runtime is Python 3.7.\nif sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER\n message = (\n \"After January 1, 2024, new releases of this library will drop support \"\n \"for Python 3.7. 
More details about Python 3.7 support \"\n \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n )\n # Configure the Python37DeprecationWarning warning so that it is only emitted once.\n warnings.simplefilter(\"once\", Python37DeprecationWarning)\n warnings.warn(message, Python37DeprecationWarning)\n", "path": "google/oauth2/__init__.py"}, {"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google Auth Library for Python.\"\"\"\n\nimport logging\nimport sys\nimport warnings\n\nfrom google.auth import version as google_auth_version\nfrom google.auth._default import (\n default,\n load_credentials_from_dict,\n load_credentials_from_file,\n)\n\n\n__version__ = google_auth_version.__version__\n\n\n__all__ = [\"default\", \"load_credentials_from_file\", \"load_credentials_from_dict\"]\n\n\nclass Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER\n \"\"\"\n Deprecation warning raised when Python 3.7 runtime is detected.\n Python 3.7 support will be dropped after January 1, 2024. See\n https://cloud.google.com/python/docs/python37-sunset/ for more information.\n \"\"\"\n\n pass\n\n\n# Checks if the current runtime is Python 3.7.\nif sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER\n message = (\n \"After January 1, 2024, new releases of this library will drop support \"\n \"for Python 3.7. More details about Python 3.7 support \"\n \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n )\n\n # Configure the Python37DeprecationWarning warning so that it is only emitted once.\n warnings.simplefilter(\"once\", Python37DeprecationWarning)\n warnings.warn(message, Python37DeprecationWarning)\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "google/auth/__init__.py"}], "after_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google OAuth 2.0 Library for Python.\"\"\"\n\nimport sys\nimport warnings\n\n\nclass Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER\n \"\"\"\n Deprecation warning raised when Python 3.7 runtime is detected.\n Python 3.7 support will be dropped after January 1, 2024. 
See\n https://cloud.google.com/python/docs/python37-sunset/ for more information.\n \"\"\"\n\n pass\n\n\n# Checks if the current runtime is Python 3.7.\nif sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER\n message = (\n \"After January 1, 2024, new releases of this library will drop support \"\n \"for Python 3.7. More details about Python 3.7 support \"\n \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n )\n warnings.warn(message, Python37DeprecationWarning)\n", "path": "google/oauth2/__init__.py"}, {"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google Auth Library for Python.\"\"\"\n\nimport logging\nimport sys\nimport warnings\n\nfrom google.auth import version as google_auth_version\nfrom google.auth._default import (\n default,\n load_credentials_from_dict,\n load_credentials_from_file,\n)\n\n\n__version__ = google_auth_version.__version__\n\n\n__all__ = [\"default\", \"load_credentials_from_file\", \"load_credentials_from_dict\"]\n\n\nclass Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER\n \"\"\"\n Deprecation warning raised when Python 3.7 runtime is detected.\n Python 3.7 support will be dropped after January 1, 2024. See\n https://cloud.google.com/python/docs/python37-sunset/ for more information.\n \"\"\"\n\n pass\n\n\n# Checks if the current runtime is Python 3.7.\nif sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER\n message = (\n \"After January 1, 2024, new releases of this library will drop support \"\n \"for Python 3.7. More details about Python 3.7 support \"\n \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n )\n warnings.warn(message, Python37DeprecationWarning)\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "google/auth/__init__.py"}]} | 1,578 | 296 |
gh_patches_debug_8968 | rasdani/github-patches | git_diff | pyca__cryptography-3335 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Python 3.6 support
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # This file is dual licensed under the terms of the Apache License, Version
4 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
5 # for complete details.
6
7 from __future__ import absolute_import, division, print_function
8
9 import os
10 import platform
11 import subprocess
12 import sys
13 from distutils.command.build import build
14
15 import pkg_resources
16
17 from setuptools import find_packages, setup
18 from setuptools.command.install import install
19 from setuptools.command.test import test
20
21
22 base_dir = os.path.dirname(__file__)
23 src_dir = os.path.join(base_dir, "src")
24
25 # When executing the setup.py, we need to be able to import ourselves, this
26 # means that we need to add the src/ directory to the sys.path.
27 sys.path.insert(0, src_dir)
28
29 about = {}
30 with open(os.path.join(src_dir, "cryptography", "__about__.py")) as f:
31 exec(f.read(), about)
32
33
34 VECTORS_DEPENDENCY = "cryptography_vectors=={0}".format(about['__version__'])
35
36 requirements = [
37 "idna>=2.0",
38 "pyasn1>=0.1.8",
39 "six>=1.4.1",
40 "setuptools>=11.3",
41 ]
42 setup_requirements = []
43
44 if sys.version_info < (3, 4):
45 requirements.append("enum34")
46
47 if sys.version_info < (3, 3):
48 requirements.append("ipaddress")
49
50 if platform.python_implementation() == "PyPy":
51 if sys.pypy_version_info < (2, 6):
52 raise RuntimeError(
53 "cryptography 1.0 is not compatible with PyPy < 2.6. Please "
54 "upgrade PyPy to use this library."
55 )
56 else:
57 requirements.append("cffi>=1.4.1")
58 setup_requirements.append("cffi>=1.4.1")
59
60 test_requirements = [
61 "pytest>=2.9.0",
62 "pretend",
63 "iso8601",
64 "pyasn1_modules",
65 "pytz",
66 ]
67 if sys.version_info[:2] > (2, 6):
68 test_requirements.append("hypothesis>=1.11.4")
69
70
71 # If there's no vectors locally that probably means we are in a tarball and
72 # need to go and get the matching vectors package from PyPi
73 if not os.path.exists(os.path.join(base_dir, "vectors/setup.py")):
74 test_requirements.append(VECTORS_DEPENDENCY)
75
76
77 def cc_is_available():
78 return sys.platform == "darwin" and list(map(
79 int, platform.mac_ver()[0].split("."))) >= [10, 8, 0]
80
81
82 backends = [
83 "openssl = cryptography.hazmat.backends.openssl:backend"
84 ]
85
86 if cc_is_available():
87 backends.append(
88 "commoncrypto = cryptography.hazmat.backends.commoncrypto:backend",
89 )
90
91
92 class PyTest(test):
93 def finalize_options(self):
94 test.finalize_options(self)
95 self.test_args = []
96 self.test_suite = True
97
98 # This means there's a vectors/ folder with the package in here.
99 # cd into it, install the vectors package and then refresh sys.path
100 if VECTORS_DEPENDENCY not in test_requirements:
101 subprocess.check_call(
102 [sys.executable, "setup.py", "install"], cwd="vectors"
103 )
104 pkg_resources.get_distribution("cryptography_vectors").activate()
105
106 def run_tests(self):
107 # Import here because in module scope the eggs are not loaded.
108 import pytest
109 test_args = [os.path.join(base_dir, "tests")]
110 errno = pytest.main(test_args)
111 sys.exit(errno)
112
113
114 def keywords_with_side_effects(argv):
115 """
116 Get a dictionary with setup keywords that (can) have side effects.
117
118 :param argv: A list of strings with command line arguments.
119 :returns: A dictionary with keyword arguments for the ``setup()`` function.
120
121 This setup.py script uses the setuptools 'setup_requires' feature because
122 this is required by the cffi package to compile extension modules. The
123 purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi
124 build process as a result of setup.py invocations that don't need the cffi
125 module to be built (setup.py serves the dual purpose of exposing package
126 metadata).
127
128 All of the options listed by ``python setup.py --help`` that print
129 information should be recognized here. The commands ``clean``,
130 ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.
131 Any combination of these options and commands is also supported.
132
133 This function was originally based on the `setup.py script`_ of SciPy (see
134 also the discussion in `pip issue #25`_).
135
136 .. _pip issue #25: https://github.com/pypa/pip/issues/25
137 .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py
138 """
139 no_setup_requires_arguments = (
140 '-h', '--help',
141 '-n', '--dry-run',
142 '-q', '--quiet',
143 '-v', '--verbose',
144 '-V', '--version',
145 '--author',
146 '--author-email',
147 '--classifiers',
148 '--contact',
149 '--contact-email',
150 '--description',
151 '--egg-base',
152 '--fullname',
153 '--help-commands',
154 '--keywords',
155 '--licence',
156 '--license',
157 '--long-description',
158 '--maintainer',
159 '--maintainer-email',
160 '--name',
161 '--no-user-cfg',
162 '--obsoletes',
163 '--platforms',
164 '--provides',
165 '--requires',
166 '--url',
167 'clean',
168 'egg_info',
169 'register',
170 'sdist',
171 'upload',
172 )
173
174 def is_short_option(argument):
175 """Check whether a command line argument is a short option."""
176 return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'
177
178 def expand_short_options(argument):
179 """Expand combined short options into canonical short options."""
180 return ('-' + char for char in argument[1:])
181
182 def argument_without_setup_requirements(argv, i):
183 """Check whether a command line argument needs setup requirements."""
184 if argv[i] in no_setup_requires_arguments:
185 # Simple case: An argument which is either an option or a command
186 # which doesn't need setup requirements.
187 return True
188 elif (is_short_option(argv[i]) and
189 all(option in no_setup_requires_arguments
190 for option in expand_short_options(argv[i]))):
191 # Not so simple case: Combined short options none of which need
192 # setup requirements.
193 return True
194 elif argv[i - 1:i] == ['--egg-base']:
195 # Tricky case: --egg-info takes an argument which should not make
196 # us use setup_requires (defeating the purpose of this code).
197 return True
198 else:
199 return False
200
201 if all(argument_without_setup_requirements(argv, i)
202 for i in range(1, len(argv))):
203 return {
204 "cmdclass": {
205 "build": DummyBuild,
206 "install": DummyInstall,
207 "test": DummyPyTest,
208 }
209 }
210 else:
211 cffi_modules = [
212 "src/_cffi_src/build_openssl.py:ffi",
213 "src/_cffi_src/build_constant_time.py:ffi",
214 "src/_cffi_src/build_padding.py:ffi",
215 ]
216 if cc_is_available():
217 cffi_modules.append("src/_cffi_src/build_commoncrypto.py:ffi")
218
219 return {
220 "setup_requires": setup_requirements,
221 "cmdclass": {
222 "test": PyTest,
223 },
224 "cffi_modules": cffi_modules
225 }
226
227
228 setup_requires_error = ("Requested setup command that needs 'setup_requires' "
229 "while command line arguments implied a side effect "
230 "free command or option.")
231
232
233 class DummyBuild(build):
234 """
235 This class makes it very obvious when ``keywords_with_side_effects()`` has
236 incorrectly interpreted the command line arguments to ``setup.py build`` as
237 one of the 'side effect free' commands or options.
238 """
239
240 def run(self):
241 raise RuntimeError(setup_requires_error)
242
243
244 class DummyInstall(install):
245 """
246 This class makes it very obvious when ``keywords_with_side_effects()`` has
247 incorrectly interpreted the command line arguments to ``setup.py install``
248 as one of the 'side effect free' commands or options.
249 """
250
251 def run(self):
252 raise RuntimeError(setup_requires_error)
253
254
255 class DummyPyTest(test):
256 """
257 This class makes it very obvious when ``keywords_with_side_effects()`` has
258 incorrectly interpreted the command line arguments to ``setup.py test`` as
259 one of the 'side effect free' commands or options.
260 """
261
262 def run_tests(self):
263 raise RuntimeError(setup_requires_error)
264
265
266 with open(os.path.join(base_dir, "README.rst")) as f:
267 long_description = f.read()
268
269
270 setup(
271 name=about["__title__"],
272 version=about["__version__"],
273
274 description=about["__summary__"],
275 long_description=long_description,
276 license=about["__license__"],
277 url=about["__uri__"],
278
279 author=about["__author__"],
280 author_email=about["__email__"],
281
282 classifiers=[
283 "Intended Audience :: Developers",
284 "License :: OSI Approved :: Apache Software License",
285 "License :: OSI Approved :: BSD License",
286 "Natural Language :: English",
287 "Operating System :: MacOS :: MacOS X",
288 "Operating System :: POSIX",
289 "Operating System :: POSIX :: BSD",
290 "Operating System :: POSIX :: Linux",
291 "Operating System :: Microsoft :: Windows",
292 "Programming Language :: Python",
293 "Programming Language :: Python :: 2",
294 "Programming Language :: Python :: 2.6",
295 "Programming Language :: Python :: 2.7",
296 "Programming Language :: Python :: 3",
297 "Programming Language :: Python :: 3.3",
298 "Programming Language :: Python :: 3.4",
299 "Programming Language :: Python :: 3.5",
300 "Programming Language :: Python :: Implementation :: CPython",
301 "Programming Language :: Python :: Implementation :: PyPy",
302 "Topic :: Security :: Cryptography",
303 ],
304
305 package_dir={"": "src"},
306 packages=find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"]),
307 include_package_data=True,
308
309 install_requires=requirements,
310 tests_require=test_requirements,
311 extras_require={
312 "test": test_requirements,
313 "docstest": [
314 "doc8",
315 "pyenchant",
316 "readme_renderer >= 16.0",
317 "sphinx",
318 "sphinx_rtd_theme",
319 "sphinxcontrib-spelling",
320 ],
321 "pep8test": [
322 "flake8",
323 "flake8-import-order",
324 "pep8-naming",
325 ],
326 },
327
328 # for cffi
329 zip_safe=False,
330 ext_package="cryptography.hazmat.bindings",
331 entry_points={
332 "cryptography.backends": backends,
333 },
334 **keywords_with_side_effects(sys.argv)
335 )
336
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -297,6 +297,7 @@
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Security :: Cryptography",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -297,6 +297,7 @@\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n+ \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n", "issue": "Add Python 3.6 support\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools>=11.3\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (2, 6):\n raise RuntimeError(\n \"cryptography 1.0 is not compatible with PyPy < 2.6. 
Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n requirements.append(\"cffi>=1.4.1\")\n setup_requirements.append(\"cffi>=1.4.1\")\n\ntest_requirements = [\n \"pytest>=2.9.0\",\n \"pretend\",\n \"iso8601\",\n \"pyasn1_modules\",\n \"pytz\",\n]\nif sys.version_info[:2] > (2, 6):\n test_requirements.append(\"hypothesis>=1.11.4\")\n\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n 
long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require={\n \"test\": test_requirements,\n \"docstest\": [\n \"doc8\",\n \"pyenchant\",\n \"readme_renderer >= 16.0\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib-spelling\",\n ],\n \"pep8test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools>=11.3\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (2, 6):\n raise RuntimeError(\n \"cryptography 1.0 is not compatible with PyPy < 2.6. 
Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n requirements.append(\"cffi>=1.4.1\")\n setup_requirements.append(\"cffi>=1.4.1\")\n\ntest_requirements = [\n \"pytest>=2.9.0\",\n \"pretend\",\n \"iso8601\",\n \"pyasn1_modules\",\n \"pytz\",\n]\nif sys.version_info[:2] > (2, 6):\n test_requirements.append(\"hypothesis>=1.11.4\")\n\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n 
long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require={\n \"test\": test_requirements,\n \"docstest\": [\n \"doc8\",\n \"pyenchant\",\n \"readme_renderer >= 16.0\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib-spelling\",\n ],\n \"pep8test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}]} | 3,610 | 116 |
gh_patches_debug_26498 | rasdani/github-patches | git_diff | privacyidea__privacyidea-3836 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OTP+PIN does not work offline
<!--
Thank you for filing a bug within privacyIDEA. The more information you provide, the easier it is to identify the problem!
Issues are not meant for asking questions! You're welcome to ask questions in our community forum: https://community.privacyidea.org
privacyIDEA is licensed under the AGPL and comes without warranty. If you require reliable, professional help, please consider getting either
- consultancy
(https://netknights.it/en/leistungen/one-time-services/)
- or the privacyIDEA Enterprise Edition
(https://netknights.it/en/leistungen/service-level-agreements/)
-->
### Top-level intent
Use offline authentication with an OTP PIN that is placed *behind* the OTP value: OTP+PIN.
### Expected outcome
Be able to authenticate offline with OTP+PIN.
### Actual outcome
The online authentication succeeds, but all offline authentications fail.
### Solution
The offline data for HOTP is created by concatenating PIN+OTP in a fixed manner here:
https://github.com/privacyidea/privacyidea/blob/1eb6c8ff6b283ae75d62e5d2c2635e55276d5515/privacyidea/lib/applications/offline.py#L86
This means the user will try to authenticate with OTP+PIN and will always fail.
We will have to get the config value of "prepend PIN" and use it to concatenate either PIN+OTP or OTP+PIN.
--- END ISSUE ---
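To make the proposed solution concrete, the following is a minimal, self-contained sketch of the hashing step with the "prepend PIN" setting taken into account. The helper name `hash_offline_otps` is introduced purely for illustration; in privacyIDEA itself the setting would be read via `get_prepend_pin()` from `privacyidea.lib.config` and applied inside `MachineApplication.get_offline_otps()`.
```python
from passlib.hash import pbkdf2_sha512


def hash_offline_otps(otps, otppin, prepend_pin, rounds=6549):
    """Hash offline OTP values, honoring the "prepend PIN" setting.

    If prepend_pin is True the offline password is PIN+OTP,
    otherwise it is OTP+PIN.
    """
    for key, otp in otps.items():
        # Build the password in the order the user will actually type it.
        otppw = otppin + otp if prepend_pin else otp + otppin
        otps[key] = pbkdf2_sha512.using(rounds=rounds, salt_size=10).hash(otppw)
    return otps
```
With `prepend_pin=False` this hashes OTP+PIN, which is exactly the order a user with a PIN placed behind the OTP value will type offline.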
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `privacyidea/lib/applications/offline.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # 2015-04-08 Cornelius Kölbel <[email protected]>
4 # Add options ROUNDS to avoid timeouts during OTP hash calculation
5 # 2015-04-03 Cornelius Kölbel <[email protected]>
6 # Use pbkdf2 for OTP hashing
7 # 2015-03-13 Cornelius Kölbel, <[email protected]>
8 # initial writeup
9 #
10 # License: AGPLv3
11 # contact: http://www.privacyidea.org
12 #
13 # This code is free software; you can redistribute it and/or
14 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
15 # License as published by the Free Software Foundation; either
16 # version 3 of the License, or any later version.
17 #
18 # This code is distributed in the hope that it will be useful,
19 # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
22 #
23 # You should have received a copy of the GNU Affero General Public
24 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
25 #
26 from privacyidea.lib.applications import MachineApplicationBase
27 from privacyidea.lib.crypto import geturandom
28 from privacyidea.lib.error import ValidateError, ParameterError
29 import logging
30 from passlib.hash import pbkdf2_sha512
31 from privacyidea.lib.token import get_tokens
32 from privacyidea.lib.policy import TYPE
33 log = logging.getLogger(__name__)
34 ROUNDS = 6549
35 REFILLTOKEN_LENGTH = 40
36
37
38 class MachineApplication(MachineApplicationBase):
39 """
40 This is the application for Offline authentication with PAM or
41 the privacyIDEA credential provider.
42
43 The machine application returns a list of salted OTP hashes to be used with
44 offline authentication. The token then is disabled, so that it can not
45 be used for online authentication anymore, to avoid reusing a fished OTP
46 value.
47
48 The server stores the information, which OTP values were issued.
49
50 options options:
51 * user: a username.
52 * count: is the number of OTP values returned
53
54 """
55 application_name = "offline"
56
57 @staticmethod
58 def generate_new_refilltoken(token_obj):
59 """
60 Generate new refill token and store it in the tokeninfo of the token.
61 :param token_obj: token in question
62 :return: a string
63 """
64 new_refilltoken = geturandom(REFILLTOKEN_LENGTH, hex=True)
65 token_obj.add_tokeninfo("refilltoken", new_refilltoken)
66 return new_refilltoken
67
68 @staticmethod
69 def get_offline_otps(token_obj, otppin, amount, rounds=ROUNDS):
70 """
71 Retrieve the desired number of passwords (= PIN + OTP), hash them
72 and return them in a dictionary. Increase the token counter.
73 :param token_obj: token in question
74 :param otppin: The OTP PIN to prepend in the passwords. The PIN is not validated!
75 :param amount: Number of OTP values (non-negative!)
76 :param rounds: Number of PBKDF2 rounds
77 :return: dictionary
78 """
79 if amount < 0:
80 raise ParameterError("Invalid refill amount: {!r}".format(amount))
81 (res, err, otp_dict) = token_obj.get_multi_otp(count=amount, counter_index=True)
82 otps = otp_dict.get("otp")
83 for key in otps.keys():
84 # Return the hash of OTP PIN and OTP values
85 otps[key] = pbkdf2_sha512.using(
86 rounds=rounds, salt_size=10).hash(otppin + otps.get(key))
87 # We do not disable the token, so if all offline OTP values
88 # are used, the token can be used the authenticate online again.
89 # token_obj.enable(False)
90 # increase the counter by the consumed values and
91 # also store it in tokeninfo.
92 token_obj.inc_otp_counter(increment=amount)
93
94 return otps
95
96 @staticmethod
97 def get_refill(token_obj, password, options=None):
98 """
99 Returns new authentication OTPs to refill the client
100
101 To do so we also verify the password, which may consist of PIN + OTP.
102
103 :param token_obj: Token object
104 :param password: PIN + OTP
105 :param options: dict that might contain "count" and "rounds"
106 :return: a dictionary of auth items
107 """
108 options = options or {}
109 count = int(options.get("count", 100))
110 rounds = int(options.get("rounds", ROUNDS))
111 _r, otppin, otpval = token_obj.split_pin_pass(password)
112 if not _r:
113 raise ParameterError("Could not split password")
114 current_token_counter = token_obj.token.count
115 first_offline_counter = current_token_counter - count
116 if first_offline_counter < 0:
117 first_offline_counter = 0
118 # find the value in the offline OTP values! This resets the token.count!
119 matching_count = token_obj.check_otp(otpval, first_offline_counter, count)
120 token_obj.set_otp_count(current_token_counter)
121 # Raise an exception *after* we reset the token counter
122 if matching_count < 0:
123 raise ValidateError("You provided a wrong OTP value.")
124 # We have to add 1 here: Assume *first_offline_counter* is the counter value of the first offline OTP
125 # we sent to the client. Assume the client then requests a refill with that exact OTP value.
126 # Then, we need to respond with a refill of one OTP value, as the client has consumed one OTP value.
127 counter_diff = matching_count - first_offline_counter + 1
128 otps = MachineApplication.get_offline_otps(token_obj, otppin, counter_diff, rounds)
129 token_obj.add_tokeninfo(key="offline_counter",
130 value=count)
131 return otps
132
133 @staticmethod
134 def get_authentication_item(token_type,
135 serial,
136 challenge=None, options=None,
137 filter_param=None):
138 """
139 :param token_type: the type of the token. At the moment
140 we only support "HOTP" token. Supporting time
141 based tokens is difficult, since we would have to
142 return a looooong list of OTP values.
143 Supporting "yubikey" token (AES) would be
144 possible, too.
145 :param serial: the serial number of the token.
146 :param challenge: This can contain the password (otp pin + otp
147 value) so that we can put the OTP PIN into the hashed response.
148 :type challenge: basestring
149 :return auth_item: A list of hashed OTP values
150 """
151 ret = {}
152 options = options or {}
153 password = challenge
154 if token_type.lower() == "hotp":
155 tokens = get_tokens(serial=serial)
156 if len(tokens) == 1:
157 token_obj = tokens[0]
158 if password:
159 _r, otppin, _ = token_obj.split_pin_pass(password)
160 if not _r:
161 raise ParameterError("Could not split password")
162 else:
163 otppin = ""
164 otps = MachineApplication.get_offline_otps(token_obj,
165 otppin,
166 int(options.get("count", 100)),
167 int(options.get("rounds", ROUNDS)))
168 refilltoken = MachineApplication.generate_new_refilltoken(token_obj)
169 ret["response"] = otps
170 ret["refilltoken"] = refilltoken
171 user_object = token_obj.user
172 if user_object:
173 uInfo = user_object.info
174 if "username" in uInfo:
175 ret["user"] = ret["username"] = uInfo.get("username")
176
177 else:
178 log.info("Token %r, type %r is not supported by "
179 "OFFLINE application module" % (serial, token_type))
180
181 return ret
182
183 @staticmethod
184 def get_options():
185 """
186 returns a dictionary with a list of required and optional options
187 """
188 return {'count': {'type': TYPE.STRING},
189 'rounds': {'type': TYPE.STRING}}
190
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/privacyidea/lib/applications/offline.py b/privacyidea/lib/applications/offline.py
--- a/privacyidea/lib/applications/offline.py
+++ b/privacyidea/lib/applications/offline.py
@@ -29,6 +29,7 @@
import logging
from passlib.hash import pbkdf2_sha512
from privacyidea.lib.token import get_tokens
+from privacyidea.lib.config import get_prepend_pin
from privacyidea.lib.policy import TYPE
log = logging.getLogger(__name__)
ROUNDS = 6549
@@ -80,10 +81,12 @@
raise ParameterError("Invalid refill amount: {!r}".format(amount))
(res, err, otp_dict) = token_obj.get_multi_otp(count=amount, counter_index=True)
otps = otp_dict.get("otp")
- for key in otps.keys():
+ prepend_pin = get_prepend_pin()
+ for key, otp in otps.items():
# Return the hash of OTP PIN and OTP values
+ otppw = otppin + otp if prepend_pin else otp + otppin
otps[key] = pbkdf2_sha512.using(
- rounds=rounds, salt_size=10).hash(otppin + otps.get(key))
+ rounds=rounds, salt_size=10).hash(otppw)
# We do not disable the token, so if all offline OTP values
# are used, the token can be used the authenticate online again.
# token_obj.enable(False)
| {"golden_diff": "diff --git a/privacyidea/lib/applications/offline.py b/privacyidea/lib/applications/offline.py\n--- a/privacyidea/lib/applications/offline.py\n+++ b/privacyidea/lib/applications/offline.py\n@@ -29,6 +29,7 @@\n import logging\n from passlib.hash import pbkdf2_sha512\n from privacyidea.lib.token import get_tokens\n+from privacyidea.lib.config import get_prepend_pin\n from privacyidea.lib.policy import TYPE\n log = logging.getLogger(__name__)\n ROUNDS = 6549\n@@ -80,10 +81,12 @@\n raise ParameterError(\"Invalid refill amount: {!r}\".format(amount))\n (res, err, otp_dict) = token_obj.get_multi_otp(count=amount, counter_index=True)\n otps = otp_dict.get(\"otp\")\n- for key in otps.keys():\n+ prepend_pin = get_prepend_pin()\n+ for key, otp in otps.items():\n # Return the hash of OTP PIN and OTP values\n+ otppw = otppin + otp if prepend_pin else otp + otppin\n otps[key] = pbkdf2_sha512.using(\n- rounds=rounds, salt_size=10).hash(otppin + otps.get(key))\n+ rounds=rounds, salt_size=10).hash(otppw)\n # We do not disable the token, so if all offline OTP values\n # are used, the token can be used the authenticate online again.\n # token_obj.enable(False)\n", "issue": "OTP+PIN does not work offline\n<!--\r\nThank you for filing a bug within privacyIDEA. The more information you provide, the easier it is to identify the problem!\r\n\r\nIssues are not meant for asking questions! You're welcome to ask questions in our community forum: https://community.privacyidea.org\r\n\r\nprivacyIDEA is licensed under the AGPL and comes without warranty. If you require reliable, professional help, please consider getting either\r\n- consultancy \r\n (https://netknights.it/en/leistungen/one-time-services/)\r\n- or the privacyIDEA Enterprise Edition \r\n (https://netknights.it/en/leistungen/service-level-agreements/)\r\n-->\r\n\r\n### Top-level intent\r\n\r\nDo offline with an OTP pin that is used *behind* the OTP value: OTP+PIN.\r\n\r\n### Expected outcome\r\n\r\nBe able to authenticate offline with OTP+PIN, \r\n\r\n### Actual outcome\r\n\r\nThe online authentication works out, all offline authentications fail.\r\n\r\n### Solution\r\n\r\nThe offline data for HOTP is created by in a fixed manner concatenate PIN+OTP here:\r\n\r\nhttps://github.com/privacyidea/privacyidea/blob/1eb6c8ff6b283ae75d62e5d2c2635e55276d5515/privacyidea/lib/applications/offline.py#L86\r\n\r\nThis means the user will try to authenticate with OTP+PIN and will always fail.\r\nWe will have to get the config value of \"prepend PIN\" and use it to either concatenate PIN+OTP or OTP+PIN.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2015-04-08 Cornelius K\u00f6lbel <[email protected]>\n# Add options ROUNDS to avoid timeouts during OTP hash calculation\n# 2015-04-03 Cornelius K\u00f6lbel <[email protected]>\n# Use pbkdf2 for OTP hashing\n# 2015-03-13 Cornelius K\u00f6lbel, <[email protected]>\n# initial writeup\n#\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nfrom privacyidea.lib.applications import MachineApplicationBase\nfrom privacyidea.lib.crypto import geturandom\nfrom privacyidea.lib.error import ValidateError, ParameterError\nimport logging\nfrom passlib.hash import pbkdf2_sha512\nfrom privacyidea.lib.token import get_tokens\nfrom privacyidea.lib.policy import TYPE\nlog = logging.getLogger(__name__)\nROUNDS = 6549\nREFILLTOKEN_LENGTH = 40\n\n\nclass MachineApplication(MachineApplicationBase):\n \"\"\"\n This is the application for Offline authentication with PAM or\n the privacyIDEA credential provider.\n\n The machine application returns a list of salted OTP hashes to be used with\n offline authentication. The token then is disabled, so that it can not\n be used for online authentication anymore, to avoid reusing a fished OTP\n value.\n\n The server stores the information, which OTP values were issued.\n\n options options:\n * user: a username.\n * count: is the number of OTP values returned\n\n \"\"\"\n application_name = \"offline\"\n\n @staticmethod\n def generate_new_refilltoken(token_obj):\n \"\"\"\n Generate new refill token and store it in the tokeninfo of the token.\n :param token_obj: token in question\n :return: a string\n \"\"\"\n new_refilltoken = geturandom(REFILLTOKEN_LENGTH, hex=True)\n token_obj.add_tokeninfo(\"refilltoken\", new_refilltoken)\n return new_refilltoken\n\n @staticmethod\n def get_offline_otps(token_obj, otppin, amount, rounds=ROUNDS):\n \"\"\"\n Retrieve the desired number of passwords (= PIN + OTP), hash them\n and return them in a dictionary. Increase the token counter.\n :param token_obj: token in question\n :param otppin: The OTP PIN to prepend in the passwords. 
The PIN is not validated!\n :param amount: Number of OTP values (non-negative!)\n :param rounds: Number of PBKDF2 rounds\n :return: dictionary\n \"\"\"\n if amount < 0:\n raise ParameterError(\"Invalid refill amount: {!r}\".format(amount))\n (res, err, otp_dict) = token_obj.get_multi_otp(count=amount, counter_index=True)\n otps = otp_dict.get(\"otp\")\n for key in otps.keys():\n # Return the hash of OTP PIN and OTP values\n otps[key] = pbkdf2_sha512.using(\n rounds=rounds, salt_size=10).hash(otppin + otps.get(key))\n # We do not disable the token, so if all offline OTP values\n # are used, the token can be used the authenticate online again.\n # token_obj.enable(False)\n # increase the counter by the consumed values and\n # also store it in tokeninfo.\n token_obj.inc_otp_counter(increment=amount)\n\n return otps\n\n @staticmethod\n def get_refill(token_obj, password, options=None):\n \"\"\"\n Returns new authentication OTPs to refill the client\n\n To do so we also verify the password, which may consist of PIN + OTP.\n\n :param token_obj: Token object\n :param password: PIN + OTP\n :param options: dict that might contain \"count\" and \"rounds\"\n :return: a dictionary of auth items\n \"\"\"\n options = options or {}\n count = int(options.get(\"count\", 100))\n rounds = int(options.get(\"rounds\", ROUNDS))\n _r, otppin, otpval = token_obj.split_pin_pass(password)\n if not _r:\n raise ParameterError(\"Could not split password\")\n current_token_counter = token_obj.token.count\n first_offline_counter = current_token_counter - count\n if first_offline_counter < 0:\n first_offline_counter = 0\n # find the value in the offline OTP values! This resets the token.count!\n matching_count = token_obj.check_otp(otpval, first_offline_counter, count)\n token_obj.set_otp_count(current_token_counter)\n # Raise an exception *after* we reset the token counter\n if matching_count < 0:\n raise ValidateError(\"You provided a wrong OTP value.\")\n # We have to add 1 here: Assume *first_offline_counter* is the counter value of the first offline OTP\n # we sent to the client. Assume the client then requests a refill with that exact OTP value.\n # Then, we need to respond with a refill of one OTP value, as the client has consumed one OTP value.\n counter_diff = matching_count - first_offline_counter + 1\n otps = MachineApplication.get_offline_otps(token_obj, otppin, counter_diff, rounds)\n token_obj.add_tokeninfo(key=\"offline_counter\",\n value=count)\n return otps\n\n @staticmethod\n def get_authentication_item(token_type,\n serial,\n challenge=None, options=None,\n filter_param=None):\n \"\"\"\n :param token_type: the type of the token. At the moment\n we only support \"HOTP\" token. 
Supporting time\n based tokens is difficult, since we would have to\n return a looooong list of OTP values.\n Supporting \"yubikey\" token (AES) would be\n possible, too.\n :param serial: the serial number of the token.\n :param challenge: This can contain the password (otp pin + otp\n value) so that we can put the OTP PIN into the hashed response.\n :type challenge: basestring\n :return auth_item: A list of hashed OTP values\n \"\"\"\n ret = {}\n options = options or {}\n password = challenge\n if token_type.lower() == \"hotp\":\n tokens = get_tokens(serial=serial)\n if len(tokens) == 1:\n token_obj = tokens[0]\n if password:\n _r, otppin, _ = token_obj.split_pin_pass(password)\n if not _r:\n raise ParameterError(\"Could not split password\")\n else:\n otppin = \"\"\n otps = MachineApplication.get_offline_otps(token_obj,\n otppin,\n int(options.get(\"count\", 100)),\n int(options.get(\"rounds\", ROUNDS)))\n refilltoken = MachineApplication.generate_new_refilltoken(token_obj)\n ret[\"response\"] = otps\n ret[\"refilltoken\"] = refilltoken\n user_object = token_obj.user\n if user_object:\n uInfo = user_object.info\n if \"username\" in uInfo:\n ret[\"user\"] = ret[\"username\"] = uInfo.get(\"username\")\n\n else:\n log.info(\"Token %r, type %r is not supported by \"\n \"OFFLINE application module\" % (serial, token_type))\n\n return ret\n\n @staticmethod\n def get_options():\n \"\"\"\n returns a dictionary with a list of required and optional options\n \"\"\"\n return {'count': {'type': TYPE.STRING},\n 'rounds': {'type': TYPE.STRING}}\n", "path": "privacyidea/lib/applications/offline.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2015-04-08 Cornelius K\u00f6lbel <[email protected]>\n# Add options ROUNDS to avoid timeouts during OTP hash calculation\n# 2015-04-03 Cornelius K\u00f6lbel <[email protected]>\n# Use pbkdf2 for OTP hashing\n# 2015-03-13 Cornelius K\u00f6lbel, <[email protected]>\n# initial writeup\n#\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nfrom privacyidea.lib.applications import MachineApplicationBase\nfrom privacyidea.lib.crypto import geturandom\nfrom privacyidea.lib.error import ValidateError, ParameterError\nimport logging\nfrom passlib.hash import pbkdf2_sha512\nfrom privacyidea.lib.token import get_tokens\nfrom privacyidea.lib.config import get_prepend_pin\nfrom privacyidea.lib.policy import TYPE\nlog = logging.getLogger(__name__)\nROUNDS = 6549\nREFILLTOKEN_LENGTH = 40\n\n\nclass MachineApplication(MachineApplicationBase):\n \"\"\"\n This is the application for Offline authentication with PAM or\n the privacyIDEA credential provider.\n\n The machine application returns a list of salted OTP hashes to be used with\n offline authentication. 
The token then is disabled, so that it can not\n be used for online authentication anymore, to avoid reusing a fished OTP\n value.\n\n The server stores the information, which OTP values were issued.\n\n options options:\n * user: a username.\n * count: is the number of OTP values returned\n\n \"\"\"\n application_name = \"offline\"\n\n @staticmethod\n def generate_new_refilltoken(token_obj):\n \"\"\"\n Generate new refill token and store it in the tokeninfo of the token.\n :param token_obj: token in question\n :return: a string\n \"\"\"\n new_refilltoken = geturandom(REFILLTOKEN_LENGTH, hex=True)\n token_obj.add_tokeninfo(\"refilltoken\", new_refilltoken)\n return new_refilltoken\n\n @staticmethod\n def get_offline_otps(token_obj, otppin, amount, rounds=ROUNDS):\n \"\"\"\n Retrieve the desired number of passwords (= PIN + OTP), hash them\n and return them in a dictionary. Increase the token counter.\n :param token_obj: token in question\n :param otppin: The OTP PIN to prepend in the passwords. The PIN is not validated!\n :param amount: Number of OTP values (non-negative!)\n :param rounds: Number of PBKDF2 rounds\n :return: dictionary\n \"\"\"\n if amount < 0:\n raise ParameterError(\"Invalid refill amount: {!r}\".format(amount))\n (res, err, otp_dict) = token_obj.get_multi_otp(count=amount, counter_index=True)\n otps = otp_dict.get(\"otp\")\n prepend_pin = get_prepend_pin()\n for key, otp in otps.items():\n # Return the hash of OTP PIN and OTP values\n otppw = otppin + otp if prepend_pin else otp + otppin\n otps[key] = pbkdf2_sha512.using(\n rounds=rounds, salt_size=10).hash(otppw)\n # We do not disable the token, so if all offline OTP values\n # are used, the token can be used the authenticate online again.\n # token_obj.enable(False)\n # increase the counter by the consumed values and\n # also store it in tokeninfo.\n token_obj.inc_otp_counter(increment=amount)\n\n return otps\n\n @staticmethod\n def get_refill(token_obj, password, options=None):\n \"\"\"\n Returns new authentication OTPs to refill the client\n\n To do so we also verify the password, which may consist of PIN + OTP.\n\n :param token_obj: Token object\n :param password: PIN + OTP\n :param options: dict that might contain \"count\" and \"rounds\"\n :return: a dictionary of auth items\n \"\"\"\n options = options or {}\n count = int(options.get(\"count\", 100))\n rounds = int(options.get(\"rounds\", ROUNDS))\n _r, otppin, otpval = token_obj.split_pin_pass(password)\n if not _r:\n raise ParameterError(\"Could not split password\")\n current_token_counter = token_obj.token.count\n first_offline_counter = current_token_counter - count\n if first_offline_counter < 0:\n first_offline_counter = 0\n # find the value in the offline OTP values! This resets the token.count!\n matching_count = token_obj.check_otp(otpval, first_offline_counter, count)\n token_obj.set_otp_count(current_token_counter)\n # Raise an exception *after* we reset the token counter\n if matching_count < 0:\n raise ValidateError(\"You provided a wrong OTP value.\")\n # We have to add 1 here: Assume *first_offline_counter* is the counter value of the first offline OTP\n # we sent to the client. 
Assume the client then requests a refill with that exact OTP value.\n # Then, we need to respond with a refill of one OTP value, as the client has consumed one OTP value.\n counter_diff = matching_count - first_offline_counter + 1\n otps = MachineApplication.get_offline_otps(token_obj, otppin, counter_diff, rounds)\n token_obj.add_tokeninfo(key=\"offline_counter\",\n value=count)\n return otps\n\n @staticmethod\n def get_authentication_item(token_type,\n serial,\n challenge=None, options=None,\n filter_param=None):\n \"\"\"\n :param token_type: the type of the token. At the moment\n we only support \"HOTP\" token. Supporting time\n based tokens is difficult, since we would have to\n return a looooong list of OTP values.\n Supporting \"yubikey\" token (AES) would be\n possible, too.\n :param serial: the serial number of the token.\n :param challenge: This can contain the password (otp pin + otp\n value) so that we can put the OTP PIN into the hashed response.\n :type challenge: basestring\n :return auth_item: A list of hashed OTP values\n \"\"\"\n ret = {}\n options = options or {}\n password = challenge\n if token_type.lower() == \"hotp\":\n tokens = get_tokens(serial=serial)\n if len(tokens) == 1:\n token_obj = tokens[0]\n if password:\n _r, otppin, _ = token_obj.split_pin_pass(password)\n if not _r:\n raise ParameterError(\"Could not split password\")\n else:\n otppin = \"\"\n otps = MachineApplication.get_offline_otps(token_obj,\n otppin,\n int(options.get(\"count\", 100)),\n int(options.get(\"rounds\", ROUNDS)))\n refilltoken = MachineApplication.generate_new_refilltoken(token_obj)\n ret[\"response\"] = otps\n ret[\"refilltoken\"] = refilltoken\n user_object = token_obj.user\n if user_object:\n uInfo = user_object.info\n if \"username\" in uInfo:\n ret[\"user\"] = ret[\"username\"] = uInfo.get(\"username\")\n\n else:\n log.info(\"Token %r, type %r is not supported by \"\n \"OFFLINE application module\" % (serial, token_type))\n\n return ret\n\n @staticmethod\n def get_options():\n \"\"\"\n returns a dictionary with a list of required and optional options\n \"\"\"\n return {'count': {'type': TYPE.STRING},\n 'rounds': {'type': TYPE.STRING}}\n", "path": "privacyidea/lib/applications/offline.py"}]} | 2,886 | 346 |
gh_patches_debug_7882 | rasdani/github-patches | git_diff | numpy__numpy-15189 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TST: Add the first test using hypothesis
This pull request adds the first test that uses hypothesis and hence brings in hypothesis as an additional test dependency.
@mattip Could you take a look at this please?
--- END ISSUE ---
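For context, a property-based test written with hypothesis typically looks like the sketch below. This is only an illustrative example of the style of test this dependency enables, not the actual test added by the pull request.
```python
import numpy as np
from hypothesis import given, strategies as st


@given(st.lists(st.integers(min_value=-10**6, max_value=10**6), min_size=1))
def test_sort_is_idempotent(values):
    # Sorting an already sorted array must leave it unchanged,
    # whatever integers hypothesis generates.
    arr = np.sort(np.array(values))
    assert np.array_equal(np.sort(arr), arr)
```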
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numpy/conftest.py`
Content:
```
1 """
2 Pytest configuration and fixtures for the Numpy test suite.
3 """
4 import os
5
6 import pytest
7 import numpy
8
9 from numpy.core._multiarray_tests import get_fpu_mode
10
11
12 _old_fpu_mode = None
13 _collect_results = {}
14
15
16 def pytest_configure(config):
17 config.addinivalue_line("markers",
18 "valgrind_error: Tests that are known to error under valgrind.")
19 config.addinivalue_line("markers",
20 "leaks_references: Tests that are known to leak references.")
21 config.addinivalue_line("markers",
22 "slow: Tests that are very slow.")
23
24
25 def pytest_addoption(parser):
26 parser.addoption("--available-memory", action="store", default=None,
27 help=("Set amount of memory available for running the "
28 "test suite. This can result to tests requiring "
29 "especially large amounts of memory to be skipped. "
30 "Equivalent to setting environment variable "
31 "NPY_AVAILABLE_MEM. Default: determined"
32 "automatically."))
33
34
35 def pytest_sessionstart(session):
36 available_mem = session.config.getoption('available_memory')
37 if available_mem is not None:
38 os.environ['NPY_AVAILABLE_MEM'] = available_mem
39
40
41 #FIXME when yield tests are gone.
42 @pytest.hookimpl()
43 def pytest_itemcollected(item):
44 """
45 Check FPU precision mode was not changed during test collection.
46
47 The clumsy way we do it here is mainly necessary because numpy
48 still uses yield tests, which can execute code at test collection
49 time.
50 """
51 global _old_fpu_mode
52
53 mode = get_fpu_mode()
54
55 if _old_fpu_mode is None:
56 _old_fpu_mode = mode
57 elif mode != _old_fpu_mode:
58 _collect_results[item] = (_old_fpu_mode, mode)
59 _old_fpu_mode = mode
60
61
62 @pytest.fixture(scope="function", autouse=True)
63 def check_fpu_mode(request):
64 """
65 Check FPU precision mode was not changed during the test.
66 """
67 old_mode = get_fpu_mode()
68 yield
69 new_mode = get_fpu_mode()
70
71 if old_mode != new_mode:
72 raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
73 " during the test".format(old_mode, new_mode))
74
75 collect_result = _collect_results.get(request.node)
76 if collect_result is not None:
77 old_mode, new_mode = collect_result
78 raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
79 " when collecting the test".format(old_mode,
80 new_mode))
81
82
83 @pytest.fixture(autouse=True)
84 def add_np(doctest_namespace):
85 doctest_namespace['np'] = numpy
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numpy/conftest.py b/numpy/conftest.py
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -3,6 +3,7 @@
"""
import os
+import hypothesis
import pytest
import numpy
@@ -12,6 +13,12 @@
_old_fpu_mode = None
_collect_results = {}
+# See https://hypothesis.readthedocs.io/en/latest/settings.html
+hypothesis.settings.register_profile(
+ name="numpy-profile", deadline=None, print_blob=True,
+)
+hypothesis.settings.load_profile("numpy-profile")
+
def pytest_configure(config):
config.addinivalue_line("markers",
| {"golden_diff": "diff --git a/numpy/conftest.py b/numpy/conftest.py\n--- a/numpy/conftest.py\n+++ b/numpy/conftest.py\n@@ -3,6 +3,7 @@\n \"\"\"\n import os\n \n+import hypothesis\n import pytest\n import numpy\n \n@@ -12,6 +13,12 @@\n _old_fpu_mode = None\n _collect_results = {}\n \n+# See https://hypothesis.readthedocs.io/en/latest/settings.html\n+hypothesis.settings.register_profile(\n+ name=\"numpy-profile\", deadline=None, print_blob=True,\n+)\n+hypothesis.settings.load_profile(\"numpy-profile\")\n+\n \n def pytest_configure(config):\n config.addinivalue_line(\"markers\",\n", "issue": "TST: Add the first test using hypothesis\nThis pull request adds the first test that uses hypothesis and hence brings in hypothesis as an additional test dependency.\r\n\r\n@mattip Could you take a look at this please?\r\n\n", "before_files": [{"content": "\"\"\"\nPytest configuration and fixtures for the Numpy test suite.\n\"\"\"\nimport os\n\nimport pytest\nimport numpy\n\nfrom numpy.core._multiarray_tests import get_fpu_mode\n\n\n_old_fpu_mode = None\n_collect_results = {}\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\"markers\",\n \"valgrind_error: Tests that are known to error under valgrind.\")\n config.addinivalue_line(\"markers\",\n \"leaks_references: Tests that are known to leak references.\")\n config.addinivalue_line(\"markers\",\n \"slow: Tests that are very slow.\")\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--available-memory\", action=\"store\", default=None,\n help=(\"Set amount of memory available for running the \"\n \"test suite. This can result to tests requiring \"\n \"especially large amounts of memory to be skipped. \"\n \"Equivalent to setting environment variable \"\n \"NPY_AVAILABLE_MEM. Default: determined\"\n \"automatically.\"))\n\n\ndef pytest_sessionstart(session):\n available_mem = session.config.getoption('available_memory')\n if available_mem is not None:\n os.environ['NPY_AVAILABLE_MEM'] = available_mem\n\n\n#FIXME when yield tests are gone.\[email protected]()\ndef pytest_itemcollected(item):\n \"\"\"\n Check FPU precision mode was not changed during test collection.\n\n The clumsy way we do it here is mainly necessary because numpy\n still uses yield tests, which can execute code at test collection\n time.\n \"\"\"\n global _old_fpu_mode\n\n mode = get_fpu_mode()\n\n if _old_fpu_mode is None:\n _old_fpu_mode = mode\n elif mode != _old_fpu_mode:\n _collect_results[item] = (_old_fpu_mode, mode)\n _old_fpu_mode = mode\n\n\[email protected](scope=\"function\", autouse=True)\ndef check_fpu_mode(request):\n \"\"\"\n Check FPU precision mode was not changed during the test.\n \"\"\"\n old_mode = get_fpu_mode()\n yield\n new_mode = get_fpu_mode()\n\n if old_mode != new_mode:\n raise AssertionError(\"FPU precision mode changed from {0:#x} to {1:#x}\"\n \" during the test\".format(old_mode, new_mode))\n\n collect_result = _collect_results.get(request.node)\n if collect_result is not None:\n old_mode, new_mode = collect_result\n raise AssertionError(\"FPU precision mode changed from {0:#x} to {1:#x}\"\n \" when collecting the test\".format(old_mode,\n new_mode))\n\n\[email protected](autouse=True)\ndef add_np(doctest_namespace):\n doctest_namespace['np'] = numpy\n", "path": "numpy/conftest.py"}], "after_files": [{"content": "\"\"\"\nPytest configuration and fixtures for the Numpy test suite.\n\"\"\"\nimport os\n\nimport hypothesis\nimport pytest\nimport numpy\n\nfrom numpy.core._multiarray_tests import get_fpu_mode\n\n\n_old_fpu_mode = 
None\n_collect_results = {}\n\n# See https://hypothesis.readthedocs.io/en/latest/settings.html\nhypothesis.settings.register_profile(\n name=\"numpy-profile\", deadline=None, print_blob=True,\n)\nhypothesis.settings.load_profile(\"numpy-profile\")\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\"markers\",\n \"valgrind_error: Tests that are known to error under valgrind.\")\n config.addinivalue_line(\"markers\",\n \"leaks_references: Tests that are known to leak references.\")\n config.addinivalue_line(\"markers\",\n \"slow: Tests that are very slow.\")\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--available-memory\", action=\"store\", default=None,\n help=(\"Set amount of memory available for running the \"\n \"test suite. This can result to tests requiring \"\n \"especially large amounts of memory to be skipped. \"\n \"Equivalent to setting environment variable \"\n \"NPY_AVAILABLE_MEM. Default: determined\"\n \"automatically.\"))\n\n\ndef pytest_sessionstart(session):\n available_mem = session.config.getoption('available_memory')\n if available_mem is not None:\n os.environ['NPY_AVAILABLE_MEM'] = available_mem\n\n\n#FIXME when yield tests are gone.\[email protected]()\ndef pytest_itemcollected(item):\n \"\"\"\n Check FPU precision mode was not changed during test collection.\n\n The clumsy way we do it here is mainly necessary because numpy\n still uses yield tests, which can execute code at test collection\n time.\n \"\"\"\n global _old_fpu_mode\n\n mode = get_fpu_mode()\n\n if _old_fpu_mode is None:\n _old_fpu_mode = mode\n elif mode != _old_fpu_mode:\n _collect_results[item] = (_old_fpu_mode, mode)\n _old_fpu_mode = mode\n\n\[email protected](scope=\"function\", autouse=True)\ndef check_fpu_mode(request):\n \"\"\"\n Check FPU precision mode was not changed during the test.\n \"\"\"\n old_mode = get_fpu_mode()\n yield\n new_mode = get_fpu_mode()\n\n if old_mode != new_mode:\n raise AssertionError(\"FPU precision mode changed from {0:#x} to {1:#x}\"\n \" during the test\".format(old_mode, new_mode))\n\n collect_result = _collect_results.get(request.node)\n if collect_result is not None:\n old_mode, new_mode = collect_result\n raise AssertionError(\"FPU precision mode changed from {0:#x} to {1:#x}\"\n \" when collecting the test\".format(old_mode,\n new_mode))\n\n\[email protected](autouse=True)\ndef add_np(doctest_namespace):\n doctest_namespace['np'] = numpy\n", "path": "numpy/conftest.py"}]} | 1,062 | 153 |
gh_patches_debug_36207 | rasdani/github-patches | git_diff | huggingface__transformers-5972 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
convert_roberta: AttributeError when converting CamemBERT model.pt to pytorch_model.bin
Hi,
I trained a CamemBERT model with the fairseq library which gave me the following files:
- dict.txt: vocabulary coming from the sentencepiece model
- sentencepiece.bpe.model
- model.pt
Now I am trying to convert the model.pt into pytorch_model.bin and config.json as mentioned here ([fairseq/issues#1514](https://github.com/pytorch/fairseq/issues/1514)) and here ([transformers/issue#1850](https://github.com/huggingface/transformers/issues/1850)), by using the conversion script of the transformers library ([transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/blob/master/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py)). The goal is to use those files with fast-bert.
However, using this command line:
```shell
python convert_roberta_original_pytorch_checkpoint_to_pytorch.py --roberta_checkpoint_path ./ --pytorch_dump_folder_path ./ --classification_head
```
I get the following error:
```python
AttributeError Traceback (most recent call last)
<ipython-input-27-ea791887ff26> in <module>
----> 1 convert_roberta_original_pytorch_checkpoint_to_pytorch.convert_roberta_checkpoint_to_pytorch(CAMEMBERT_PATH, CAMEMBERT_PATH, True)
~/anaconda3/envs/NLP/lib/python3.7/site-packages/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py in convert_roberta_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head)
48 roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
49 roberta.eval() # disable dropout
---> 50 roberta_sent_encoder = roberta.model.decoder.sentence_encoder
51 config = RobertaConfig(
52 vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
~/anaconda3/envs/NLP/lib/python3.7/site-packages/torch/nn/modules/module.py in __getattr__(self, name)
592 return modules[name]
593 raise AttributeError("'{}' object has no attribute '{}'".format(
--> 594 type(self).__name__, name))
595
596 def __setattr__(self, name, value):
AttributeError: 'RobertaModel' object has no attribute 'decoder'
```
And indeed, when I check, the fairseq/pytorch RobertaModel has no `decoder` attribute.
Am I doing this wrong? I see no other conversion script that fits my CamemBERT model, so I guess the RoBERTa one is the right one.
--- END ISSUE ---
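As a quick illustration of the incompatibility (the helper below is an assumption of mine, not code from the conversion script): newer fairseq releases expose the transformer under `model.encoder`, while older ones used `model.decoder`, so a version-tolerant lookup could be written as:

```python
# Hypothetical compatibility shim, assuming only the attribute name changed
# between fairseq versions; not part of the transformers repository.
def get_sentence_encoder(roberta):
    # Newer fairseq: roberta.model.encoder; older fairseq: roberta.model.decoder.
    inner = getattr(roberta.model, "encoder", None) or getattr(roberta.model, "decoder")
    return inner.sentence_encoder
```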
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2018 The HuggingFace Inc. team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Convert RoBERTa checkpoint."""
16
17
18 import argparse
19 import logging
20 import pathlib
21
22 import fairseq
23 import torch
24 from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
25 from fairseq.modules import TransformerSentenceEncoderLayer
26 from packaging import version
27
28 from transformers.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput
29 from transformers.modeling_roberta import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification
30
31
32 if version.parse(fairseq.__version__) < version.parse("0.9.0"):
33 raise Exception("requires fairseq >= 0.9.0")
34
35
36 logging.basicConfig(level=logging.INFO)
37 logger = logging.getLogger(__name__)
38
39 SAMPLE_TEXT = "Hello world! cécé herlolip"
40
41
42 def convert_roberta_checkpoint_to_pytorch(
43 roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
44 ):
45 """
46 Copy/paste/tweak roberta's weights to our BERT structure.
47 """
48 roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
49 roberta.eval() # disable dropout
50 roberta_sent_encoder = roberta.model.decoder.sentence_encoder
51 config = RobertaConfig(
52 vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
53 hidden_size=roberta.args.encoder_embed_dim,
54 num_hidden_layers=roberta.args.encoder_layers,
55 num_attention_heads=roberta.args.encoder_attention_heads,
56 intermediate_size=roberta.args.encoder_ffn_embed_dim,
57 max_position_embeddings=514,
58 type_vocab_size=1,
59 layer_norm_eps=1e-5, # PyTorch default used in fairseq
60 )
61 if classification_head:
62 config.num_labels = roberta.args.num_classes
63 print("Our BERT config:", config)
64
65 model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)
66 model.eval()
67
68 # Now let's copy all the weights.
69 # Embeddings
70 model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
71 model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
72 model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
73 model.roberta.embeddings.token_type_embeddings.weight
74 ) # just zero them out b/c RoBERTa doesn't use them.
75 model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight
76 model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias
77
78 for i in range(config.num_hidden_layers):
79 # Encoder: start of layer
80 layer: BertLayer = model.roberta.encoder.layer[i]
81 roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
82
83 # self attention
84 self_attn: BertSelfAttention = layer.attention.self
85 assert (
86 roberta_layer.self_attn.k_proj.weight.data.shape
87 == roberta_layer.self_attn.q_proj.weight.data.shape
88 == roberta_layer.self_attn.v_proj.weight.data.shape
89 == torch.Size((config.hidden_size, config.hidden_size))
90 )
91
92 self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
93 self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
94 self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
95 self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
96 self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
97 self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
98
99 # self-attention output
100 self_output: BertSelfOutput = layer.attention.output
101 assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
102 self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
103 self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
104 self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight
105 self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias
106
107 # intermediate
108 intermediate: BertIntermediate = layer.intermediate
109 assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
110 intermediate.dense.weight = roberta_layer.fc1.weight
111 intermediate.dense.bias = roberta_layer.fc1.bias
112
113 # output
114 bert_output: BertOutput = layer.output
115 assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
116 bert_output.dense.weight = roberta_layer.fc2.weight
117 bert_output.dense.bias = roberta_layer.fc2.bias
118 bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight
119 bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias
120 # end of layer
121
122 if classification_head:
123 model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
124 model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
125 model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
126 model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
127 else:
128 # LM Head
129 model.lm_head.dense.weight = roberta.model.decoder.lm_head.dense.weight
130 model.lm_head.dense.bias = roberta.model.decoder.lm_head.dense.bias
131 model.lm_head.layer_norm.weight = roberta.model.decoder.lm_head.layer_norm.weight
132 model.lm_head.layer_norm.bias = roberta.model.decoder.lm_head.layer_norm.bias
133 model.lm_head.decoder.weight = roberta.model.decoder.lm_head.weight
134 model.lm_head.decoder.bias = roberta.model.decoder.lm_head.bias
135
136 # Let's check that we get the same results.
137 input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
138
139 our_output = model(input_ids)[0]
140 if classification_head:
141 their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
142 else:
143 their_output = roberta.model(input_ids)[0]
144 print(our_output.shape, their_output.shape)
145 max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
146 print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
147 success = torch.allclose(our_output, their_output, atol=1e-3)
148 print("Do both models output the same tensors?", "🔥" if success else "💩")
149 if not success:
150 raise Exception("Something went wRoNg")
151
152 pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
153 print(f"Saving model to {pytorch_dump_folder_path}")
154 model.save_pretrained(pytorch_dump_folder_path)
155
156
157 if __name__ == "__main__":
158 parser = argparse.ArgumentParser()
159 # Required parameters
160 parser.add_argument(
161 "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
162 )
163 parser.add_argument(
164 "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
165 )
166 parser.add_argument(
167 "--classification_head", action="store_true", help="Whether to convert a final classification head."
168 )
169 args = parser.parse_args()
170 convert_roberta_checkpoint_to_pytorch(
171 args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
172 )
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py
--- a/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py
@@ -47,7 +47,7 @@
"""
roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
roberta.eval() # disable dropout
- roberta_sent_encoder = roberta.model.decoder.sentence_encoder
+ roberta_sent_encoder = roberta.model.encoder.sentence_encoder
config = RobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
hidden_size=roberta.args.encoder_embed_dim,
@@ -59,7 +59,7 @@
layer_norm_eps=1e-5, # PyTorch default used in fairseq
)
if classification_head:
- config.num_labels = roberta.args.num_classes
+ config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our BERT config:", config)
model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)
@@ -126,12 +126,12 @@
model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
- model.lm_head.dense.weight = roberta.model.decoder.lm_head.dense.weight
- model.lm_head.dense.bias = roberta.model.decoder.lm_head.dense.bias
- model.lm_head.layer_norm.weight = roberta.model.decoder.lm_head.layer_norm.weight
- model.lm_head.layer_norm.bias = roberta.model.decoder.lm_head.layer_norm.bias
- model.lm_head.decoder.weight = roberta.model.decoder.lm_head.weight
- model.lm_head.decoder.bias = roberta.model.decoder.lm_head.bias
+ model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
+ model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
+ model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
+ model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
+ model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
+ model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
| {"golden_diff": "diff --git a/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py\n--- a/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py\n+++ b/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py\n@@ -47,7 +47,7 @@\n \"\"\"\n roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)\n roberta.eval() # disable dropout\n- roberta_sent_encoder = roberta.model.decoder.sentence_encoder\n+ roberta_sent_encoder = roberta.model.encoder.sentence_encoder\n config = RobertaConfig(\n vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,\n hidden_size=roberta.args.encoder_embed_dim,\n@@ -59,7 +59,7 @@\n layer_norm_eps=1e-5, # PyTorch default used in fairseq\n )\n if classification_head:\n- config.num_labels = roberta.args.num_classes\n+ config.num_labels = roberta.model.classification_heads[\"mnli\"].out_proj.weight.shape[0]\n print(\"Our BERT config:\", config)\n \n model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)\n@@ -126,12 +126,12 @@\n model.classifier.out_proj.bias = roberta.model.classification_heads[\"mnli\"].out_proj.bias\n else:\n # LM Head\n- model.lm_head.dense.weight = roberta.model.decoder.lm_head.dense.weight\n- model.lm_head.dense.bias = roberta.model.decoder.lm_head.dense.bias\n- model.lm_head.layer_norm.weight = roberta.model.decoder.lm_head.layer_norm.weight\n- model.lm_head.layer_norm.bias = roberta.model.decoder.lm_head.layer_norm.bias\n- model.lm_head.decoder.weight = roberta.model.decoder.lm_head.weight\n- model.lm_head.decoder.bias = roberta.model.decoder.lm_head.bias\n+ model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight\n+ model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias\n+ model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight\n+ model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias\n+ model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight\n+ model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias\n \n # Let's check that we get the same results.\n input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1\n", "issue": "convert_roberta: AttributeError when converting CamemBERT model.pt to pytorch_model.bin\nHi,\r\n\r\nI trained a CamemBERT model with the fairseq library which gave me the following files:\r\n- dict.txt: vocabulary coming from the sentencepiece model\r\n- sentencepiece.bpe.model\r\n- model.pt\r\n\r\nNow I am trying to convert the model.pt into pytorch_model.bin and config.json as mentionned here ([fairseq/issues#1514](https://github.com/pytorch/fairseq/issues/1514)) and here ([transformers/issue#1850](https://github.com/huggingface/transformers/issues/1850)), by using the conversion script of the transformers library ([transfomers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/blob/master/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py)). 
The goal is to use those files with fast-bert.\r\n\r\nHowever, using this command line:\r\n```shell\r\npython convert_roberta_original_pytorch_checkpoint_to_pytorch.py --roberta_checkpoint_path ./ --pytorch_dump_folder_path ./ --classification_head\r\n```\r\n\r\nI get the following error:\r\n\r\n```python\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-27-ea791887ff26> in <module>\r\n----> 1 convert_roberta_original_pytorch_checkpoint_to_pytorch.convert_roberta_checkpoint_to_pytorch(CAMEMBERT_PATH, CAMEMBERT_PATH, True)\r\n\r\n~/anaconda3/envs/NLP/lib/python3.7/site-packages/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py in convert_roberta_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head)\r\n 48 roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)\r\n 49 roberta.eval() # disable dropout\r\n---> 50 roberta_sent_encoder = roberta.model.decoder.sentence_encoder\r\n 51 config = RobertaConfig(\r\n 52 vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,\r\n\r\n~/anaconda3/envs/NLP/lib/python3.7/site-packages/torch/nn/modules/module.py in __getattr__(self, name)\r\n 592 return modules[name]\r\n 593 raise AttributeError(\"'{}' object has no attribute '{}'\".format(\r\n--> 594 type(self).__name__, name))\r\n 595 \r\n 596 def __setattr__(self, name, value):\r\n\r\nAttributeError: 'RobertaModel' object has no attribute 'decoder'\r\n```\r\n\r\nAnd indeed when I check the fairseq/pytorch RobertaModel has no decoder attribute. \r\n\r\nAm I doing this wrong ? I see no other conversion script to fit my CamemBERT model so I guess the RoBERTa one is the good one.\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Convert RoBERTa checkpoint.\"\"\"\n\n\nimport argparse\nimport logging\nimport pathlib\n\nimport fairseq\nimport torch\nfrom fairseq.models.roberta import RobertaModel as FairseqRobertaModel\nfrom fairseq.modules import TransformerSentenceEncoderLayer\nfrom packaging import version\n\nfrom transformers.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput\nfrom transformers.modeling_roberta import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification\n\n\nif version.parse(fairseq.__version__) < version.parse(\"0.9.0\"):\n raise Exception(\"requires fairseq >= 0.9.0\")\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nSAMPLE_TEXT = \"Hello world! 
c\u00e9c\u00e9 herlolip\"\n\n\ndef convert_roberta_checkpoint_to_pytorch(\n roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool\n):\n \"\"\"\n Copy/paste/tweak roberta's weights to our BERT structure.\n \"\"\"\n roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)\n roberta.eval() # disable dropout\n roberta_sent_encoder = roberta.model.decoder.sentence_encoder\n config = RobertaConfig(\n vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,\n hidden_size=roberta.args.encoder_embed_dim,\n num_hidden_layers=roberta.args.encoder_layers,\n num_attention_heads=roberta.args.encoder_attention_heads,\n intermediate_size=roberta.args.encoder_ffn_embed_dim,\n max_position_embeddings=514,\n type_vocab_size=1,\n layer_norm_eps=1e-5, # PyTorch default used in fairseq\n )\n if classification_head:\n config.num_labels = roberta.args.num_classes\n print(\"Our BERT config:\", config)\n\n model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)\n model.eval()\n\n # Now let's copy all the weights.\n # Embeddings\n model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight\n model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight\n model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(\n model.roberta.embeddings.token_type_embeddings.weight\n ) # just zero them out b/c RoBERTa doesn't use them.\n model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight\n model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias\n\n for i in range(config.num_hidden_layers):\n # Encoder: start of layer\n layer: BertLayer = model.roberta.encoder.layer[i]\n roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]\n\n # self attention\n self_attn: BertSelfAttention = layer.attention.self\n assert (\n roberta_layer.self_attn.k_proj.weight.data.shape\n == roberta_layer.self_attn.q_proj.weight.data.shape\n == roberta_layer.self_attn.v_proj.weight.data.shape\n == torch.Size((config.hidden_size, config.hidden_size))\n )\n\n self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight\n self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias\n self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight\n self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias\n self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight\n self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias\n\n # self-attention output\n self_output: BertSelfOutput = layer.attention.output\n assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape\n self_output.dense.weight = roberta_layer.self_attn.out_proj.weight\n self_output.dense.bias = roberta_layer.self_attn.out_proj.bias\n self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight\n self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias\n\n # intermediate\n intermediate: BertIntermediate = layer.intermediate\n assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape\n intermediate.dense.weight = roberta_layer.fc1.weight\n intermediate.dense.bias = roberta_layer.fc1.bias\n\n # output\n bert_output: BertOutput = layer.output\n assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape\n bert_output.dense.weight = roberta_layer.fc2.weight\n bert_output.dense.bias = roberta_layer.fc2.bias\n 
bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight\n bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias\n # end of layer\n\n if classification_head:\n model.classifier.dense.weight = roberta.model.classification_heads[\"mnli\"].dense.weight\n model.classifier.dense.bias = roberta.model.classification_heads[\"mnli\"].dense.bias\n model.classifier.out_proj.weight = roberta.model.classification_heads[\"mnli\"].out_proj.weight\n model.classifier.out_proj.bias = roberta.model.classification_heads[\"mnli\"].out_proj.bias\n else:\n # LM Head\n model.lm_head.dense.weight = roberta.model.decoder.lm_head.dense.weight\n model.lm_head.dense.bias = roberta.model.decoder.lm_head.dense.bias\n model.lm_head.layer_norm.weight = roberta.model.decoder.lm_head.layer_norm.weight\n model.lm_head.layer_norm.bias = roberta.model.decoder.lm_head.layer_norm.bias\n model.lm_head.decoder.weight = roberta.model.decoder.lm_head.weight\n model.lm_head.decoder.bias = roberta.model.decoder.lm_head.bias\n\n # Let's check that we get the same results.\n input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1\n\n our_output = model(input_ids)[0]\n if classification_head:\n their_output = roberta.model.classification_heads[\"mnli\"](roberta.extract_features(input_ids))\n else:\n their_output = roberta.model(input_ids)[0]\n print(our_output.shape, their_output.shape)\n max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()\n print(f\"max_absolute_diff = {max_absolute_diff}\") # ~ 1e-7\n success = torch.allclose(our_output, their_output, atol=1e-3)\n print(\"Do both models output the same tensors?\", \"\ud83d\udd25\" if success else \"\ud83d\udca9\")\n if not success:\n raise Exception(\"Something went wRoNg\")\n\n pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)\n print(f\"Saving model to {pytorch_dump_folder_path}\")\n model.save_pretrained(pytorch_dump_folder_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # Required parameters\n parser.add_argument(\n \"--roberta_checkpoint_path\", default=None, type=str, required=True, help=\"Path the official PyTorch dump.\"\n )\n parser.add_argument(\n \"--pytorch_dump_folder_path\", default=None, type=str, required=True, help=\"Path to the output PyTorch model.\"\n )\n parser.add_argument(\n \"--classification_head\", action=\"store_true\", help=\"Whether to convert a final classification head.\"\n )\n args = parser.parse_args()\n convert_roberta_checkpoint_to_pytorch(\n args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head\n )\n", "path": "src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2018 The HuggingFace Inc. 
team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Convert RoBERTa checkpoint.\"\"\"\n\n\nimport argparse\nimport logging\nimport pathlib\n\nimport fairseq\nimport torch\nfrom fairseq.models.roberta import RobertaModel as FairseqRobertaModel\nfrom fairseq.modules import TransformerSentenceEncoderLayer\nfrom packaging import version\n\nfrom transformers.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput\nfrom transformers.modeling_roberta import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification\n\n\nif version.parse(fairseq.__version__) < version.parse(\"0.9.0\"):\n raise Exception(\"requires fairseq >= 0.9.0\")\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nSAMPLE_TEXT = \"Hello world! c\u00e9c\u00e9 herlolip\"\n\n\ndef convert_roberta_checkpoint_to_pytorch(\n roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool\n):\n \"\"\"\n Copy/paste/tweak roberta's weights to our BERT structure.\n \"\"\"\n roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)\n roberta.eval() # disable dropout\n roberta_sent_encoder = roberta.model.encoder.sentence_encoder\n config = RobertaConfig(\n vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,\n hidden_size=roberta.args.encoder_embed_dim,\n num_hidden_layers=roberta.args.encoder_layers,\n num_attention_heads=roberta.args.encoder_attention_heads,\n intermediate_size=roberta.args.encoder_ffn_embed_dim,\n max_position_embeddings=514,\n type_vocab_size=1,\n layer_norm_eps=1e-5, # PyTorch default used in fairseq\n )\n if classification_head:\n config.num_labels = roberta.model.classification_heads[\"mnli\"].out_proj.weight.shape[0]\n print(\"Our BERT config:\", config)\n\n model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)\n model.eval()\n\n # Now let's copy all the weights.\n # Embeddings\n model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight\n model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight\n model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(\n model.roberta.embeddings.token_type_embeddings.weight\n ) # just zero them out b/c RoBERTa doesn't use them.\n model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight\n model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias\n\n for i in range(config.num_hidden_layers):\n # Encoder: start of layer\n layer: BertLayer = model.roberta.encoder.layer[i]\n roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]\n\n # self attention\n self_attn: BertSelfAttention = layer.attention.self\n assert (\n roberta_layer.self_attn.k_proj.weight.data.shape\n == roberta_layer.self_attn.q_proj.weight.data.shape\n == roberta_layer.self_attn.v_proj.weight.data.shape\n == torch.Size((config.hidden_size, config.hidden_size))\n 
)\n\n self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight\n self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias\n self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight\n self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias\n self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight\n self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias\n\n # self-attention output\n self_output: BertSelfOutput = layer.attention.output\n assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape\n self_output.dense.weight = roberta_layer.self_attn.out_proj.weight\n self_output.dense.bias = roberta_layer.self_attn.out_proj.bias\n self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight\n self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias\n\n # intermediate\n intermediate: BertIntermediate = layer.intermediate\n assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape\n intermediate.dense.weight = roberta_layer.fc1.weight\n intermediate.dense.bias = roberta_layer.fc1.bias\n\n # output\n bert_output: BertOutput = layer.output\n assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape\n bert_output.dense.weight = roberta_layer.fc2.weight\n bert_output.dense.bias = roberta_layer.fc2.bias\n bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight\n bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias\n # end of layer\n\n if classification_head:\n model.classifier.dense.weight = roberta.model.classification_heads[\"mnli\"].dense.weight\n model.classifier.dense.bias = roberta.model.classification_heads[\"mnli\"].dense.bias\n model.classifier.out_proj.weight = roberta.model.classification_heads[\"mnli\"].out_proj.weight\n model.classifier.out_proj.bias = roberta.model.classification_heads[\"mnli\"].out_proj.bias\n else:\n # LM Head\n model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight\n model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias\n model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight\n model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias\n model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight\n model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias\n\n # Let's check that we get the same results.\n input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1\n\n our_output = model(input_ids)[0]\n if classification_head:\n their_output = roberta.model.classification_heads[\"mnli\"](roberta.extract_features(input_ids))\n else:\n their_output = roberta.model(input_ids)[0]\n print(our_output.shape, their_output.shape)\n max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()\n print(f\"max_absolute_diff = {max_absolute_diff}\") # ~ 1e-7\n success = torch.allclose(our_output, their_output, atol=1e-3)\n print(\"Do both models output the same tensors?\", \"\ud83d\udd25\" if success else \"\ud83d\udca9\")\n if not success:\n raise Exception(\"Something went wRoNg\")\n\n pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)\n print(f\"Saving model to {pytorch_dump_folder_path}\")\n model.save_pretrained(pytorch_dump_folder_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # Required parameters\n parser.add_argument(\n \"--roberta_checkpoint_path\", default=None, type=str, required=True, help=\"Path the official PyTorch dump.\"\n )\n 
parser.add_argument(\n \"--pytorch_dump_folder_path\", default=None, type=str, required=True, help=\"Path to the output PyTorch model.\"\n )\n parser.add_argument(\n \"--classification_head\", action=\"store_true\", help=\"Whether to convert a final classification head.\"\n )\n args = parser.parse_args()\n convert_roberta_checkpoint_to_pytorch(\n args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head\n )\n", "path": "src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py"}]} | 3,132 | 609 |
gh_patches_debug_9956 | rasdani/github-patches | git_diff | iterative__dvc-8823 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dvc repro: Tuples in parameters are always marked as modified
# Bug Report
<!--
## Issue name
Issue names must follow the pattern `command: description` where the command is the dvc command that you are trying to run. The description should describe the consequence of the bug.
Example: `repro: doesn't detect input changes`
-->
## Description
<!--
A clear and concise description of what the bug is.
-->
When tuples are used in parameters, dvc always marks them as modified.
```sh
dvc status
```
outputs
```yaml
train_model:
changed deps:
model/parameters.py:
modified: NGRAM_VECTORIZER_RANGE
```
regardless of whether `NGRAM_VECTORIZER_RANGE` has actually been changed.
--- END ISSUE ---
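The root cause is easy to reproduce outside of dvc: values read back from a serialized params/lock file come back as lists, so comparing them against a Python tuple is never equal. A minimal illustration (the variable name mirrors the issue; the rest is assumed):

```python
# Illustrative only; not dvc code. A YAML/JSON round-trip of a tuple
# yields a list, so a plain equality check always reports "modified".
NGRAM_VECTORIZER_RANGE = (1, 3)   # value defined in model/parameters.py
stored_value = [1, 3]             # value as it comes back from the lock file

print(NGRAM_VECTORIZER_RANGE == stored_value)         # False -> "modified"
print(list(NGRAM_VECTORIZER_RANGE) == stored_value)   # True  -> unchanged
```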
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/dependency/param.py`
Content:
```
1 import logging
2 import os
3 import typing
4 from collections import defaultdict
5 from typing import Dict, cast
6
7 import dpath.util
8 from voluptuous import Any
9
10 from dvc.exceptions import DvcException
11 from dvc.utils.serialize import ParseError, load_path
12 from dvc_data.hashfile.hash_info import HashInfo
13
14 from .base import Dependency
15
16 logger = logging.getLogger(__name__)
17
18
19 class MissingParamsError(DvcException):
20 pass
21
22
23 class MissingParamsFile(DvcException):
24 pass
25
26
27 class ParamsIsADirectoryError(DvcException):
28 pass
29
30
31 class BadParamFileError(DvcException):
32 pass
33
34
35 class ParamsDependency(Dependency):
36 PARAM_PARAMS = "params"
37 PARAM_SCHEMA = {PARAM_PARAMS: Any(dict, list, None)}
38 DEFAULT_PARAMS_FILE = "params.yaml"
39
40 def __init__(self, stage, path, params=None, repo=None):
41 self.params = list(params) if params else []
42 hash_info = HashInfo()
43 if isinstance(params, dict):
44 hash_info = HashInfo(
45 self.PARAM_PARAMS,
46 params, # type: ignore[arg-type]
47 )
48 repo = repo or stage.repo
49 path = path or os.path.join(repo.root_dir, self.DEFAULT_PARAMS_FILE)
50 super().__init__(stage, path, repo=repo)
51 self.hash_info = hash_info
52
53 def dumpd(self, **kwargs):
54 ret = super().dumpd()
55 if not self.hash_info:
56 ret[self.PARAM_PARAMS] = self.params or {}
57 return ret
58
59 def fill_values(self, values=None):
60 """Load params values dynamically."""
61 if values is None:
62 return
63
64 info = {}
65 if not self.params:
66 info.update(values)
67 for param in self.params:
68 if param in values:
69 info[param] = values[param]
70 self.hash_info = HashInfo(
71 self.PARAM_PARAMS,
72 info, # type: ignore[arg-type]
73 )
74
75 def read_params(
76 self, flatten: bool = True, **kwargs: typing.Any
77 ) -> Dict[str, typing.Any]:
78 try:
79 config = self.read_file()
80 except MissingParamsFile:
81 config = {}
82
83 if not self.params:
84 return config
85
86 ret = {}
87 if flatten:
88 for param in self.params:
89 try:
90 ret[param] = dpath.util.get(config, param, separator=".")
91 except KeyError:
92 continue
93 return ret
94
95 from dpath.util import merge
96
97 for param in self.params:
98 merge(
99 ret,
100 dpath.util.search(config, param, separator="."),
101 separator=".",
102 )
103 return ret
104
105 def workspace_status(self):
106 if not self.exists:
107 return {str(self): "deleted"}
108 if self.hash_info.value is None:
109 return {str(self): "new"}
110
111 from funcy import ldistinct
112
113 status: Dict[str, Any] = defaultdict(dict)
114 info = cast(dict, self.hash_info.value) if self.hash_info else {}
115 actual = self.read_params()
116
117 # NOTE: we want to preserve the order of params as specified in the
118 # status. In case of tracking the whole file, the order is top-level
119 # keys in the file and then the keys in the `info` from `dvc.lock`
120 # (which are alphabetically sorted).
121 params = self.params or ldistinct([*actual.keys(), *info.keys()])
122 for param in params:
123 if param not in actual:
124 st = "deleted"
125 elif param not in info:
126 st = "new"
127 elif actual[param] != info[param]:
128 st = "modified"
129 else:
130 assert actual[param] == info[param]
131 continue
132
133 status[str(self)][param] = st
134
135 return status
136
137 def status(self):
138 return self.workspace_status()
139
140 def validate_filepath(self):
141 if not self.exists:
142 raise MissingParamsFile(f"Parameters file '{self}' does not exist")
143 if self.isdir():
144 raise ParamsIsADirectoryError(
145 f"'{self}' is a directory, expected a parameters file"
146 )
147
148 def read_file(self):
149 self.validate_filepath()
150 try:
151 return load_path(self.fs_path, self.repo.fs)
152 except ParseError as exc:
153 raise BadParamFileError(
154 f"Unable to read parameters from '{self}'"
155 ) from exc
156
157 def get_hash(self):
158 info = self.read_params()
159
160 missing_params = set(self.params) - set(info.keys())
161 if missing_params:
162 raise MissingParamsError(
163 "Parameters '{}' are missing from '{}'.".format(
164 ", ".join(missing_params), self
165 )
166 )
167
168 return HashInfo(self.PARAM_PARAMS, info) # type: ignore[arg-type]
169
170 def save(self):
171 if not self.exists:
172 raise self.DoesNotExistError(self)
173
174 if not self.isfile() and not self.isdir():
175 raise self.IsNotFileOrDirError(self)
176
177 self.ignore()
178 self.hash_info = self.get_hash()
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/dependency/param.py b/dvc/dependency/param.py
--- a/dvc/dependency/param.py
+++ b/dvc/dependency/param.py
@@ -125,9 +125,14 @@
elif param not in info:
st = "new"
elif actual[param] != info[param]:
- st = "modified"
+ if (
+ isinstance(actual[param], tuple)
+ and list(actual[param]) == info[param]
+ ):
+ continue
+ else:
+ st = "modified"
else:
- assert actual[param] == info[param]
continue
status[str(self)][param] = st
| {"golden_diff": "diff --git a/dvc/dependency/param.py b/dvc/dependency/param.py\n--- a/dvc/dependency/param.py\n+++ b/dvc/dependency/param.py\n@@ -125,9 +125,14 @@\n elif param not in info:\n st = \"new\"\n elif actual[param] != info[param]:\n- st = \"modified\"\n+ if (\n+ isinstance(actual[param], tuple)\n+ and list(actual[param]) == info[param]\n+ ):\n+ continue\n+ else:\n+ st = \"modified\"\n else:\n- assert actual[param] == info[param]\n continue\n \n status[str(self)][param] = st\n", "issue": "dvc repro: Tuples in parameters are always marked as modified\n# Bug Report\r\n\r\n<!--\r\n## Issue name\r\n\r\nIssue names must follow the pattern `command: description` where the command is the dvc command that you are trying to run. The description should describe the consequence of the bug.\r\n\r\nExample: `repro: doesn't detect input changes`\r\n-->\r\n\r\n## Description\r\n\r\n<!--\r\nA clear and concise description of what the bug is.\r\n-->\r\nWhen tuples are used in parameters, dvc always marks them as modified.\r\n\r\n```sh\r\ndvc status\r\n```\r\noutputs\r\n```yaml\r\ntrain_model:\r\n changed deps:\r\n model/parameters.py: \r\n modified: NGRAM_VECTORIZER_RANGE\r\n```\r\n\r\nregardless of whether `NGRAM_VECTORIZER_RANGE` has been changed\r\n\n", "before_files": [{"content": "import logging\nimport os\nimport typing\nfrom collections import defaultdict\nfrom typing import Dict, cast\n\nimport dpath.util\nfrom voluptuous import Any\n\nfrom dvc.exceptions import DvcException\nfrom dvc.utils.serialize import ParseError, load_path\nfrom dvc_data.hashfile.hash_info import HashInfo\n\nfrom .base import Dependency\n\nlogger = logging.getLogger(__name__)\n\n\nclass MissingParamsError(DvcException):\n pass\n\n\nclass MissingParamsFile(DvcException):\n pass\n\n\nclass ParamsIsADirectoryError(DvcException):\n pass\n\n\nclass BadParamFileError(DvcException):\n pass\n\n\nclass ParamsDependency(Dependency):\n PARAM_PARAMS = \"params\"\n PARAM_SCHEMA = {PARAM_PARAMS: Any(dict, list, None)}\n DEFAULT_PARAMS_FILE = \"params.yaml\"\n\n def __init__(self, stage, path, params=None, repo=None):\n self.params = list(params) if params else []\n hash_info = HashInfo()\n if isinstance(params, dict):\n hash_info = HashInfo(\n self.PARAM_PARAMS,\n params, # type: ignore[arg-type]\n )\n repo = repo or stage.repo\n path = path or os.path.join(repo.root_dir, self.DEFAULT_PARAMS_FILE)\n super().__init__(stage, path, repo=repo)\n self.hash_info = hash_info\n\n def dumpd(self, **kwargs):\n ret = super().dumpd()\n if not self.hash_info:\n ret[self.PARAM_PARAMS] = self.params or {}\n return ret\n\n def fill_values(self, values=None):\n \"\"\"Load params values dynamically.\"\"\"\n if values is None:\n return\n\n info = {}\n if not self.params:\n info.update(values)\n for param in self.params:\n if param in values:\n info[param] = values[param]\n self.hash_info = HashInfo(\n self.PARAM_PARAMS,\n info, # type: ignore[arg-type]\n )\n\n def read_params(\n self, flatten: bool = True, **kwargs: typing.Any\n ) -> Dict[str, typing.Any]:\n try:\n config = self.read_file()\n except MissingParamsFile:\n config = {}\n\n if not self.params:\n return config\n\n ret = {}\n if flatten:\n for param in self.params:\n try:\n ret[param] = dpath.util.get(config, param, separator=\".\")\n except KeyError:\n continue\n return ret\n\n from dpath.util import merge\n\n for param in self.params:\n merge(\n ret,\n dpath.util.search(config, param, separator=\".\"),\n separator=\".\",\n )\n return ret\n\n def workspace_status(self):\n if not 
self.exists:\n return {str(self): \"deleted\"}\n if self.hash_info.value is None:\n return {str(self): \"new\"}\n\n from funcy import ldistinct\n\n status: Dict[str, Any] = defaultdict(dict)\n info = cast(dict, self.hash_info.value) if self.hash_info else {}\n actual = self.read_params()\n\n # NOTE: we want to preserve the order of params as specified in the\n # status. In case of tracking the whole file, the order is top-level\n # keys in the file and then the keys in the `info` from `dvc.lock`\n # (which are alphabetically sorted).\n params = self.params or ldistinct([*actual.keys(), *info.keys()])\n for param in params:\n if param not in actual:\n st = \"deleted\"\n elif param not in info:\n st = \"new\"\n elif actual[param] != info[param]:\n st = \"modified\"\n else:\n assert actual[param] == info[param]\n continue\n\n status[str(self)][param] = st\n\n return status\n\n def status(self):\n return self.workspace_status()\n\n def validate_filepath(self):\n if not self.exists:\n raise MissingParamsFile(f\"Parameters file '{self}' does not exist\")\n if self.isdir():\n raise ParamsIsADirectoryError(\n f\"'{self}' is a directory, expected a parameters file\"\n )\n\n def read_file(self):\n self.validate_filepath()\n try:\n return load_path(self.fs_path, self.repo.fs)\n except ParseError as exc:\n raise BadParamFileError(\n f\"Unable to read parameters from '{self}'\"\n ) from exc\n\n def get_hash(self):\n info = self.read_params()\n\n missing_params = set(self.params) - set(info.keys())\n if missing_params:\n raise MissingParamsError(\n \"Parameters '{}' are missing from '{}'.\".format(\n \", \".join(missing_params), self\n )\n )\n\n return HashInfo(self.PARAM_PARAMS, info) # type: ignore[arg-type]\n\n def save(self):\n if not self.exists:\n raise self.DoesNotExistError(self)\n\n if not self.isfile() and not self.isdir():\n raise self.IsNotFileOrDirError(self)\n\n self.ignore()\n self.hash_info = self.get_hash()\n", "path": "dvc/dependency/param.py"}], "after_files": [{"content": "import logging\nimport os\nimport typing\nfrom collections import defaultdict\nfrom typing import Dict, cast\n\nimport dpath.util\nfrom voluptuous import Any\n\nfrom dvc.exceptions import DvcException\nfrom dvc.utils.serialize import ParseError, load_path\nfrom dvc_data.hashfile.hash_info import HashInfo\n\nfrom .base import Dependency\n\nlogger = logging.getLogger(__name__)\n\n\nclass MissingParamsError(DvcException):\n pass\n\n\nclass MissingParamsFile(DvcException):\n pass\n\n\nclass ParamsIsADirectoryError(DvcException):\n pass\n\n\nclass BadParamFileError(DvcException):\n pass\n\n\nclass ParamsDependency(Dependency):\n PARAM_PARAMS = \"params\"\n PARAM_SCHEMA = {PARAM_PARAMS: Any(dict, list, None)}\n DEFAULT_PARAMS_FILE = \"params.yaml\"\n\n def __init__(self, stage, path, params=None, repo=None):\n self.params = list(params) if params else []\n hash_info = HashInfo()\n if isinstance(params, dict):\n hash_info = HashInfo(\n self.PARAM_PARAMS,\n params, # type: ignore[arg-type]\n )\n repo = repo or stage.repo\n path = path or os.path.join(repo.root_dir, self.DEFAULT_PARAMS_FILE)\n super().__init__(stage, path, repo=repo)\n self.hash_info = hash_info\n\n def dumpd(self, **kwargs):\n ret = super().dumpd()\n if not self.hash_info:\n ret[self.PARAM_PARAMS] = self.params or {}\n return ret\n\n def fill_values(self, values=None):\n \"\"\"Load params values dynamically.\"\"\"\n if values is None:\n return\n\n info = {}\n if not self.params:\n info.update(values)\n for param in self.params:\n if param in values:\n 
info[param] = values[param]\n self.hash_info = HashInfo(\n self.PARAM_PARAMS,\n info, # type: ignore[arg-type]\n )\n\n def read_params(\n self, flatten: bool = True, **kwargs: typing.Any\n ) -> Dict[str, typing.Any]:\n try:\n config = self.read_file()\n except MissingParamsFile:\n config = {}\n\n if not self.params:\n return config\n\n ret = {}\n if flatten:\n for param in self.params:\n try:\n ret[param] = dpath.util.get(config, param, separator=\".\")\n except KeyError:\n continue\n return ret\n\n from dpath.util import merge\n\n for param in self.params:\n merge(\n ret,\n dpath.util.search(config, param, separator=\".\"),\n separator=\".\",\n )\n return ret\n\n def workspace_status(self):\n if not self.exists:\n return {str(self): \"deleted\"}\n if self.hash_info.value is None:\n return {str(self): \"new\"}\n\n from funcy import ldistinct\n\n status: Dict[str, Any] = defaultdict(dict)\n info = cast(dict, self.hash_info.value) if self.hash_info else {}\n actual = self.read_params()\n\n # NOTE: we want to preserve the order of params as specified in the\n # status. In case of tracking the whole file, the order is top-level\n # keys in the file and then the keys in the `info` from `dvc.lock`\n # (which are alphabetically sorted).\n params = self.params or ldistinct([*actual.keys(), *info.keys()])\n for param in params:\n if param not in actual:\n st = \"deleted\"\n elif param not in info:\n st = \"new\"\n elif actual[param] != info[param]:\n if (\n isinstance(actual[param], tuple)\n and list(actual[param]) == info[param]\n ):\n continue\n else:\n st = \"modified\"\n else:\n continue\n\n status[str(self)][param] = st\n\n return status\n\n def status(self):\n return self.workspace_status()\n\n def validate_filepath(self):\n if not self.exists:\n raise MissingParamsFile(f\"Parameters file '{self}' does not exist\")\n if self.isdir():\n raise ParamsIsADirectoryError(\n f\"'{self}' is a directory, expected a parameters file\"\n )\n\n def read_file(self):\n self.validate_filepath()\n try:\n return load_path(self.fs_path, self.repo.fs)\n except ParseError as exc:\n raise BadParamFileError(\n f\"Unable to read parameters from '{self}'\"\n ) from exc\n\n def get_hash(self):\n info = self.read_params()\n\n missing_params = set(self.params) - set(info.keys())\n if missing_params:\n raise MissingParamsError(\n \"Parameters '{}' are missing from '{}'.\".format(\n \", \".join(missing_params), self\n )\n )\n\n return HashInfo(self.PARAM_PARAMS, info) # type: ignore[arg-type]\n\n def save(self):\n if not self.exists:\n raise self.DoesNotExistError(self)\n\n if not self.isfile() and not self.isdir():\n raise self.IsNotFileOrDirError(self)\n\n self.ignore()\n self.hash_info = self.get_hash()\n", "path": "dvc/dependency/param.py"}]} | 1,941 | 151 |
gh_patches_debug_25469 | rasdani/github-patches | git_diff | ray-project__ray-9572 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[rllib] Train script overwrites framework field in experiment config argument
[rllib]
### What is the problem?
The framework setting passed to the training script via the config argument is ignored, because the training script uses separate arguments for this field (`--torch`, `--eager`). If none of these args is passed to the script, the framework is set to `tf`, regardless of the options passed in the `--config` argument.
### Expected behavior
The script should use the default value for the `framework` field only if it is not already present in the `config` dict (it can be passed as a cmd argument or loaded from a file).
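
For illustration only (the actual patch appears further down in this entry and takes a slightly different route), the precedence described above could be sketched as a small hypothetical helper:

```python
def resolve_framework(config: dict, use_torch: bool, use_eager: bool) -> str:
    # Explicit CLI flags win, then whatever --config / the YAML file provided,
    # and only then the historical default of "tf".
    if use_torch:
        return "torch"
    if use_eager:
        return "tfe"
    return config.get("framework", "tf")
```

With such a guard, the `--config "{\"framework\": \"tfe\"}"` call from the reproduction step below would no longer be overwritten.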
### Reproduction (REQUIRED)
Run `rllib train --run DQN --env CartPole-v0 --config "{\"framework\": \"tfe\"}"` - script uses `tf` instead of `tfe`
- [x] I have verified my script runs in a clean environment and reproduces the issue.
- [x] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rllib/train.py`
Content:
```
1 #!/usr/bin/env python
2
3 import argparse
4 import os
5 from pathlib import Path
6 import yaml
7
8 import ray
9 from ray.cluster_utils import Cluster
10 from ray.tune.config_parser import make_parser
11 from ray.tune.result import DEFAULT_RESULTS_DIR
12 from ray.tune.resources import resources_to_json
13 from ray.tune.tune import _make_scheduler, run_experiments
14 from ray.rllib.utils.framework import try_import_tf, try_import_torch
15
16 # Try to import both backends for flag checking/warnings.
17 tf1, tf, tfv = try_import_tf()
18 torch, _ = try_import_torch()
19
20 EXAMPLE_USAGE = """
21 Training example via RLlib CLI:
22 rllib train --run DQN --env CartPole-v0
23
24 Grid search example via RLlib CLI:
25 rllib train -f tuned_examples/cartpole-grid-search-example.yaml
26
27 Grid search example via executable:
28 ./train.py -f tuned_examples/cartpole-grid-search-example.yaml
29
30 Note that -f overrides all other trial-specific command-line options.
31 """
32
33
34 def create_parser(parser_creator=None):
35 parser = make_parser(
36 parser_creator=parser_creator,
37 formatter_class=argparse.RawDescriptionHelpFormatter,
38 description="Train a reinforcement learning agent.",
39 epilog=EXAMPLE_USAGE)
40
41 # See also the base parser definition in ray/tune/config_parser.py
42 parser.add_argument(
43 "--ray-address",
44 default=None,
45 type=str,
46 help="Connect to an existing Ray cluster at this address instead "
47 "of starting a new one.")
48 parser.add_argument(
49 "--no-ray-ui",
50 action="store_true",
51 help="Whether to disable the Ray web ui.")
52 parser.add_argument(
53 "--local-mode",
54 action="store_true",
55 help="Whether to run ray with `local_mode=True`. "
56 "Only if --ray-num-nodes is not used.")
57 parser.add_argument(
58 "--ray-num-cpus",
59 default=None,
60 type=int,
61 help="--num-cpus to use if starting a new cluster.")
62 parser.add_argument(
63 "--ray-num-gpus",
64 default=None,
65 type=int,
66 help="--num-gpus to use if starting a new cluster.")
67 parser.add_argument(
68 "--ray-num-nodes",
69 default=None,
70 type=int,
71 help="Emulate multiple cluster nodes for debugging.")
72 parser.add_argument(
73 "--ray-redis-max-memory",
74 default=None,
75 type=int,
76 help="--redis-max-memory to use if starting a new cluster.")
77 parser.add_argument(
78 "--ray-memory",
79 default=None,
80 type=int,
81 help="--memory to use if starting a new cluster.")
82 parser.add_argument(
83 "--ray-object-store-memory",
84 default=None,
85 type=int,
86 help="--object-store-memory to use if starting a new cluster.")
87 parser.add_argument(
88 "--experiment-name",
89 default="default",
90 type=str,
91 help="Name of the subdirectory under `local_dir` to put results in.")
92 parser.add_argument(
93 "--local-dir",
94 default=DEFAULT_RESULTS_DIR,
95 type=str,
96 help="Local dir to save training results to. Defaults to '{}'.".format(
97 DEFAULT_RESULTS_DIR))
98 parser.add_argument(
99 "--upload-dir",
100 default="",
101 type=str,
102 help="Optional URI to sync training results to (e.g. s3://bucket).")
103 parser.add_argument(
104 "-v", action="store_true", help="Whether to use INFO level logging.")
105 parser.add_argument(
106 "-vv", action="store_true", help="Whether to use DEBUG level logging.")
107 parser.add_argument(
108 "--resume",
109 action="store_true",
110 help="Whether to attempt to resume previous Tune experiments.")
111 parser.add_argument(
112 "--torch",
113 action="store_true",
114 help="Whether to use PyTorch (instead of tf) as the DL framework.")
115 parser.add_argument(
116 "--eager",
117 action="store_true",
118 help="Whether to attempt to enable TF eager execution.")
119 parser.add_argument(
120 "--trace",
121 action="store_true",
122 help="Whether to attempt to enable tracing for eager mode.")
123 parser.add_argument(
124 "--env", default=None, type=str, help="The gym environment to use.")
125 parser.add_argument(
126 "--queue-trials",
127 action="store_true",
128 help=(
129 "Whether to queue trials when the cluster does not currently have "
130 "enough resources to launch one. This should be set to True when "
131 "running on an autoscaling cluster to enable automatic scale-up."))
132 parser.add_argument(
133 "-f",
134 "--config-file",
135 default=None,
136 type=str,
137 help="If specified, use config options from this file. Note that this "
138 "overrides any trial-specific options set via flags above.")
139 return parser
140
141
142 def run(args, parser):
143 if args.config_file:
144 with open(args.config_file) as f:
145 experiments = yaml.safe_load(f)
146 else:
147 # Note: keep this in sync with tune/config_parser.py
148 experiments = {
149 args.experiment_name: { # i.e. log to ~/ray_results/default
150 "run": args.run,
151 "checkpoint_freq": args.checkpoint_freq,
152 "checkpoint_at_end": args.checkpoint_at_end,
153 "keep_checkpoints_num": args.keep_checkpoints_num,
154 "checkpoint_score_attr": args.checkpoint_score_attr,
155 "local_dir": args.local_dir,
156 "resources_per_trial": (
157 args.resources_per_trial and
158 resources_to_json(args.resources_per_trial)),
159 "stop": args.stop,
160 "config": dict(args.config, env=args.env),
161 "restore": args.restore,
162 "num_samples": args.num_samples,
163 "upload_dir": args.upload_dir,
164 }
165 }
166
167 verbose = 1
168 for exp in experiments.values():
169 # Bazel makes it hard to find files specified in `args` (and `data`).
170 # Look for them here.
171 # NOTE: Some of our yaml files don't have a `config` section.
172 if exp.get("config", {}).get("input") and \
173 not os.path.exists(exp["config"]["input"]):
174 # This script runs in the ray/rllib dir.
175 rllib_dir = Path(__file__).parent
176 input_file = rllib_dir.absolute().joinpath(exp["config"]["input"])
177 exp["config"]["input"] = str(input_file)
178
179 if not exp.get("run"):
180 parser.error("the following arguments are required: --run")
181 if not exp.get("env") and not exp.get("config", {}).get("env"):
182 parser.error("the following arguments are required: --env")
183 if args.eager:
184 exp["config"]["framework"] = "tfe"
185 elif args.torch:
186 exp["config"]["framework"] = "torch"
187 else:
188 exp["config"]["framework"] = "tf"
189 if args.v:
190 exp["config"]["log_level"] = "INFO"
191 verbose = 2
192 if args.vv:
193 exp["config"]["log_level"] = "DEBUG"
194 verbose = 3
195 if args.trace:
196 if exp["config"]["framework"] != "tfe":
197 raise ValueError("Must enable --eager to enable tracing.")
198 exp["config"]["eager_tracing"] = True
199
200 if args.ray_num_nodes:
201 cluster = Cluster()
202 for _ in range(args.ray_num_nodes):
203 cluster.add_node(
204 num_cpus=args.ray_num_cpus or 1,
205 num_gpus=args.ray_num_gpus or 0,
206 object_store_memory=args.ray_object_store_memory,
207 memory=args.ray_memory,
208 redis_max_memory=args.ray_redis_max_memory)
209 ray.init(address=cluster.address)
210 else:
211 ray.init(
212 include_dashboard=not args.no_ray_ui,
213 address=args.ray_address,
214 object_store_memory=args.ray_object_store_memory,
215 memory=args.ray_memory,
216 redis_max_memory=args.ray_redis_max_memory,
217 num_cpus=args.ray_num_cpus,
218 num_gpus=args.ray_num_gpus,
219 local_mode=args.local_mode)
220
221 run_experiments(
222 experiments,
223 scheduler=_make_scheduler(args),
224 queue_trials=args.queue_trials,
225 resume=args.resume,
226 verbose=verbose,
227 concurrent=True)
228
229 ray.shutdown()
230
231
232 if __name__ == "__main__":
233 parser = create_parser()
234 args = parser.parse_args()
235 run(args, parser)
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rllib/train.py b/rllib/train.py
--- a/rllib/train.py
+++ b/rllib/train.py
@@ -180,22 +180,23 @@
parser.error("the following arguments are required: --run")
if not exp.get("env") and not exp.get("config", {}).get("env"):
parser.error("the following arguments are required: --env")
- if args.eager:
- exp["config"]["framework"] = "tfe"
- elif args.torch:
+
+ if args.torch:
exp["config"]["framework"] = "torch"
- else:
- exp["config"]["framework"] = "tf"
+ elif args.eager:
+ exp["config"]["framework"] = "tfe"
+
+ if args.trace:
+ if exp["config"]["framework"] not in ["tf2", "tfe"]:
+ raise ValueError("Must enable --eager to enable tracing.")
+ exp["config"]["eager_tracing"] = True
+
if args.v:
exp["config"]["log_level"] = "INFO"
verbose = 2
if args.vv:
exp["config"]["log_level"] = "DEBUG"
verbose = 3
- if args.trace:
- if exp["config"]["framework"] != "tfe":
- raise ValueError("Must enable --eager to enable tracing.")
- exp["config"]["eager_tracing"] = True
if args.ray_num_nodes:
cluster = Cluster()
| {"golden_diff": "diff --git a/rllib/train.py b/rllib/train.py\n--- a/rllib/train.py\n+++ b/rllib/train.py\n@@ -180,22 +180,23 @@\n parser.error(\"the following arguments are required: --run\")\n if not exp.get(\"env\") and not exp.get(\"config\", {}).get(\"env\"):\n parser.error(\"the following arguments are required: --env\")\n- if args.eager:\n- exp[\"config\"][\"framework\"] = \"tfe\"\n- elif args.torch:\n+\n+ if args.torch:\n exp[\"config\"][\"framework\"] = \"torch\"\n- else:\n- exp[\"config\"][\"framework\"] = \"tf\"\n+ elif args.eager:\n+ exp[\"config\"][\"framework\"] = \"tfe\"\n+\n+ if args.trace:\n+ if exp[\"config\"][\"framework\"] not in [\"tf2\", \"tfe\"]:\n+ raise ValueError(\"Must enable --eager to enable tracing.\")\n+ exp[\"config\"][\"eager_tracing\"] = True\n+\n if args.v:\n exp[\"config\"][\"log_level\"] = \"INFO\"\n verbose = 2\n if args.vv:\n exp[\"config\"][\"log_level\"] = \"DEBUG\"\n verbose = 3\n- if args.trace:\n- if exp[\"config\"][\"framework\"] != \"tfe\":\n- raise ValueError(\"Must enable --eager to enable tracing.\")\n- exp[\"config\"][\"eager_tracing\"] = True\n \n if args.ray_num_nodes:\n cluster = Cluster()\n", "issue": "[rllib] Train script overwrites framework field in experiment config argument\n[rllib]\r\n\r\n### What is the problem?\r\n\r\nFramework setting passed to training script via config argument is ignored, because training script uses separate arguments for this field (`--torch`, `--eager`). If none of these args is passed to the script the framework is set to `tf`, regardless of options passed in `--config` argument.\r\n\r\n### Expected behavior\r\n\r\nScript should use default value for `framework` field only if it is not already present in `config` dict (it can be passed as cmd argument or be loaded from file).\r\n\r\n### Reproduction (REQUIRED)\r\nRun `rllib train --run DQN --env CartPole-v0 --config \"{\\\"framework\\\": \\\"tfe\\\"}\"` - script uses `tf` instead of `tfe`\r\n\r\n- [x] I have verified my script runs in a clean environment and reproduces the issue.\r\n- [x] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport argparse\nimport os\nfrom pathlib import Path\nimport yaml\n\nimport ray\nfrom ray.cluster_utils import Cluster\nfrom ray.tune.config_parser import make_parser\nfrom ray.tune.result import DEFAULT_RESULTS_DIR\nfrom ray.tune.resources import resources_to_json\nfrom ray.tune.tune import _make_scheduler, run_experiments\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\n\n# Try to import both backends for flag checking/warnings.\ntf1, tf, tfv = try_import_tf()\ntorch, _ = try_import_torch()\n\nEXAMPLE_USAGE = \"\"\"\nTraining example via RLlib CLI:\n rllib train --run DQN --env CartPole-v0\n\nGrid search example via RLlib CLI:\n rllib train -f tuned_examples/cartpole-grid-search-example.yaml\n\nGrid search example via executable:\n ./train.py -f tuned_examples/cartpole-grid-search-example.yaml\n\nNote that -f overrides all other trial-specific command-line options.\n\"\"\"\n\n\ndef create_parser(parser_creator=None):\n parser = make_parser(\n parser_creator=parser_creator,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"Train a reinforcement learning agent.\",\n epilog=EXAMPLE_USAGE)\n\n # See also the base parser definition in ray/tune/config_parser.py\n parser.add_argument(\n \"--ray-address\",\n default=None,\n type=str,\n help=\"Connect to an 
existing Ray cluster at this address instead \"\n \"of starting a new one.\")\n parser.add_argument(\n \"--no-ray-ui\",\n action=\"store_true\",\n help=\"Whether to disable the Ray web ui.\")\n parser.add_argument(\n \"--local-mode\",\n action=\"store_true\",\n help=\"Whether to run ray with `local_mode=True`. \"\n \"Only if --ray-num-nodes is not used.\")\n parser.add_argument(\n \"--ray-num-cpus\",\n default=None,\n type=int,\n help=\"--num-cpus to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-num-gpus\",\n default=None,\n type=int,\n help=\"--num-gpus to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-num-nodes\",\n default=None,\n type=int,\n help=\"Emulate multiple cluster nodes for debugging.\")\n parser.add_argument(\n \"--ray-redis-max-memory\",\n default=None,\n type=int,\n help=\"--redis-max-memory to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-memory\",\n default=None,\n type=int,\n help=\"--memory to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-object-store-memory\",\n default=None,\n type=int,\n help=\"--object-store-memory to use if starting a new cluster.\")\n parser.add_argument(\n \"--experiment-name\",\n default=\"default\",\n type=str,\n help=\"Name of the subdirectory under `local_dir` to put results in.\")\n parser.add_argument(\n \"--local-dir\",\n default=DEFAULT_RESULTS_DIR,\n type=str,\n help=\"Local dir to save training results to. Defaults to '{}'.\".format(\n DEFAULT_RESULTS_DIR))\n parser.add_argument(\n \"--upload-dir\",\n default=\"\",\n type=str,\n help=\"Optional URI to sync training results to (e.g. s3://bucket).\")\n parser.add_argument(\n \"-v\", action=\"store_true\", help=\"Whether to use INFO level logging.\")\n parser.add_argument(\n \"-vv\", action=\"store_true\", help=\"Whether to use DEBUG level logging.\")\n parser.add_argument(\n \"--resume\",\n action=\"store_true\",\n help=\"Whether to attempt to resume previous Tune experiments.\")\n parser.add_argument(\n \"--torch\",\n action=\"store_true\",\n help=\"Whether to use PyTorch (instead of tf) as the DL framework.\")\n parser.add_argument(\n \"--eager\",\n action=\"store_true\",\n help=\"Whether to attempt to enable TF eager execution.\")\n parser.add_argument(\n \"--trace\",\n action=\"store_true\",\n help=\"Whether to attempt to enable tracing for eager mode.\")\n parser.add_argument(\n \"--env\", default=None, type=str, help=\"The gym environment to use.\")\n parser.add_argument(\n \"--queue-trials\",\n action=\"store_true\",\n help=(\n \"Whether to queue trials when the cluster does not currently have \"\n \"enough resources to launch one. This should be set to True when \"\n \"running on an autoscaling cluster to enable automatic scale-up.\"))\n parser.add_argument(\n \"-f\",\n \"--config-file\",\n default=None,\n type=str,\n help=\"If specified, use config options from this file. Note that this \"\n \"overrides any trial-specific options set via flags above.\")\n return parser\n\n\ndef run(args, parser):\n if args.config_file:\n with open(args.config_file) as f:\n experiments = yaml.safe_load(f)\n else:\n # Note: keep this in sync with tune/config_parser.py\n experiments = {\n args.experiment_name: { # i.e. 
log to ~/ray_results/default\n \"run\": args.run,\n \"checkpoint_freq\": args.checkpoint_freq,\n \"checkpoint_at_end\": args.checkpoint_at_end,\n \"keep_checkpoints_num\": args.keep_checkpoints_num,\n \"checkpoint_score_attr\": args.checkpoint_score_attr,\n \"local_dir\": args.local_dir,\n \"resources_per_trial\": (\n args.resources_per_trial and\n resources_to_json(args.resources_per_trial)),\n \"stop\": args.stop,\n \"config\": dict(args.config, env=args.env),\n \"restore\": args.restore,\n \"num_samples\": args.num_samples,\n \"upload_dir\": args.upload_dir,\n }\n }\n\n verbose = 1\n for exp in experiments.values():\n # Bazel makes it hard to find files specified in `args` (and `data`).\n # Look for them here.\n # NOTE: Some of our yaml files don't have a `config` section.\n if exp.get(\"config\", {}).get(\"input\") and \\\n not os.path.exists(exp[\"config\"][\"input\"]):\n # This script runs in the ray/rllib dir.\n rllib_dir = Path(__file__).parent\n input_file = rllib_dir.absolute().joinpath(exp[\"config\"][\"input\"])\n exp[\"config\"][\"input\"] = str(input_file)\n\n if not exp.get(\"run\"):\n parser.error(\"the following arguments are required: --run\")\n if not exp.get(\"env\") and not exp.get(\"config\", {}).get(\"env\"):\n parser.error(\"the following arguments are required: --env\")\n if args.eager:\n exp[\"config\"][\"framework\"] = \"tfe\"\n elif args.torch:\n exp[\"config\"][\"framework\"] = \"torch\"\n else:\n exp[\"config\"][\"framework\"] = \"tf\"\n if args.v:\n exp[\"config\"][\"log_level\"] = \"INFO\"\n verbose = 2\n if args.vv:\n exp[\"config\"][\"log_level\"] = \"DEBUG\"\n verbose = 3\n if args.trace:\n if exp[\"config\"][\"framework\"] != \"tfe\":\n raise ValueError(\"Must enable --eager to enable tracing.\")\n exp[\"config\"][\"eager_tracing\"] = True\n\n if args.ray_num_nodes:\n cluster = Cluster()\n for _ in range(args.ray_num_nodes):\n cluster.add_node(\n num_cpus=args.ray_num_cpus or 1,\n num_gpus=args.ray_num_gpus or 0,\n object_store_memory=args.ray_object_store_memory,\n memory=args.ray_memory,\n redis_max_memory=args.ray_redis_max_memory)\n ray.init(address=cluster.address)\n else:\n ray.init(\n include_dashboard=not args.no_ray_ui,\n address=args.ray_address,\n object_store_memory=args.ray_object_store_memory,\n memory=args.ray_memory,\n redis_max_memory=args.ray_redis_max_memory,\n num_cpus=args.ray_num_cpus,\n num_gpus=args.ray_num_gpus,\n local_mode=args.local_mode)\n\n run_experiments(\n experiments,\n scheduler=_make_scheduler(args),\n queue_trials=args.queue_trials,\n resume=args.resume,\n verbose=verbose,\n concurrent=True)\n\n ray.shutdown()\n\n\nif __name__ == \"__main__\":\n parser = create_parser()\n args = parser.parse_args()\n run(args, parser)\n", "path": "rllib/train.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport argparse\nimport os\nfrom pathlib import Path\nimport yaml\n\nimport ray\nfrom ray.cluster_utils import Cluster\nfrom ray.tune.config_parser import make_parser\nfrom ray.tune.result import DEFAULT_RESULTS_DIR\nfrom ray.tune.resources import resources_to_json\nfrom ray.tune.tune import _make_scheduler, run_experiments\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\n\n# Try to import both backends for flag checking/warnings.\ntf1, tf, tfv = try_import_tf()\ntorch, _ = try_import_torch()\n\nEXAMPLE_USAGE = \"\"\"\nTraining example via RLlib CLI:\n rllib train --run DQN --env CartPole-v0\n\nGrid search example via RLlib CLI:\n rllib train -f 
tuned_examples/cartpole-grid-search-example.yaml\n\nGrid search example via executable:\n ./train.py -f tuned_examples/cartpole-grid-search-example.yaml\n\nNote that -f overrides all other trial-specific command-line options.\n\"\"\"\n\n\ndef create_parser(parser_creator=None):\n parser = make_parser(\n parser_creator=parser_creator,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"Train a reinforcement learning agent.\",\n epilog=EXAMPLE_USAGE)\n\n # See also the base parser definition in ray/tune/config_parser.py\n parser.add_argument(\n \"--ray-address\",\n default=None,\n type=str,\n help=\"Connect to an existing Ray cluster at this address instead \"\n \"of starting a new one.\")\n parser.add_argument(\n \"--no-ray-ui\",\n action=\"store_true\",\n help=\"Whether to disable the Ray web ui.\")\n parser.add_argument(\n \"--local-mode\",\n action=\"store_true\",\n help=\"Whether to run ray with `local_mode=True`. \"\n \"Only if --ray-num-nodes is not used.\")\n parser.add_argument(\n \"--ray-num-cpus\",\n default=None,\n type=int,\n help=\"--num-cpus to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-num-gpus\",\n default=None,\n type=int,\n help=\"--num-gpus to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-num-nodes\",\n default=None,\n type=int,\n help=\"Emulate multiple cluster nodes for debugging.\")\n parser.add_argument(\n \"--ray-redis-max-memory\",\n default=None,\n type=int,\n help=\"--redis-max-memory to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-memory\",\n default=None,\n type=int,\n help=\"--memory to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-object-store-memory\",\n default=None,\n type=int,\n help=\"--object-store-memory to use if starting a new cluster.\")\n parser.add_argument(\n \"--experiment-name\",\n default=\"default\",\n type=str,\n help=\"Name of the subdirectory under `local_dir` to put results in.\")\n parser.add_argument(\n \"--local-dir\",\n default=DEFAULT_RESULTS_DIR,\n type=str,\n help=\"Local dir to save training results to. Defaults to '{}'.\".format(\n DEFAULT_RESULTS_DIR))\n parser.add_argument(\n \"--upload-dir\",\n default=\"\",\n type=str,\n help=\"Optional URI to sync training results to (e.g. s3://bucket).\")\n parser.add_argument(\n \"-v\", action=\"store_true\", help=\"Whether to use INFO level logging.\")\n parser.add_argument(\n \"-vv\", action=\"store_true\", help=\"Whether to use DEBUG level logging.\")\n parser.add_argument(\n \"--resume\",\n action=\"store_true\",\n help=\"Whether to attempt to resume previous Tune experiments.\")\n parser.add_argument(\n \"--torch\",\n action=\"store_true\",\n help=\"Whether to use PyTorch (instead of tf) as the DL framework.\")\n parser.add_argument(\n \"--eager\",\n action=\"store_true\",\n help=\"Whether to attempt to enable TF eager execution.\")\n parser.add_argument(\n \"--trace\",\n action=\"store_true\",\n help=\"Whether to attempt to enable tracing for eager mode.\")\n parser.add_argument(\n \"--env\", default=None, type=str, help=\"The gym environment to use.\")\n parser.add_argument(\n \"--queue-trials\",\n action=\"store_true\",\n help=(\n \"Whether to queue trials when the cluster does not currently have \"\n \"enough resources to launch one. This should be set to True when \"\n \"running on an autoscaling cluster to enable automatic scale-up.\"))\n parser.add_argument(\n \"-f\",\n \"--config-file\",\n default=None,\n type=str,\n help=\"If specified, use config options from this file. 
Note that this \"\n \"overrides any trial-specific options set via flags above.\")\n return parser\n\n\ndef run(args, parser):\n if args.config_file:\n with open(args.config_file) as f:\n experiments = yaml.safe_load(f)\n else:\n # Note: keep this in sync with tune/config_parser.py\n experiments = {\n args.experiment_name: { # i.e. log to ~/ray_results/default\n \"run\": args.run,\n \"checkpoint_freq\": args.checkpoint_freq,\n \"checkpoint_at_end\": args.checkpoint_at_end,\n \"keep_checkpoints_num\": args.keep_checkpoints_num,\n \"checkpoint_score_attr\": args.checkpoint_score_attr,\n \"local_dir\": args.local_dir,\n \"resources_per_trial\": (\n args.resources_per_trial and\n resources_to_json(args.resources_per_trial)),\n \"stop\": args.stop,\n \"config\": dict(args.config, env=args.env),\n \"restore\": args.restore,\n \"num_samples\": args.num_samples,\n \"upload_dir\": args.upload_dir,\n }\n }\n\n verbose = 1\n for exp in experiments.values():\n # Bazel makes it hard to find files specified in `args` (and `data`).\n # Look for them here.\n # NOTE: Some of our yaml files don't have a `config` section.\n if exp.get(\"config\", {}).get(\"input\") and \\\n not os.path.exists(exp[\"config\"][\"input\"]):\n # This script runs in the ray/rllib dir.\n rllib_dir = Path(__file__).parent\n input_file = rllib_dir.absolute().joinpath(exp[\"config\"][\"input\"])\n exp[\"config\"][\"input\"] = str(input_file)\n\n if not exp.get(\"run\"):\n parser.error(\"the following arguments are required: --run\")\n if not exp.get(\"env\") and not exp.get(\"config\", {}).get(\"env\"):\n parser.error(\"the following arguments are required: --env\")\n\n if args.torch:\n exp[\"config\"][\"framework\"] = \"torch\"\n elif args.eager:\n exp[\"config\"][\"framework\"] = \"tfe\"\n\n if args.trace:\n if exp[\"config\"][\"framework\"] not in [\"tf2\", \"tfe\"]:\n raise ValueError(\"Must enable --eager to enable tracing.\")\n exp[\"config\"][\"eager_tracing\"] = True\n\n if args.v:\n exp[\"config\"][\"log_level\"] = \"INFO\"\n verbose = 2\n if args.vv:\n exp[\"config\"][\"log_level\"] = \"DEBUG\"\n verbose = 3\n\n if args.ray_num_nodes:\n cluster = Cluster()\n for _ in range(args.ray_num_nodes):\n cluster.add_node(\n num_cpus=args.ray_num_cpus or 1,\n num_gpus=args.ray_num_gpus or 0,\n object_store_memory=args.ray_object_store_memory,\n memory=args.ray_memory,\n redis_max_memory=args.ray_redis_max_memory)\n ray.init(address=cluster.address)\n else:\n ray.init(\n include_dashboard=not args.no_ray_ui,\n address=args.ray_address,\n object_store_memory=args.ray_object_store_memory,\n memory=args.ray_memory,\n redis_max_memory=args.ray_redis_max_memory,\n num_cpus=args.ray_num_cpus,\n num_gpus=args.ray_num_gpus,\n local_mode=args.local_mode)\n\n run_experiments(\n experiments,\n scheduler=_make_scheduler(args),\n queue_trials=args.queue_trials,\n resume=args.resume,\n verbose=verbose,\n concurrent=True)\n\n ray.shutdown()\n\n\nif __name__ == \"__main__\":\n parser = create_parser()\n args = parser.parse_args()\n run(args, parser)\n", "path": "rllib/train.py"}]} | 2,880 | 335 |
gh_patches_debug_23344 | rasdani/github-patches | git_diff | ultrabug__py3status-2161 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Undocumented requirement of dateutil
**Describe the bug**
The timewarrior module, amongst others, depends on the dateutil module, but it is not described as a requirement:
https://github.com/ultrabug/py3status/blob/786d1b2f8d9c9196664b3fe60970968af48114e3/py3status/modules/timewarrior.py#L143
https://github.com/ultrabug/py3status/blob/786d1b2f8d9c9196664b3fe60970968af48114e3/py3status/modules/timewarrior.py#L56-L57
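
For illustration, the module's docstring already carries a `Requires:` section (visible in the file listing below), so one sketch of declaring the extra dependency there could look like this (exact wording is up to the maintainers):

```
Requires:
    timew: feature-rich time tracking utility
    python-dateutil: used by this module to compute interval durations
```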
**Your py3status version**
I found it here:
```
py3status version 3.43 (python 3.10.4) on i3
```
**To Reproduce**
Steps to reproduce the behavior along with your py3status configuration (filter out any private values):
1. Try to use py3status with timewarrior without dateutil installed. This will happen on NixOS.
2. See the error when clicking the timewarrior module on your bar
**Expected behavior**
I expect dateutil to be declared as a requirement for the timewarrior module
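
Alternatively, the duration fields can also be computed with the standard library alone, which would remove the need for dateutil entirely (the patch later in this entry goes this way). A minimal sketch, assuming plain `datetime` subtraction is sufficient here:

```python
import datetime as dt

def duration_parts(start: dt.datetime, end: dt.datetime) -> dict:
    # Sketch only: a timedelta's days/seconds can be split into the
    # fields the module formats, with no third-party dependency.
    delta = end - start
    return {
        "days": delta.days,
        "hours": delta.seconds // 3600,
        "minutes": (delta.seconds // 60) % 60,
        "seconds": delta.seconds % 60,
    }
```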
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py3status/modules/timewarrior.py`
Content:
```
1 r"""
2 Track your time with Timewarrior.
3
4 Timewarrior is a time tracking utility that offers simple stopwatch features
5 as well as sophisticated calendar-base backfill, along with flexible reporting.
6 See https://taskwarrior.org/docs/timewarrior for more information.
7
8 Configuration parameters:
9 cache_timeout: refresh interval for this module, otherwise auto
10 (default None)
11 filter: specify interval and/or tag to filter (default '1day')
12 format: display format for this module
13 (default '[Timew {format_time}]|No Timew')
14 format_datetime: specify strftime characters to format (default {})
15 format_duration: display format for time duration
16 (default '\?not_zero [{days}d ][{hours}:]{minutes}:{seconds}')
17 format_tag: display format for tags (default '\?color=state_tag {name}')
18 format_tag_separator: show separator if more than one (default ' ')
19 format_time: display format for tracked times
20 (default '[\?color=state_time [{format_tag} ]{format_duration}]')
21 format_time_separator: show separator if more than one (default ' ')
22 thresholds: specify color thresholds to use
23 *(default {'state_tag': [(0, 'darkgray'), (1, 'darkgray')],
24 'state_time': [(0, 'darkgray'), (1, 'degraded')]})*
25
26 Format placeholders:
27 {format_time} format for tracked times
28 {tracking} time tracking state, eg False, True
29
30 format_time placeholders:
31 {state} time tracking state, eg False, True
32 {format_tag} format for tags
33 {format_duration} format for time duration
34 {start} start date, eg 20171021T010203Z
35 {end} end date, eg 20171021T010203Z
36
37 format_tag placeholders:
38 {name} tag name, eg gaming, studying, gardening
39
40 format_datetime placeholders:
41 key: start, end
42 value: strftime characters, eg '%b %d' ----> 'Oct 06'
43
44 format_duration placeholders:
45 {days} days
46 {hours} hours
47 {minutes} minutes
48 {seconds} seconds
49
50 Color thresholds:
51 format_time:
52 state_time: print color based on the state of time tracking
53 format_tag:
54 state_tag: print color based on the state of time tracking
55
56 Requires:
57 timew: feature-rich time tracking utility
58
59 Recommendations:
60 We can refresh a module using `py3-cmd` command.
61 An excellent example of using this command in a function.
62
63 ```
64 ~/.{bash,zsh}{rc,_profile}
65 ---------------------------
66 function timew () {
67 command timew "$@" && py3-cmd refresh timewarrior
68 }
69 ```
70
71 With this, you can consider giving `cache_timeout` a much larger number,
72 eg 3600 (an hour), so the module does not need to be updated that often.
73
74 Examples:
75 ```
76 # show times matching the filter, see documentation for more filters
77 timewarrior {
78 filter = ':day' # filter times not in 24 hours of current day
79 filter = '12hours' # filter times not in 12 hours of current time
80 filter = '5min' # filter times not in 5 minutes of current time
81 filter = '1sec' # filter times not in 1 second of current time
82 filter = '5pm to 11:59pm # filter times not in 5pm to 11:59pm range
83 }
84
85 # intervals
86 timewarrior {
87 # if you are printing other intervals too with '1day' filter or so,
88 # then you may want to add this too for better bar readability
89 format_time_separator = ', '
90
91 # you also can change the thresholds with different colors
92 thresholds = {
93 'state_tag': [(0, 'darkgray'), (1, 'degraded')],
94 'state_time': [(0, 'darkgray'), (1, 'degraded')],
95 }
96 }
97
98 # cache_timeout
99 timewarrior {
100 # auto refresh every 10 seconds when there is no active time tracking
101 # auto refresh every second when there is active time tracking
102 cache_timeout = None
103
104 # refresh every minute when there is no active time tracking
105 # refresh every second when there is active time tracking
106 cache_timeout = 60
107
108 # explicit refresh every 20 seconds when there is no active time tracking
109 # explicit refresh every 5 seconds when there is active time tracking
110 cache_timeout = (20, 5)
111 }
112
113 # add your snippets here
114 timewarrior {
115 format = "..."
116 }
117 ```
118
119 @author lasers
120
121 SAMPLE OUTPUT
122 [
123 {'full_text': 'Timew '},
124 {'full_text': 'gaming ', 'color': '#a9a9a9'},
125 {'full_text': '15:02 ', 'color': '#a9a9a9'},
126 {'full_text': 'studying ', 'color': '#a9a9a9'},
127 {'full_text': '03:42', 'color': '#ffff00'}
128 ]
129
130 no_tag
131 [
132 {'full_text': 'Timew '},
133 {'full_text': 'gardening ', 'color': '#a9a9a9'},
134 {'full_text': '20:37', 'color': '#ffff00'}
135 ]
136
137 no_timew
138 {'full_text': 'No Timew'}
139 """
140
141 from json import loads as json_loads
142 import datetime as dt
143 from dateutil.relativedelta import relativedelta
144
145 STRING_NOT_INSTALLED = "not installed"
146 DATETIME = "%Y%m%dT%H%M%SZ"
147 STRING_INVALID_TIMEOUT = "invalid cache_timeout"
148
149
150 class Py3status:
151 """"""
152
153 # available configuration parameters
154 cache_timeout = None
155 filter = "1day"
156 format = "[Timew {format_time}]|No Timew"
157 format_datetime = {}
158 format_duration = r"\?not_zero [{days}d ][{hours}:]{minutes}:{seconds}"
159 format_tag = r"\?color=state_tag {name}"
160 format_tag_separator = " "
161 format_time = r"[\?color=state_time [{format_tag} ]{format_duration}]"
162 format_time_separator = " "
163 thresholds = {
164 "state_tag": [(0, "darkgray"), (1, "darkgray")],
165 "state_time": [(0, "darkgray"), (1, "degraded")],
166 }
167
168 class Meta:
169 update_config = {
170 "update_placeholder_format": [
171 {
172 "placeholder_formats": {"minutes": ":02d", "seconds": ":02d"},
173 "format_strings": ["format_duration"],
174 }
175 ]
176 }
177
178 def post_config_hook(self):
179 if not self.py3.check_commands("timew"):
180 raise Exception(STRING_NOT_INSTALLED)
181
182 if self.cache_timeout is None:
183 self.sleep_timeout = 10
184 self.cache_timeout = 0
185 elif isinstance(self.cache_timeout, tuple):
186 if len(self.cache_timeout) != 2:
187 raise Exception(STRING_INVALID_TIMEOUT)
188 self.sleep_timeout = self.cache_timeout[0]
189 self.cache_timeout = self.cache_timeout[1]
190 elif isinstance(self.cache_timeout, int):
191 self.sleep_timeout = self.cache_timeout
192 self.cache_timeout = 0
193
194 self.timewarrior_command = "timew export"
195 if self.filter:
196 self.timewarrior_command += f" {self.filter}"
197
198 self.init = {"datetimes": []}
199 for word in ["start", "end"]:
200 if (self.py3.format_contains(self.format_time, word)) and (
201 word in self.format_datetime
202 ):
203 self.init["datetimes"].append(word)
204
205 self.tracking = None
206 self.thresholds_init = {}
207 for name in ("format", "format_tag", "format_time"):
208 self.thresholds_init[name] = self.py3.get_color_names_list(
209 getattr(self, name)
210 )
211
212 def _get_timewarrior_data(self):
213 return json_loads(self.py3.command_output(self.timewarrior_command))
214
215 def _manipulate(self, data):
216 new_time = []
217 self.tracking = False
218
219 for i, time in enumerate(data):
220 time["index"] = len(data) - i
221 time["state_time"] = "end" not in time
222
223 # tags
224 new_tag = []
225 time["tags"] = time.get("tags", [])
226 for tag_name in time["tags"]:
227 tag_data = {"name": tag_name, "state_tag": time["state_time"]}
228 for x in self.thresholds_init["format_tag"]:
229 if x in tag_data:
230 self.py3.threshold_get_color(tag_data[x], x)
231 new_tag.append(self.py3.safe_format(self.format_tag, tag_data))
232
233 format_tag_separator = self.py3.safe_format(self.format_tag_separator)
234 format_tag = self.py3.composite_join(format_tag_separator, new_tag)
235
236 time["format_tag"] = format_tag
237 del time["tags"]
238
239 # duraton
240 if time["state_time"]:
241 self.tracking = True
242 end = dt.datetime.utcnow()
243 else:
244 end = dt.datetime.strptime(time["end"], DATETIME)
245
246 start = dt.datetime.strptime(time["start"], DATETIME)
247 duration = relativedelta(end, start)
248
249 time["format_duration"] = self.py3.safe_format(
250 self.format_duration,
251 {
252 "days": duration.days,
253 "hours": duration.hours,
254 "minutes": duration.minutes,
255 "seconds": duration.seconds,
256 },
257 )
258
259 # datetime
260 for word in self.init["datetimes"]:
261 if word in time:
262 time[word] = self.py3.safe_format(
263 dt.datetime.strftime(
264 dt.datetime.strptime(time[word], DATETIME),
265 self.format_datetime[word],
266 )
267 )
268
269 # time
270 for x in self.thresholds_init["format_time"]:
271 if x in time:
272 self.py3.threshold_get_color(time[x], x)
273
274 new_time.append(self.py3.safe_format(self.format_time, time))
275
276 format_time_separator = self.py3.safe_format(self.format_time_separator)
277 format_time = self.py3.composite_join(format_time_separator, new_time)
278 return format_time
279
280 def timewarrior(self):
281 timewarrior_data = self._get_timewarrior_data()
282 format_time = self._manipulate(timewarrior_data)
283
284 if self.tracking:
285 cached_until = self.cache_timeout
286 else:
287 cached_until = self.sleep_timeout
288
289 timew_data = {"format_time": format_time, "tracking": self.tracking}
290
291 for x in self.thresholds_init["format"]:
292 if x in timew_data:
293 self.py3.threshold_get_color(timew_data[x], x)
294
295 return {
296 "cached_until": self.py3.time_in(cached_until),
297 "full_text": self.py3.safe_format(self.format, timew_data),
298 }
299
300
301 if __name__ == "__main__":
302 from py3status.module_test import module_test
303
304 module_test(Py3status)
305
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/py3status/modules/timewarrior.py b/py3status/modules/timewarrior.py
--- a/py3status/modules/timewarrior.py
+++ b/py3status/modules/timewarrior.py
@@ -140,7 +140,6 @@
from json import loads as json_loads
import datetime as dt
-from dateutil.relativedelta import relativedelta
STRING_NOT_INSTALLED = "not installed"
DATETIME = "%Y%m%dT%H%M%SZ"
@@ -244,15 +243,15 @@
end = dt.datetime.strptime(time["end"], DATETIME)
start = dt.datetime.strptime(time["start"], DATETIME)
- duration = relativedelta(end, start)
+ duration = end - start
time["format_duration"] = self.py3.safe_format(
self.format_duration,
{
"days": duration.days,
- "hours": duration.hours,
- "minutes": duration.minutes,
- "seconds": duration.seconds,
+ "hours": duration.seconds // (60 * 60),
+ "minutes": (duration.seconds // 60) % 60,
+ "seconds": duration.seconds % 60,
},
)
| {"golden_diff": "diff --git a/py3status/modules/timewarrior.py b/py3status/modules/timewarrior.py\n--- a/py3status/modules/timewarrior.py\n+++ b/py3status/modules/timewarrior.py\n@@ -140,7 +140,6 @@\n \n from json import loads as json_loads\n import datetime as dt\n-from dateutil.relativedelta import relativedelta\n \n STRING_NOT_INSTALLED = \"not installed\"\n DATETIME = \"%Y%m%dT%H%M%SZ\"\n@@ -244,15 +243,15 @@\n end = dt.datetime.strptime(time[\"end\"], DATETIME)\n \n start = dt.datetime.strptime(time[\"start\"], DATETIME)\n- duration = relativedelta(end, start)\n+ duration = end - start\n \n time[\"format_duration\"] = self.py3.safe_format(\n self.format_duration,\n {\n \"days\": duration.days,\n- \"hours\": duration.hours,\n- \"minutes\": duration.minutes,\n- \"seconds\": duration.seconds,\n+ \"hours\": duration.seconds // (60 * 60),\n+ \"minutes\": (duration.seconds // 60) % 60,\n+ \"seconds\": duration.seconds % 60,\n },\n )\n", "issue": "Undocumented requirement of dateutil\n**Describe the bug**\r\nThe timewarrior module, amongst others, depend on the dateutil module, but it is not described as a requirement:\r\nhttps://github.com/ultrabug/py3status/blob/786d1b2f8d9c9196664b3fe60970968af48114e3/py3status/modules/timewarrior.py#L143\r\n\r\nhttps://github.com/ultrabug/py3status/blob/786d1b2f8d9c9196664b3fe60970968af48114e3/py3status/modules/timewarrior.py#L56-L57\r\n\r\n**Your py3status version**\r\nI found it here:\r\n```\r\npy3status version 3.43 (python 3.10.4) on i3\r\n``\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior along with your py3status configuration (filter out any private values):\r\n1. Try to use py3status with timewarrior without dateutil installed. This'll happen on nixos\r\n2. See the error when clicking the timewarrior module on your bar\r\n\r\n**Expected behavior**\r\nI expect dateutil to be declared as a requirement for the timewarrior module\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "r\"\"\"\nTrack your time with Timewarrior.\n\nTimewarrior is a time tracking utility that offers simple stopwatch features\nas well as sophisticated calendar-base backfill, along with flexible reporting.\nSee https://taskwarrior.org/docs/timewarrior for more information.\n\nConfiguration parameters:\n cache_timeout: refresh interval for this module, otherwise auto\n (default None)\n filter: specify interval and/or tag to filter (default '1day')\n format: display format for this module\n (default '[Timew {format_time}]|No Timew')\n format_datetime: specify strftime characters to format (default {})\n format_duration: display format for time duration\n (default '\\?not_zero [{days}d ][{hours}:]{minutes}:{seconds}')\n format_tag: display format for tags (default '\\?color=state_tag {name}')\n format_tag_separator: show separator if more than one (default ' ')\n format_time: display format for tracked times\n (default '[\\?color=state_time [{format_tag} ]{format_duration}]')\n format_time_separator: show separator if more than one (default ' ')\n thresholds: specify color thresholds to use\n *(default {'state_tag': [(0, 'darkgray'), (1, 'darkgray')],\n 'state_time': [(0, 'darkgray'), (1, 'degraded')]})*\n\nFormat placeholders:\n {format_time} format for tracked times\n {tracking} time tracking state, eg False, True\n\nformat_time placeholders:\n {state} time tracking state, eg False, True\n {format_tag} format for 
tags\n {format_duration} format for time duration\n {start} start date, eg 20171021T010203Z\n {end} end date, eg 20171021T010203Z\n\nformat_tag placeholders:\n {name} tag name, eg gaming, studying, gardening\n\nformat_datetime placeholders:\n key: start, end\n value: strftime characters, eg '%b %d' ----> 'Oct 06'\n\nformat_duration placeholders:\n {days} days\n {hours} hours\n {minutes} minutes\n {seconds} seconds\n\nColor thresholds:\n format_time:\n state_time: print color based on the state of time tracking\n format_tag:\n state_tag: print color based on the state of time tracking\n\nRequires:\n timew: feature-rich time tracking utility\n\nRecommendations:\n We can refresh a module using `py3-cmd` command.\n An excellent example of using this command in a function.\n\n ```\n ~/.{bash,zsh}{rc,_profile}\n ---------------------------\n function timew () {\n command timew \"$@\" && py3-cmd refresh timewarrior\n }\n ```\n\n With this, you can consider giving `cache_timeout` a much larger number,\n eg 3600 (an hour), so the module does not need to be updated that often.\n\nExamples:\n```\n# show times matching the filter, see documentation for more filters\ntimewarrior {\n filter = ':day' # filter times not in 24 hours of current day\n filter = '12hours' # filter times not in 12 hours of current time\n filter = '5min' # filter times not in 5 minutes of current time\n filter = '1sec' # filter times not in 1 second of current time\n filter = '5pm to 11:59pm # filter times not in 5pm to 11:59pm range\n}\n\n# intervals\ntimewarrior {\n # if you are printing other intervals too with '1day' filter or so,\n # then you may want to add this too for better bar readability\n format_time_separator = ', '\n\n # you also can change the thresholds with different colors\n thresholds = {\n 'state_tag': [(0, 'darkgray'), (1, 'degraded')],\n 'state_time': [(0, 'darkgray'), (1, 'degraded')],\n }\n}\n\n# cache_timeout\ntimewarrior {\n # auto refresh every 10 seconds when there is no active time tracking\n # auto refresh every second when there is active time tracking\n cache_timeout = None\n\n # refresh every minute when there is no active time tracking\n # refresh every second when there is active time tracking\n cache_timeout = 60\n\n # explicit refresh every 20 seconds when there is no active time tracking\n # explicit refresh every 5 seconds when there is active time tracking\n cache_timeout = (20, 5)\n}\n\n# add your snippets here\ntimewarrior {\n format = \"...\"\n}\n```\n\n@author lasers\n\nSAMPLE OUTPUT\n[\n {'full_text': 'Timew '},\n {'full_text': 'gaming ', 'color': '#a9a9a9'},\n {'full_text': '15:02 ', 'color': '#a9a9a9'},\n {'full_text': 'studying ', 'color': '#a9a9a9'},\n {'full_text': '03:42', 'color': '#ffff00'}\n]\n\nno_tag\n[\n {'full_text': 'Timew '},\n {'full_text': 'gardening ', 'color': '#a9a9a9'},\n {'full_text': '20:37', 'color': '#ffff00'}\n]\n\nno_timew\n{'full_text': 'No Timew'}\n\"\"\"\n\nfrom json import loads as json_loads\nimport datetime as dt\nfrom dateutil.relativedelta import relativedelta\n\nSTRING_NOT_INSTALLED = \"not installed\"\nDATETIME = \"%Y%m%dT%H%M%SZ\"\nSTRING_INVALID_TIMEOUT = \"invalid cache_timeout\"\n\n\nclass Py3status:\n \"\"\"\"\"\"\n\n # available configuration parameters\n cache_timeout = None\n filter = \"1day\"\n format = \"[Timew {format_time}]|No Timew\"\n format_datetime = {}\n format_duration = r\"\\?not_zero [{days}d ][{hours}:]{minutes}:{seconds}\"\n format_tag = r\"\\?color=state_tag {name}\"\n format_tag_separator = \" \"\n format_time = 
r\"[\\?color=state_time [{format_tag} ]{format_duration}]\"\n format_time_separator = \" \"\n thresholds = {\n \"state_tag\": [(0, \"darkgray\"), (1, \"darkgray\")],\n \"state_time\": [(0, \"darkgray\"), (1, \"degraded\")],\n }\n\n class Meta:\n update_config = {\n \"update_placeholder_format\": [\n {\n \"placeholder_formats\": {\"minutes\": \":02d\", \"seconds\": \":02d\"},\n \"format_strings\": [\"format_duration\"],\n }\n ]\n }\n\n def post_config_hook(self):\n if not self.py3.check_commands(\"timew\"):\n raise Exception(STRING_NOT_INSTALLED)\n\n if self.cache_timeout is None:\n self.sleep_timeout = 10\n self.cache_timeout = 0\n elif isinstance(self.cache_timeout, tuple):\n if len(self.cache_timeout) != 2:\n raise Exception(STRING_INVALID_TIMEOUT)\n self.sleep_timeout = self.cache_timeout[0]\n self.cache_timeout = self.cache_timeout[1]\n elif isinstance(self.cache_timeout, int):\n self.sleep_timeout = self.cache_timeout\n self.cache_timeout = 0\n\n self.timewarrior_command = \"timew export\"\n if self.filter:\n self.timewarrior_command += f\" {self.filter}\"\n\n self.init = {\"datetimes\": []}\n for word in [\"start\", \"end\"]:\n if (self.py3.format_contains(self.format_time, word)) and (\n word in self.format_datetime\n ):\n self.init[\"datetimes\"].append(word)\n\n self.tracking = None\n self.thresholds_init = {}\n for name in (\"format\", \"format_tag\", \"format_time\"):\n self.thresholds_init[name] = self.py3.get_color_names_list(\n getattr(self, name)\n )\n\n def _get_timewarrior_data(self):\n return json_loads(self.py3.command_output(self.timewarrior_command))\n\n def _manipulate(self, data):\n new_time = []\n self.tracking = False\n\n for i, time in enumerate(data):\n time[\"index\"] = len(data) - i\n time[\"state_time\"] = \"end\" not in time\n\n # tags\n new_tag = []\n time[\"tags\"] = time.get(\"tags\", [])\n for tag_name in time[\"tags\"]:\n tag_data = {\"name\": tag_name, \"state_tag\": time[\"state_time\"]}\n for x in self.thresholds_init[\"format_tag\"]:\n if x in tag_data:\n self.py3.threshold_get_color(tag_data[x], x)\n new_tag.append(self.py3.safe_format(self.format_tag, tag_data))\n\n format_tag_separator = self.py3.safe_format(self.format_tag_separator)\n format_tag = self.py3.composite_join(format_tag_separator, new_tag)\n\n time[\"format_tag\"] = format_tag\n del time[\"tags\"]\n\n # duraton\n if time[\"state_time\"]:\n self.tracking = True\n end = dt.datetime.utcnow()\n else:\n end = dt.datetime.strptime(time[\"end\"], DATETIME)\n\n start = dt.datetime.strptime(time[\"start\"], DATETIME)\n duration = relativedelta(end, start)\n\n time[\"format_duration\"] = self.py3.safe_format(\n self.format_duration,\n {\n \"days\": duration.days,\n \"hours\": duration.hours,\n \"minutes\": duration.minutes,\n \"seconds\": duration.seconds,\n },\n )\n\n # datetime\n for word in self.init[\"datetimes\"]:\n if word in time:\n time[word] = self.py3.safe_format(\n dt.datetime.strftime(\n dt.datetime.strptime(time[word], DATETIME),\n self.format_datetime[word],\n )\n )\n\n # time\n for x in self.thresholds_init[\"format_time\"]:\n if x in time:\n self.py3.threshold_get_color(time[x], x)\n\n new_time.append(self.py3.safe_format(self.format_time, time))\n\n format_time_separator = self.py3.safe_format(self.format_time_separator)\n format_time = self.py3.composite_join(format_time_separator, new_time)\n return format_time\n\n def timewarrior(self):\n timewarrior_data = self._get_timewarrior_data()\n format_time = self._manipulate(timewarrior_data)\n\n if self.tracking:\n cached_until = 
[verification data (JSON) for the preceding record: escaped before/after file contents of py3status/modules/timewarrior.py] | 3,874 | 278
gh_patches_debug_31622 | rasdani/github-patches | git_diff | lightly-ai__lightly-425 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lightly-upload fails with video dataset
# lightly-upload fails with video dataset
A bug introduced in #417 breaks image upload for video datasets, because we try to determine the file size of each image on disk.
Relevant lines:
https://github.com/lightly-ai/lightly/blob/faff18868914f6884dab73a00ec4940e35106f48/lightly/api/api_workflow_upload_dataset.py#L143
Possible fix: wrap the file size extraction in a try/except and use the old method as a fallback.
--- END ISSUE ---
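The fallback suggested in the issue could look roughly like the sketch below. It is only an illustration: the helper name and call site are invented here, since the affected line of `api_workflow_upload_dataset.py` is not included in this record.

```python
import os


def get_file_size_or_none(image_path):
    """Hypothetical helper: return the on-disk size of an image, or None.

    Video datasets expose frames that never exist as files on disk, so the
    size lookup is wrapped in try/except and the caller falls back to the
    old upload path (without a size) when the lookup fails.
    """
    try:
        return os.path.getsize(image_path)
    except OSError:
        return None
```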
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightly/data/dataset.py`
Content:
```
1 """ Lightly Dataset """
2
3 # Copyright (c) 2020. Lightly AG and its affiliates.
4 # All Rights Reserved
5
6 import os
7 import shutil
8 import tempfile
9
10 import PIL.Image
11 from PIL import Image
12 from typing import List, Union, Callable
13
14 import torch.utils.data as data
15 import torchvision.datasets as datasets
16 from torchvision import transforms
17
18 from lightly.data._helpers import _load_dataset
19 from lightly.data._helpers import DatasetFolder
20 from lightly.data._video import VideoDataset
21
22
23 def _get_filename_by_index(dataset, index):
24 """Default function which maps the index of an image to a filename.
25
26 """
27 if isinstance(dataset, datasets.ImageFolder):
28 # filename is the path of the image relative to the dataset root
29 full_path = dataset.imgs[index][0]
30 return os.path.relpath(full_path, dataset.root)
31 elif isinstance(dataset, DatasetFolder):
32 # filename is the path of the image relative to the dataset root
33 full_path = dataset.samples[index][0]
34 return os.path.relpath(full_path, dataset.root)
35 elif isinstance(dataset, VideoDataset):
36 # filename is constructed by the video dataset
37 return dataset.get_filename(index)
38 else:
39 # dummy to prevent crashes
40 return str(index)
41
42
43 def _ensure_dir(path):
44 """Makes sure that the directory at path exists.
45
46 """
47 dirname = os.path.dirname(path)
48 os.makedirs(dirname, exist_ok=True)
49
50
51 def _copy_image(input_dir, output_dir, filename):
52 """Copies an image from the input directory to the output directory.
53
54 """
55 source = os.path.join(input_dir, filename)
56 target = os.path.join(output_dir, filename)
57 _ensure_dir(target)
58 shutil.copyfile(source, target)
59
60
61 def _save_image(image, output_dir, filename, fmt):
62 """Saves an image in the output directory.
63
64 """
65 target = os.path.join(output_dir, filename)
66 _ensure_dir(target)
67 try:
68 # try to save the image with the specified format or
69 # derive the format from the filename (if format=None)
70 image.save(target, format=fmt)
71 except ValueError:
72 # could not determine format from filename
73 image.save(target, format='png')
74
75
76 def _dump_image(dataset, output_dir, filename, index, fmt):
77 """Saves a single image to the output directory.
78
79 Will copy the image from the input directory to the output directory
80 if possible. If not (e.g. for VideoDatasets), will load the image and
81 then save it to the output directory with the specified format.
82
83 """
84
85 if isinstance(dataset, datasets.ImageFolder):
86 # can safely copy the image from the input to the output directory
87 _copy_image(dataset.root, output_dir, filename)
88 elif isinstance(dataset, DatasetFolder):
89 # can safely copy the image from the input to the output directory
90 _copy_image(dataset.root, output_dir, filename)
91 else:
92 # need to load the image and save it to the output directory
93 image, _ = dataset[index]
94 _save_image(image, output_dir, filename, fmt)
95
96
97 class LightlyDataset:
98 """Provides a uniform data interface for the embedding models.
99
100 Should be used for all models and functions in the lightly package.
101 Returns a tuple (sample, target, fname) when accessed using __getitem__.
102
103 The LightlyDataset supports different input sources. You can use it
104 on a folder of images. You can also use it on a folder with subfolders
105 with images (ImageNet style). If the input_dir has subfolders each subfolder
106 gets its own target label. You can also work with videos (requires pyav).
107 If there are multiple videos in the input_dir each video gets a different
108 target label assigned. If input_dir contains images and videos
109 only the videos are used.
110
111 Can also be used in combination with the `from_torch_dataset` method
112 to load a dataset offered by torchvision (e.g. cifar10).
113
114 Args:
115 input_dir:
116 Path to directory holding the images or videos to load.
117 transform:
118 Image transforms (as in torchvision).
119 index_to_filename:
120 Function which takes the dataset and index as input and returns
121 the filename of the file at the index. If None, uses default.
122
123 Examples:
124 >>> # load a dataset consisting of images from a local folder
125 >>> # mydata/
126 >>> # `- img1.png
127 >>> # `- img2.png
128 >>> # `- ...
129 >>> import lightly.data as data
130 >>> dataset = data.LightlyDataset(input_dir='path/to/mydata/')
131 >>> sample, target, fname = dataset[0]
132 >>>
133 >>> # also works with subfolders
134 >>> # mydata/
135 >>> # `- subfolder1
136 >>> # `- img1.png
137 >>> # `- subfolder2
138 >>> # ...
139 >>>
140 >>> # also works with videos
141 >>> # mydata/
142 >>> # `- video1.mp4
143 >>> # `- video2.mp4
144 >>> # `- ...
145 """
146
147 def __init__(self,
148 input_dir: str,
149 transform: transforms.Compose = None,
150 index_to_filename: Callable[[datasets.VisionDataset, int], str] = None):
151
152 # can pass input_dir=None to create an "empty" dataset
153 self.input_dir = input_dir
154 if self.input_dir is not None:
155 self.dataset = _load_dataset(self.input_dir, transform)
156
157 # initialize function to get filename of image
158 self.index_to_filename = _get_filename_by_index
159 if index_to_filename is not None:
160 self.index_to_filename = index_to_filename
161
162 @classmethod
163 def from_torch_dataset(cls,
164 dataset,
165 transform=None,
166 index_to_filename=None):
167 """Builds a LightlyDataset from a PyTorch (or torchvision) dataset.
168
169 Args:
170 dataset:
171 PyTorch/torchvision dataset.
172 transform:
173 Image transforms (as in torchvision).
174 index_to_filename:
175 Function which takes the dataset and index as input and returns
176 the filename of the file at the index. If None, uses default.
177
178 Returns:
179 A LightlyDataset object.
180
181 Examples:
182 >>> # load cifar10 from torchvision
183 >>> import torchvision
184 >>> import lightly.data as data
185 >>> base = torchvision.datasets.CIFAR10(root='./')
186 >>> dataset = data.LightlyDataset.from_torch_dataset(base)
187
188 """
189 # create an "empty" dataset object
190 dataset_obj = cls(
191 None,
192 transform=transform,
193 index_to_filename=index_to_filename
194 )
195
196 # populate it with the torch dataset
197 dataset_obj.dataset = dataset
198 return dataset_obj
199
200 def __getitem__(self, index: int):
201 """Returns (sample, target, fname) of item at index.
202
203 Args:
204 index:
205 Index of the queried item.
206
207 Returns:
208 The image, target, and filename of the item at index.
209
210 """
211 fname = self.index_to_filename(self.dataset, index)
212 sample, target = self.dataset.__getitem__(index)
213
214 return sample, target, fname
215
216 def __len__(self):
217 """Returns the length of the dataset.
218
219 """
220 return len(self.dataset)
221
222 def __add__(self, other):
223 """Adds another item to the dataset.
224
225 """
226 raise NotImplementedError()
227
228 def get_filenames(self) -> List[str]:
229 """Returns all filenames in the dataset.
230
231 """
232 list_of_filenames = []
233 for index in range(len(self)):
234 fname = self.index_to_filename(self.dataset, index)
235 list_of_filenames.append(fname)
236 return list_of_filenames
237
238 def dump(self,
239 output_dir: str,
240 filenames: Union[List[str], None] = None,
241 format: Union[str, None] = None):
242 """Saves images in the dataset to the output directory.
243
244 Will copy the images from the input directory to the output directory
245 if possible. If not (e.g. for VideoDatasets), will load the images and
246 then save them to the output directory with the specified format.
247
248 Args:
249 output_dir:
250 Output directory where the image is stored.
251 filenames:
252 Filenames of the images to store. If None, stores all images.
253 format:
254 Image format. Can be any pillow image format (png, jpg, ...).
255 By default we try to use the same format as the input data. If
256 not possible (e.g. for videos) we dump the image
257 as a png image to prevent compression artifacts.
258
259 """
260
261 if self.dataset.transform is not None:
262 raise RuntimeError('Cannot dump dataset which applies transforms!')
263
264 # create directory if it doesn't exist yet
265 os.makedirs(output_dir, exist_ok=True)
266
267 # dump all the files if no filenames were passed, otherwise dump only
268 # the ones referenced in the list
269 if filenames is None:
270 indices = [i for i in range(self.__len__())]
271 filenames = self.get_filenames()
272 else:
273 indices = []
274 all_filenames = self.get_filenames()
275 for i in range(len(filenames)):
276 if filenames[i] in all_filenames:
277 indices.append(i)
278
279 # dump images
280 for i, filename in zip(indices, filenames):
281 _dump_image(self.dataset, output_dir, filename, i, fmt=format)
282
283 def get_filepath_from_filename(self, filename: str, image: PIL.Image.Image = None):
284 """Returns the filepath given the filename of the image
285
286 Args:
287 filename:
288 The filename of the image
289 image:
290 The image corresponding to the filename
291
292 Returns:
293
294 """
295 if hasattr(self, 'input_dir') and isinstance(self.input_dir, str):
296 return os.path.join(self.input_dir, filename)
297 else:
298 if image is None:
299 raise ValueError("This LightlyDataset was created from a torch dataset and thus has no input_dir."
300 "Thus you must provide the image to be able to save it and return the path to it.")
301 folder_path = tempfile.mkdtemp()
302 filepath = os.path.join(folder_path,filename) + '.jpg'
303 image.save(filepath)
304 return filepath
305
306
307 @property
308 def transform(self):
309 """Getter for the transform of the dataset.
310
311 """
312 return self.dataset.transform
313
314 @transform.setter
315 def transform(self, t):
316 """Setter for the transform of the dataset.
317
318 """
319 self.dataset.transform = t
320
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lightly/data/dataset.py b/lightly/data/dataset.py
--- a/lightly/data/dataset.py
+++ b/lightly/data/dataset.py
@@ -283,6 +283,10 @@
def get_filepath_from_filename(self, filename: str, image: PIL.Image.Image = None):
"""Returns the filepath given the filename of the image
+ There are three cases:
+ - The dataset is a regular dataset with the images in the input dir.
+ - The dataset is a video dataset, thus the images have to be saved in a temporary folder.
+ - The dataset is a torch dataset, thus the images have to be saved in a temporary folder.
Args:
filename:
The filename of the image
@@ -290,18 +294,25 @@
The image corresponding to the filename
Returns:
+ The filename to the image, either the exiting one (case 1) or a newly created jpg (case 2, 3)
"""
- if hasattr(self, 'input_dir') and isinstance(self.input_dir, str):
- return os.path.join(self.input_dir, filename)
- else:
- if image is None:
- raise ValueError("This LightlyDataset was created from a torch dataset and thus has no input_dir."
- "Thus you must provide the image to be able to save it and return the path to it.")
- folder_path = tempfile.mkdtemp()
- filepath = os.path.join(folder_path,filename) + '.jpg'
- image.save(filepath)
- return filepath
+
+ has_input_dir = hasattr(self, 'input_dir') and isinstance(self.input_dir, str)
+ if has_input_dir:
+ path_to_image = os.path.join(self.input_dir, filename)
+ if os.path.isfile(path_to_image):
+ # Case 1
+ return path_to_image
+
+ if image is None:
+ raise ValueError("The parameter image must not be None for VideoDatasets and TorchDatasets")
+
+ # Case 2 and 3
+ folder_path = tempfile.mkdtemp()
+ filepath = os.path.join(folder_path,filename) + '.jpg'
+ image.save(filepath)
+ return filepath
@property
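For context, here is a rough usage sketch of the patched `get_filepath_from_filename`. The paths are made up and the exact contents of a `VideoDataset` sample are assumed, so treat this as an illustration rather than documented lightly behaviour.

```python
from lightly.data import LightlyDataset

# Case 1: a plain image folder; the file exists on disk, so its path is returned.
dataset = LightlyDataset(input_dir="mydata/")
sample, target, fname = dataset[0]
path = dataset.get_filepath_from_filename(fname)

# Case 2: a video dataset; the frame has no file on disk, so the patched method
# saves the passed-in PIL image to a temporary .jpg and returns that path.
video_dataset = LightlyDataset(input_dir="myvideos/")
frame, target, fname = video_dataset[0]
tmp_path = video_dataset.get_filepath_from_filename(fname, image=frame)
```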
| {"golden_diff": "diff --git a/lightly/data/dataset.py b/lightly/data/dataset.py\n--- a/lightly/data/dataset.py\n+++ b/lightly/data/dataset.py\n@@ -283,6 +283,10 @@\n def get_filepath_from_filename(self, filename: str, image: PIL.Image.Image = None):\n \"\"\"Returns the filepath given the filename of the image\n \n+ There are three cases:\n+ - The dataset is a regular dataset with the images in the input dir.\n+ - The dataset is a video dataset, thus the images have to be saved in a temporary folder.\n+ - The dataset is a torch dataset, thus the images have to be saved in a temporary folder.\n Args:\n filename:\n The filename of the image\n@@ -290,18 +294,25 @@\n The image corresponding to the filename\n \n Returns:\n+ The filename to the image, either the exiting one (case 1) or a newly created jpg (case 2, 3)\n \n \"\"\"\n- if hasattr(self, 'input_dir') and isinstance(self.input_dir, str):\n- return os.path.join(self.input_dir, filename)\n- else:\n- if image is None:\n- raise ValueError(\"This LightlyDataset was created from a torch dataset and thus has no input_dir.\"\n- \"Thus you must provide the image to be able to save it and return the path to it.\")\n- folder_path = tempfile.mkdtemp()\n- filepath = os.path.join(folder_path,filename) + '.jpg'\n- image.save(filepath)\n- return filepath\n+\n+ has_input_dir = hasattr(self, 'input_dir') and isinstance(self.input_dir, str)\n+ if has_input_dir:\n+ path_to_image = os.path.join(self.input_dir, filename)\n+ if os.path.isfile(path_to_image):\n+ # Case 1\n+ return path_to_image\n+\n+ if image is None:\n+ raise ValueError(\"The parameter image must not be None for VideoDatasets and TorchDatasets\")\n+\n+ # Case 2 and 3\n+ folder_path = tempfile.mkdtemp()\n+ filepath = os.path.join(folder_path,filename) + '.jpg'\n+ image.save(filepath)\n+ return filepath\n \n \n @property\n", "issue": "lightly-upload fails with video dataset\n# lightly-upload fails with video dataset\r\n\r\nBug introduced in #417 which breaks image upload for video datasets because we try to determine the file size of each image on disk.\r\n\r\nRelevant lines:\r\nhttps://github.com/lightly-ai/lightly/blob/faff18868914f6884dab73a00ec4940e35106f48/lightly/api/api_workflow_upload_dataset.py#L143\r\n\r\nPossible fix: Wrap the file size extraction in a try-catch and use the old method as a fallback.\n", "before_files": [{"content": "\"\"\" Lightly Dataset \"\"\"\n\n# Copyright (c) 2020. 
Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport os\nimport shutil\nimport tempfile\n\nimport PIL.Image\nfrom PIL import Image\nfrom typing import List, Union, Callable\n\nimport torch.utils.data as data\nimport torchvision.datasets as datasets\nfrom torchvision import transforms\n\nfrom lightly.data._helpers import _load_dataset\nfrom lightly.data._helpers import DatasetFolder\nfrom lightly.data._video import VideoDataset\n\n\ndef _get_filename_by_index(dataset, index):\n \"\"\"Default function which maps the index of an image to a filename.\n\n \"\"\"\n if isinstance(dataset, datasets.ImageFolder):\n # filename is the path of the image relative to the dataset root\n full_path = dataset.imgs[index][0]\n return os.path.relpath(full_path, dataset.root)\n elif isinstance(dataset, DatasetFolder):\n # filename is the path of the image relative to the dataset root\n full_path = dataset.samples[index][0]\n return os.path.relpath(full_path, dataset.root)\n elif isinstance(dataset, VideoDataset):\n # filename is constructed by the video dataset\n return dataset.get_filename(index)\n else:\n # dummy to prevent crashes\n return str(index)\n\n\ndef _ensure_dir(path):\n \"\"\"Makes sure that the directory at path exists.\n\n \"\"\"\n dirname = os.path.dirname(path)\n os.makedirs(dirname, exist_ok=True)\n\n\ndef _copy_image(input_dir, output_dir, filename):\n \"\"\"Copies an image from the input directory to the output directory.\n\n \"\"\"\n source = os.path.join(input_dir, filename)\n target = os.path.join(output_dir, filename)\n _ensure_dir(target)\n shutil.copyfile(source, target)\n\n\ndef _save_image(image, output_dir, filename, fmt):\n \"\"\"Saves an image in the output directory.\n\n \"\"\"\n target = os.path.join(output_dir, filename)\n _ensure_dir(target)\n try:\n # try to save the image with the specified format or\n # derive the format from the filename (if format=None)\n image.save(target, format=fmt)\n except ValueError:\n # could not determine format from filename\n image.save(target, format='png')\n\n\ndef _dump_image(dataset, output_dir, filename, index, fmt):\n \"\"\"Saves a single image to the output directory.\n\n Will copy the image from the input directory to the output directory\n if possible. If not (e.g. for VideoDatasets), will load the image and\n then save it to the output directory with the specified format.\n\n \"\"\"\n\n if isinstance(dataset, datasets.ImageFolder):\n # can safely copy the image from the input to the output directory\n _copy_image(dataset.root, output_dir, filename)\n elif isinstance(dataset, DatasetFolder):\n # can safely copy the image from the input to the output directory\n _copy_image(dataset.root, output_dir, filename)\n else:\n # need to load the image and save it to the output directory\n image, _ = dataset[index]\n _save_image(image, output_dir, filename, fmt)\n\n\nclass LightlyDataset:\n \"\"\"Provides a uniform data interface for the embedding models.\n\n Should be used for all models and functions in the lightly package.\n Returns a tuple (sample, target, fname) when accessed using __getitem__.\n\n The LightlyDataset supports different input sources. You can use it\n on a folder of images. You can also use it on a folder with subfolders\n with images (ImageNet style). If the input_dir has subfolders each subfolder\n gets its own target label. You can also work with videos (requires pyav).\n If there are multiple videos in the input_dir each video gets a different\n target label assigned. 
If input_dir contains images and videos\n only the videos are used.\n\n Can also be used in combination with the `from_torch_dataset` method\n to load a dataset offered by torchvision (e.g. cifar10).\n\n Args:\n input_dir:\n Path to directory holding the images or videos to load.\n transform:\n Image transforms (as in torchvision).\n index_to_filename:\n Function which takes the dataset and index as input and returns\n the filename of the file at the index. If None, uses default.\n\n Examples:\n >>> # load a dataset consisting of images from a local folder\n >>> # mydata/\n >>> # `- img1.png\n >>> # `- img2.png\n >>> # `- ...\n >>> import lightly.data as data\n >>> dataset = data.LightlyDataset(input_dir='path/to/mydata/')\n >>> sample, target, fname = dataset[0]\n >>>\n >>> # also works with subfolders\n >>> # mydata/\n >>> # `- subfolder1\n >>> # `- img1.png\n >>> # `- subfolder2\n >>> # ...\n >>>\n >>> # also works with videos\n >>> # mydata/\n >>> # `- video1.mp4\n >>> # `- video2.mp4\n >>> # `- ...\n \"\"\"\n\n def __init__(self,\n input_dir: str,\n transform: transforms.Compose = None,\n index_to_filename: Callable[[datasets.VisionDataset, int], str] = None):\n\n # can pass input_dir=None to create an \"empty\" dataset\n self.input_dir = input_dir\n if self.input_dir is not None:\n self.dataset = _load_dataset(self.input_dir, transform)\n\n # initialize function to get filename of image\n self.index_to_filename = _get_filename_by_index\n if index_to_filename is not None:\n self.index_to_filename = index_to_filename\n\n @classmethod\n def from_torch_dataset(cls,\n dataset,\n transform=None,\n index_to_filename=None):\n \"\"\"Builds a LightlyDataset from a PyTorch (or torchvision) dataset.\n\n Args:\n dataset:\n PyTorch/torchvision dataset.\n transform:\n Image transforms (as in torchvision).\n index_to_filename:\n Function which takes the dataset and index as input and returns\n the filename of the file at the index. 
If None, uses default.\n\n Returns:\n A LightlyDataset object.\n\n Examples:\n >>> # load cifar10 from torchvision\n >>> import torchvision\n >>> import lightly.data as data\n >>> base = torchvision.datasets.CIFAR10(root='./')\n >>> dataset = data.LightlyDataset.from_torch_dataset(base)\n\n \"\"\"\n # create an \"empty\" dataset object\n dataset_obj = cls(\n None,\n transform=transform,\n index_to_filename=index_to_filename\n )\n\n # populate it with the torch dataset\n dataset_obj.dataset = dataset\n return dataset_obj\n\n def __getitem__(self, index: int):\n \"\"\"Returns (sample, target, fname) of item at index.\n\n Args:\n index:\n Index of the queried item.\n\n Returns:\n The image, target, and filename of the item at index.\n\n \"\"\"\n fname = self.index_to_filename(self.dataset, index)\n sample, target = self.dataset.__getitem__(index)\n\n return sample, target, fname\n\n def __len__(self):\n \"\"\"Returns the length of the dataset.\n\n \"\"\"\n return len(self.dataset)\n\n def __add__(self, other):\n \"\"\"Adds another item to the dataset.\n\n \"\"\"\n raise NotImplementedError()\n\n def get_filenames(self) -> List[str]:\n \"\"\"Returns all filenames in the dataset.\n\n \"\"\"\n list_of_filenames = []\n for index in range(len(self)):\n fname = self.index_to_filename(self.dataset, index)\n list_of_filenames.append(fname)\n return list_of_filenames\n\n def dump(self,\n output_dir: str,\n filenames: Union[List[str], None] = None,\n format: Union[str, None] = None):\n \"\"\"Saves images in the dataset to the output directory.\n\n Will copy the images from the input directory to the output directory\n if possible. If not (e.g. for VideoDatasets), will load the images and\n then save them to the output directory with the specified format.\n\n Args:\n output_dir:\n Output directory where the image is stored.\n filenames:\n Filenames of the images to store. If None, stores all images.\n format:\n Image format. Can be any pillow image format (png, jpg, ...).\n By default we try to use the same format as the input data. If\n not possible (e.g. 
for videos) we dump the image \n as a png image to prevent compression artifacts.\n\n \"\"\"\n\n if self.dataset.transform is not None:\n raise RuntimeError('Cannot dump dataset which applies transforms!')\n\n # create directory if it doesn't exist yet\n os.makedirs(output_dir, exist_ok=True)\n\n # dump all the files if no filenames were passed, otherwise dump only\n # the ones referenced in the list\n if filenames is None:\n indices = [i for i in range(self.__len__())]\n filenames = self.get_filenames()\n else:\n indices = []\n all_filenames = self.get_filenames()\n for i in range(len(filenames)):\n if filenames[i] in all_filenames:\n indices.append(i)\n\n # dump images\n for i, filename in zip(indices, filenames):\n _dump_image(self.dataset, output_dir, filename, i, fmt=format)\n\n def get_filepath_from_filename(self, filename: str, image: PIL.Image.Image = None):\n \"\"\"Returns the filepath given the filename of the image\n\n Args:\n filename:\n The filename of the image\n image:\n The image corresponding to the filename\n\n Returns:\n\n \"\"\"\n if hasattr(self, 'input_dir') and isinstance(self.input_dir, str):\n return os.path.join(self.input_dir, filename)\n else:\n if image is None:\n raise ValueError(\"This LightlyDataset was created from a torch dataset and thus has no input_dir.\"\n \"Thus you must provide the image to be able to save it and return the path to it.\")\n folder_path = tempfile.mkdtemp()\n filepath = os.path.join(folder_path,filename) + '.jpg'\n image.save(filepath)\n return filepath\n\n\n @property\n def transform(self):\n \"\"\"Getter for the transform of the dataset.\n\n \"\"\"\n return self.dataset.transform\n\n @transform.setter\n def transform(self, t):\n \"\"\"Setter for the transform of the dataset.\n\n \"\"\"\n self.dataset.transform = t\n", "path": "lightly/data/dataset.py"}], "after_files": [{"content": "\"\"\" Lightly Dataset \"\"\"\n\n# Copyright (c) 2020. 
Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport os\nimport shutil\nimport tempfile\n\nimport PIL.Image\nfrom PIL import Image\nfrom typing import List, Union, Callable\n\nimport torch.utils.data as data\nimport torchvision.datasets as datasets\nfrom torchvision import transforms\n\nfrom lightly.data._helpers import _load_dataset\nfrom lightly.data._helpers import DatasetFolder\nfrom lightly.data._video import VideoDataset\n\n\ndef _get_filename_by_index(dataset, index):\n \"\"\"Default function which maps the index of an image to a filename.\n\n \"\"\"\n if isinstance(dataset, datasets.ImageFolder):\n # filename is the path of the image relative to the dataset root\n full_path = dataset.imgs[index][0]\n return os.path.relpath(full_path, dataset.root)\n elif isinstance(dataset, DatasetFolder):\n # filename is the path of the image relative to the dataset root\n full_path = dataset.samples[index][0]\n return os.path.relpath(full_path, dataset.root)\n elif isinstance(dataset, VideoDataset):\n # filename is constructed by the video dataset\n return dataset.get_filename(index)\n else:\n # dummy to prevent crashes\n return str(index)\n\n\ndef _ensure_dir(path):\n \"\"\"Makes sure that the directory at path exists.\n\n \"\"\"\n dirname = os.path.dirname(path)\n os.makedirs(dirname, exist_ok=True)\n\n\ndef _copy_image(input_dir, output_dir, filename):\n \"\"\"Copies an image from the input directory to the output directory.\n\n \"\"\"\n source = os.path.join(input_dir, filename)\n target = os.path.join(output_dir, filename)\n _ensure_dir(target)\n shutil.copyfile(source, target)\n\n\ndef _save_image(image, output_dir, filename, fmt):\n \"\"\"Saves an image in the output directory.\n\n \"\"\"\n target = os.path.join(output_dir, filename)\n _ensure_dir(target)\n try:\n # try to save the image with the specified format or\n # derive the format from the filename (if format=None)\n image.save(target, format=fmt)\n except ValueError:\n # could not determine format from filename\n image.save(target, format='png')\n\n\ndef _dump_image(dataset, output_dir, filename, index, fmt):\n \"\"\"Saves a single image to the output directory.\n\n Will copy the image from the input directory to the output directory\n if possible. If not (e.g. for VideoDatasets), will load the image and\n then save it to the output directory with the specified format.\n\n \"\"\"\n\n if isinstance(dataset, datasets.ImageFolder):\n # can safely copy the image from the input to the output directory\n _copy_image(dataset.root, output_dir, filename)\n elif isinstance(dataset, DatasetFolder):\n # can safely copy the image from the input to the output directory\n _copy_image(dataset.root, output_dir, filename)\n else:\n # need to load the image and save it to the output directory\n image, _ = dataset[index]\n _save_image(image, output_dir, filename, fmt)\n\n\nclass LightlyDataset:\n \"\"\"Provides a uniform data interface for the embedding models.\n\n Should be used for all models and functions in the lightly package.\n Returns a tuple (sample, target, fname) when accessed using __getitem__.\n\n The LightlyDataset supports different input sources. You can use it\n on a folder of images. You can also use it on a folder with subfolders\n with images (ImageNet style). If the input_dir has subfolders each subfolder\n gets its own target label. You can also work with videos (requires pyav).\n If there are multiple videos in the input_dir each video gets a different\n target label assigned. 
If input_dir contains images and videos\n only the videos are used.\n\n Can also be used in combination with the `from_torch_dataset` method\n to load a dataset offered by torchvision (e.g. cifar10).\n\n Args:\n input_dir:\n Path to directory holding the images or videos to load.\n transform:\n Image transforms (as in torchvision).\n index_to_filename:\n Function which takes the dataset and index as input and returns\n the filename of the file at the index. If None, uses default.\n\n Examples:\n >>> # load a dataset consisting of images from a local folder\n >>> # mydata/\n >>> # `- img1.png\n >>> # `- img2.png\n >>> # `- ...\n >>> import lightly.data as data\n >>> dataset = data.LightlyDataset(input_dir='path/to/mydata/')\n >>> sample, target, fname = dataset[0]\n >>>\n >>> # also works with subfolders\n >>> # mydata/\n >>> # `- subfolder1\n >>> # `- img1.png\n >>> # `- subfolder2\n >>> # ...\n >>>\n >>> # also works with videos\n >>> # mydata/\n >>> # `- video1.mp4\n >>> # `- video2.mp4\n >>> # `- ...\n \"\"\"\n\n def __init__(self,\n input_dir: str,\n transform: transforms.Compose = None,\n index_to_filename: Callable[[datasets.VisionDataset, int], str] = None):\n\n # can pass input_dir=None to create an \"empty\" dataset\n self.input_dir = input_dir\n if self.input_dir is not None:\n self.dataset = _load_dataset(self.input_dir, transform)\n\n # initialize function to get filename of image\n self.index_to_filename = _get_filename_by_index\n if index_to_filename is not None:\n self.index_to_filename = index_to_filename\n\n @classmethod\n def from_torch_dataset(cls,\n dataset,\n transform=None,\n index_to_filename=None):\n \"\"\"Builds a LightlyDataset from a PyTorch (or torchvision) dataset.\n\n Args:\n dataset:\n PyTorch/torchvision dataset.\n transform:\n Image transforms (as in torchvision).\n index_to_filename:\n Function which takes the dataset and index as input and returns\n the filename of the file at the index. 
If None, uses default.\n\n Returns:\n A LightlyDataset object.\n\n Examples:\n >>> # load cifar10 from torchvision\n >>> import torchvision\n >>> import lightly.data as data\n >>> base = torchvision.datasets.CIFAR10(root='./')\n >>> dataset = data.LightlyDataset.from_torch_dataset(base)\n\n \"\"\"\n # create an \"empty\" dataset object\n dataset_obj = cls(\n None,\n transform=transform,\n index_to_filename=index_to_filename\n )\n\n # populate it with the torch dataset\n dataset_obj.dataset = dataset\n return dataset_obj\n\n def __getitem__(self, index: int):\n \"\"\"Returns (sample, target, fname) of item at index.\n\n Args:\n index:\n Index of the queried item.\n\n Returns:\n The image, target, and filename of the item at index.\n\n \"\"\"\n fname = self.index_to_filename(self.dataset, index)\n sample, target = self.dataset.__getitem__(index)\n\n return sample, target, fname\n\n def __len__(self):\n \"\"\"Returns the length of the dataset.\n\n \"\"\"\n return len(self.dataset)\n\n def __add__(self, other):\n \"\"\"Adds another item to the dataset.\n\n \"\"\"\n raise NotImplementedError()\n\n def get_filenames(self) -> List[str]:\n \"\"\"Returns all filenames in the dataset.\n\n \"\"\"\n list_of_filenames = []\n for index in range(len(self)):\n fname = self.index_to_filename(self.dataset, index)\n list_of_filenames.append(fname)\n return list_of_filenames\n\n def dump(self,\n output_dir: str,\n filenames: Union[List[str], None] = None,\n format: Union[str, None] = None):\n \"\"\"Saves images in the dataset to the output directory.\n\n Will copy the images from the input directory to the output directory\n if possible. If not (e.g. for VideoDatasets), will load the images and\n then save them to the output directory with the specified format.\n\n Args:\n output_dir:\n Output directory where the image is stored.\n filenames:\n Filenames of the images to store. If None, stores all images.\n format:\n Image format. Can be any pillow image format (png, jpg, ...).\n By default we try to use the same format as the input data. If\n not possible (e.g. 
for videos) we dump the image \n as a png image to prevent compression artifacts.\n\n \"\"\"\n\n if self.dataset.transform is not None:\n raise RuntimeError('Cannot dump dataset which applies transforms!')\n\n # create directory if it doesn't exist yet\n os.makedirs(output_dir, exist_ok=True)\n\n # dump all the files if no filenames were passed, otherwise dump only\n # the ones referenced in the list\n if filenames is None:\n indices = [i for i in range(self.__len__())]\n filenames = self.get_filenames()\n else:\n indices = []\n all_filenames = self.get_filenames()\n for i in range(len(filenames)):\n if filenames[i] in all_filenames:\n indices.append(i)\n\n # dump images\n for i, filename in zip(indices, filenames):\n _dump_image(self.dataset, output_dir, filename, i, fmt=format)\n\n def get_filepath_from_filename(self, filename: str, image: PIL.Image.Image = None):\n \"\"\"Returns the filepath given the filename of the image\n\n There are three cases:\n - The dataset is a regular dataset with the images in the input dir.\n - The dataset is a video dataset, thus the images have to be saved in a temporary folder.\n - The dataset is a torch dataset, thus the images have to be saved in a temporary folder.\n Args:\n filename:\n The filename of the image\n image:\n The image corresponding to the filename\n\n Returns:\n The filename to the image, either the exiting one (case 1) or a newly created jpg (case 2, 3)\n\n \"\"\"\n\n has_input_dir = hasattr(self, 'input_dir') and isinstance(self.input_dir, str)\n if has_input_dir:\n path_to_image = os.path.join(self.input_dir, filename)\n if os.path.isfile(path_to_image):\n # Case 1\n return path_to_image\n\n if image is None:\n raise ValueError(\"The parameter image must not be None for VideoDatasets and TorchDatasets\")\n\n # Case 2 and 3\n folder_path = tempfile.mkdtemp()\n filepath = os.path.join(folder_path,filename) + '.jpg'\n image.save(filepath)\n return filepath\n\n\n @property\n def transform(self):\n \"\"\"Getter for the transform of the dataset.\n\n \"\"\"\n return self.dataset.transform\n\n @transform.setter\n def transform(self, t):\n \"\"\"Setter for the transform of the dataset.\n\n \"\"\"\n self.dataset.transform = t\n", "path": "lightly/data/dataset.py"}]} | 3,520 | 494 |
gh_patches_debug_5546 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-5695 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mitmdump jumps to 100% CPU when parent process exits
#### Problem Description
It took me two days to get this to reproduce in isolation. I hope someone with Python skills can figure out what is happening here. Depending on what the root cause is, this might not even be related to my funny architecture.
I'm spawning `mitmdump` from Node.js. If the node process exits, mitmdump is re-assigned to become a child of `systemd` (some unix wizardry). It then immediately jumps to 100% CPU and stays there. This _only_ happens when an addon uses at least one network event (go figure...). E.g. I'm using `client_connected` (it works with `clientconnect` on v6 as well). If the addon only uses something like `running`, the bug does not occur. Even better: if the addon originally only has `running`, nothing bad happens. But if I then add a `client_connected` and save the file (the addon is automatically reloaded), it instantly jumps to 100% CPU.
My guess is that it might be related to stdout and the switcheroo with the parent process. In my actual architecture the mitmdump process polls the parent via gRPC every second and shuts down if it's gone, but the 100% CPU prevents that.
Update: while trying to write down the exact steps, it turns out this might only reproduce via a local venv and not if you download the binary. I'm not sure; it's confusing. I'm confused. But I have video proof, so I'm not completely insane.
#### Steps to reproduce the behavior:
index.js
```js
const path = require('path');
const { spawn } = require('child_process');
function handleStdOut(data) {
console.log(`mitmdump stdout: ${data}`);
}
function handleStdError(data) {
console.error(`mitmdump stderr: ${data}`);
}
function handleExit(code) {
console.log(`mitm process exited with code ${code}`);
}
const mitm = spawn(
// Adjust this path
'/home/alex/Projects/Bandsalat/src/forks/mitmproxy/venv/bin/mitmdump',
['--quiet', '--set', 'connection_strategy=lazy', '--scripts', 'addon.py'],
{
detached: true,
windowsHide: true,
env: {
PYTHONUNBUFFERED: '1',
},
}
);
console.log(mitm.spawnargs);
mitm.unref();
mitm.on('exit', handleExit);
mitm.stdout.on('data', handleStdOut);
mitm.stderr.on('data', handleStdError);
```
addon.py
```py
class MyAddon:
def running(self):
print('running')
def client_connected(self, client):
print('client_connected')
addons = [
MyAddon()
]
```
1. I'm on Ubuntu
2. Adjust index.js to point to your local mitmproxy git venv
3. Launch `node index.js` (Node 14 and 16 both work for me)
4. Now open Chromium with mitmproxy configured. You don't need to enter any URL, Chromium will phone home anyway.
5. Keep Chromium open and ctrl+c the node process
6. Observe your fan getting louder and `top` showing mitmdump at 100% CPU
https://user-images.githubusercontent.com/679144/124594746-740a7080-de60-11eb-9ffb-a5fc4b3ba24a.mp4
#### System Information
Happens with both v6 and HEAD.
```
Mitmproxy: 7.0.0.dev (+492, commit af27556)
Python: 3.8.10
OpenSSL: OpenSSL 1.1.1i 8 Dec 2020
Platform: Linux-5.8.0-59-generic-x86_64-with-glibc2.29
```
--- END ISSUE ---
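One plausible reading of this report, and the direction the fix for this record takes, is that once the Node.js parent exits and its end of the stdout pipe is closed, every `print()` from the logging path raises `OSError` (a `BrokenPipeError`), and the repeated failure keeps the process busy. A minimal, generic sketch of the defensive pattern (not mitmproxy's actual API) would be:

```python
import sys


def safe_log(line):
    """Write a log line to stdout; exit if nobody is reading it any more."""
    try:
        print(line, file=sys.stdout)
    except OSError:
        # The parent that owned the pipe is gone; exiting is better than
        # failing (and retrying) on every single log record.
        sys.exit(1)
```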
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/termlog.py`
Content:
```
1 from __future__ import annotations
2 import asyncio
3 import logging
4 from typing import IO
5
6 import sys
7
8 from mitmproxy import ctx, log
9 from mitmproxy.utils import vt_codes
10
11
12 class TermLog:
13 def __init__(
14 self,
15 out: IO[str] | None = None
16 ):
17 self.logger = TermLogHandler(out)
18 self.logger.install()
19
20 def load(self, loader):
21 loader.add_option(
22 "termlog_verbosity", str, "info", "Log verbosity.", choices=log.LogLevels
23 )
24 self.logger.setLevel(logging.INFO)
25
26 def configure(self, updated):
27 if "termlog_verbosity" in updated:
28 self.logger.setLevel(ctx.options.termlog_verbosity.upper())
29
30 def done(self):
31 t = self._teardown()
32 try:
33 # try to delay teardown a bit.
34 asyncio.create_task(t)
35 except RuntimeError:
36 # no event loop, we're in a test.
37 asyncio.run(t)
38
39 async def _teardown(self):
40 self.logger.uninstall()
41
42
43 class TermLogHandler(log.MitmLogHandler):
44 def __init__(
45 self,
46 out: IO[str] | None = None
47 ):
48 super().__init__()
49 self.file: IO[str] = out or sys.stdout
50 self.has_vt_codes = vt_codes.ensure_supported(self.file)
51 self.formatter = log.MitmFormatter(self.has_vt_codes)
52
53 def emit(self, record: logging.LogRecord) -> None:
54 print(
55 self.format(record),
56 file=self.file
57 )
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/addons/termlog.py b/mitmproxy/addons/termlog.py
--- a/mitmproxy/addons/termlog.py
+++ b/mitmproxy/addons/termlog.py
@@ -51,7 +51,9 @@
self.formatter = log.MitmFormatter(self.has_vt_codes)
def emit(self, record: logging.LogRecord) -> None:
- print(
- self.format(record),
- file=self.file
- )
+ try:
+ print(self.format(record), file=self.file)
+ except OSError:
+ # We cannot print, exit immediately.
+ # See https://github.com/mitmproxy/mitmproxy/issues/4669
+ sys.exit(1)
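Note: the patch makes the terminal log handler exit the process the first time stdout turns out to be unwritable (for example, after the parent process that owned the pipe has exited), instead of raising on every emitted log record, which is presumably what kept the event loop spinning at 100% CPU.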
| {"golden_diff": "diff --git a/mitmproxy/addons/termlog.py b/mitmproxy/addons/termlog.py\n--- a/mitmproxy/addons/termlog.py\n+++ b/mitmproxy/addons/termlog.py\n@@ -51,7 +51,9 @@\n self.formatter = log.MitmFormatter(self.has_vt_codes)\n \n def emit(self, record: logging.LogRecord) -> None:\n- print(\n- self.format(record),\n- file=self.file\n- )\n+ try:\n+ print(self.format(record), file=self.file)\n+ except OSError:\n+ # We cannot print, exit immediately.\n+ # See https://github.com/mitmproxy/mitmproxy/issues/4669\n+ sys.exit(1)\n", "issue": "mitmdump jumps to 100% CPU when parent process exits\n#### Problem Description\r\n\r\nIt took me two days to make this reproduce in isolation. I hope someone with Python skills can figure out what is happening here. Depending on what the root cause is this might not even be related to my funny architecture.\r\n\r\nI'm spawning `mitmdump` from Node.js. If the node process exits mitmdump will be re-assigned to become a child of `systemd` (some unix wizardry). It will then immediately jump to 100% CPU and stay there. This _only_ happens when an addon is using at least one network event (go figure...). E.g. I'm using `client_connected` (works with `clientconnect` on v6 as well). If the addon is only using sth. like `running` the bug does not occur. Even better: if the addon originally only has \"running\" nothing bad happens. But if I then add a `client_connected` and save the file (and the addon is automatically reloaded) it will instantly jump to 100% CPU.\r\n\r\nMy guess is that it might be related to stdout and the switcheroo with the parent process? In my actual architecture the mitmdump process will poll the parent via gRPC every second and shutdown if it's gone. But the 100% CPU prevents that.\r\n\r\nUpdate: while trying to write down the exact steps it turns out this might only reproduce via local venv and and not if you download the binary. I'm not sure, it's confusing. I'm confused. But I have video proof, so I'm not completely insane.\r\n\r\n#### Steps to reproduce the behavior:\r\n\r\nindex.js\r\n\r\n```js\r\nconst path = require('path');\r\nconst { spawn } = require('child_process');\r\n\r\nfunction handleStdOut(data) {\r\n console.log(`mitmdump stdout: ${data}`);\r\n}\r\n\r\nfunction handleStdError(data) {\r\n console.error(`mitmdump stderr: ${data}`);\r\n}\r\n\r\nfunction handleExit(code) {\r\n console.log(`mitm process exited with code ${code}`);\r\n}\r\n\r\nconst mitm = spawn(\r\n // Adjust this path\r\n '/home/alex/Projects/Bandsalat/src/forks/mitmproxy/venv/bin/mitmdump',\r\n ['--quiet', '--set', 'connection_strategy=lazy', '--scripts', 'addon.py'],\r\n {\r\n detached: true,\r\n windowsHide: true,\r\n env: {\r\n PYTHONUNBUFFERED: '1',\r\n },\r\n }\r\n);\r\n\r\nconsole.log(mitm.spawnargs);\r\n\r\nmitm.unref();\r\nmitm.on('exit', handleExit);\r\nmitm.stdout.on('data', handleStdOut);\r\nmitm.stderr.on('data', handleStdError);\r\n```\r\naddon.py\r\n\r\n```py\r\nclass MyAddon:\r\n def running(self):\r\n print('running')\r\n\r\n def client_connected(self, client):\r\n print('client_connected')\r\n\r\naddons = [\r\n MyAddon()\r\n]\r\n```\r\n\r\n1. I'm on Ubuntu\r\n2. Adjust index.js to point to your local mitmproxy git venv\r\n3. Launch `node index.js` (Node 14 or 16 work both for me)\r\n4. Now open Chromium with mitmproxy configured. You don't need to enter any URL, Chromium will phone home anyway.\r\n5. Keep Chromium open and ctrl+c the node process\r\n6. 
Observe your fan getting louder and `top` showing mitmdump at 100% CPU\r\n\r\nhttps://user-images.githubusercontent.com/679144/124594746-740a7080-de60-11eb-9ffb-a5fc4b3ba24a.mp4\r\n\r\n#### System Information\r\n\r\nHappens with both v6 and HEAD.\r\n\r\n```\r\nMitmproxy: 7.0.0.dev (+492, commit af27556)\r\nPython: 3.8.10\r\nOpenSSL: OpenSSL 1.1.1i 8 Dec 2020\r\nPlatform: Linux-5.8.0-59-generic-x86_64-with-glibc2.29\r\n```\r\n\n", "before_files": [{"content": "from __future__ import annotations\nimport asyncio\nimport logging\nfrom typing import IO\n\nimport sys\n\nfrom mitmproxy import ctx, log\nfrom mitmproxy.utils import vt_codes\n\n\nclass TermLog:\n def __init__(\n self,\n out: IO[str] | None = None\n ):\n self.logger = TermLogHandler(out)\n self.logger.install()\n\n def load(self, loader):\n loader.add_option(\n \"termlog_verbosity\", str, \"info\", \"Log verbosity.\", choices=log.LogLevels\n )\n self.logger.setLevel(logging.INFO)\n\n def configure(self, updated):\n if \"termlog_verbosity\" in updated:\n self.logger.setLevel(ctx.options.termlog_verbosity.upper())\n\n def done(self):\n t = self._teardown()\n try:\n # try to delay teardown a bit.\n asyncio.create_task(t)\n except RuntimeError:\n # no event loop, we're in a test.\n asyncio.run(t)\n\n async def _teardown(self):\n self.logger.uninstall()\n\n\nclass TermLogHandler(log.MitmLogHandler):\n def __init__(\n self,\n out: IO[str] | None = None\n ):\n super().__init__()\n self.file: IO[str] = out or sys.stdout\n self.has_vt_codes = vt_codes.ensure_supported(self.file)\n self.formatter = log.MitmFormatter(self.has_vt_codes)\n\n def emit(self, record: logging.LogRecord) -> None:\n print(\n self.format(record),\n file=self.file\n )\n", "path": "mitmproxy/addons/termlog.py"}], "after_files": [{"content": "from __future__ import annotations\nimport asyncio\nimport logging\nfrom typing import IO\n\nimport sys\n\nfrom mitmproxy import ctx, log\nfrom mitmproxy.utils import vt_codes\n\n\nclass TermLog:\n def __init__(\n self,\n out: IO[str] | None = None\n ):\n self.logger = TermLogHandler(out)\n self.logger.install()\n\n def load(self, loader):\n loader.add_option(\n \"termlog_verbosity\", str, \"info\", \"Log verbosity.\", choices=log.LogLevels\n )\n self.logger.setLevel(logging.INFO)\n\n def configure(self, updated):\n if \"termlog_verbosity\" in updated:\n self.logger.setLevel(ctx.options.termlog_verbosity.upper())\n\n def done(self):\n t = self._teardown()\n try:\n # try to delay teardown a bit.\n asyncio.create_task(t)\n except RuntimeError:\n # no event loop, we're in a test.\n asyncio.run(t)\n\n async def _teardown(self):\n self.logger.uninstall()\n\n\nclass TermLogHandler(log.MitmLogHandler):\n def __init__(\n self,\n out: IO[str] | None = None\n ):\n super().__init__()\n self.file: IO[str] = out or sys.stdout\n self.has_vt_codes = vt_codes.ensure_supported(self.file)\n self.formatter = log.MitmFormatter(self.has_vt_codes)\n\n def emit(self, record: logging.LogRecord) -> None:\n try:\n print(self.format(record), file=self.file)\n except OSError:\n # We cannot print, exit immediately.\n # See https://github.com/mitmproxy/mitmproxy/issues/4669\n sys.exit(1)\n", "path": "mitmproxy/addons/termlog.py"}]} | 1,587 | 167 |
gh_patches_debug_9900 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-777 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TensorBoardLogger creates another tfevents file.
## 🐛 Bug
TensorBoardLogger creates another tfevents file when fit() is running.
It seems that no metrics are logged in the redundant file, but it will be shown in TensorBoard as a `run`.
I don't do anything about loggers in my LightningModules.
**Expected file structure:**
```
|
|- save_dir
| |- name
| |- version_0
| |- events.out.tfevents.1579833025.ip-xxx-xxx-xxx-xxx.17584.0
| |- meta_tags.csv
|- train.py
```
**Observed file structure:**
```
|
|- save_dir
| |- name
| |- version_0
| |- 1579833032
| |- events.out.tfevents.1579833032.ip-xxx-xxx-xxx-xxx.17584.1
| |- events.out.tfevents.1579833025.ip-xxx-xxx-xxx-xxx.17584.0
| |- meta_tags.csv
|- train.py
```
### To Reproduce / Code sample
Basic training step of PyTorch Lightning:
```python
# define a logger
logger = TensorBoardLogger(
save_dir='runs',
name=args.model
)
# define callbacks
ckpt_path = Path(logger.experiment.log_dir) / 'ckpts'
checkpoint_callback = ModelCheckpoint(filepath=ckpt_path)
# instantiate trainer
trainer = Trainer(
logger=logger,
checkpoint_callback=checkpoint_callback,
gpus=args.gpus
)
# define a model
model = CoolModel(args)
# start training!
trainer.fit(model)
```
### Environment
- PyTorch Lightning Version (e.g., 1.0): `0.6.0`
- PyTorch Version (e.g., 1.0): `1.3.1`
- OS (e.g., Linux): `Ubuntu 16.04 LTS`
- How you installed PyTorch (`conda`, `pip`, source): `pip`
- Build command you used (if compiling from source):
- Python version: `3.7.4`
- CUDA/cuDNN version: `10.1`
--- END ISSUE ---
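The extra `1579833032` directory name is a Unix timestamp, and the likely culprit is `SummaryWriter.add_hparams`: in the torch versions current at the time, it appears to create its own sub-writer in a time-stamped subdirectory of the log dir, and that sub-writer produces the second tfevents file that TensorBoard then lists as a separate run. A rough sketch of the workaround (the same idea the fix for this record uses, written here against plain `torch.utils.tensorboard` rather than Lightning's logger) is to build the hparams summaries yourself and feed them to the writer you already have:

```python
from torch.utils.tensorboard import SummaryWriter
from torch.utils.tensorboard.summary import hparams

writer = SummaryWriter("runs/my_model/version_0")

# Build the three hparams summaries directly instead of calling
# writer.add_hparams(), which would spawn a second, time-stamped writer.
exp, ssi, sei = hparams({"lr": 1e-3, "batch_size": 32}, {})
file_writer = writer._get_file_writer()  # note: private API, as in the patch
for summary in (exp, ssi, sei):
    file_writer.add_summary(summary)
```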
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/loggers/tensorboard.py`
Content:
```
1 import os
2 from warnings import warn
3 from argparse import Namespace
4 from pkg_resources import parse_version
5
6 import torch
7 import csv
8 from torch.utils.tensorboard import SummaryWriter
9
10 from .base import LightningLoggerBase, rank_zero_only
11
12
13 class TensorBoardLogger(LightningLoggerBase):
14 r"""
15
16 Log to local file system in TensorBoard format
17
18 Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to
19 `os.path.join(save_dir, name, version)`
20
21 Example
22 --------
23
24 .. code-block:: python
25
26 logger = TensorBoardLogger("tb_logs", name="my_model")
27 trainer = Trainer(logger=logger)
28 trainer.train(model)
29
30 Args:
31 save_dir (str): Save directory
32 name (str): Experiment name. Defaults to "default".
33 version (int): Experiment version. If version is not specified the logger inspects the save
34 directory for existing versions, then automatically assigns the next available version.
35 \**kwargs (dict): Other arguments are passed directly to the :class:`SummaryWriter` constructor.
36
37 """
38 NAME_CSV_TAGS = 'meta_tags.csv'
39
40 def __init__(self, save_dir, name="default", version=None, **kwargs):
41 super().__init__()
42 self.save_dir = save_dir
43 self._name = name
44 self._version = version
45
46 self._experiment = None
47 self.tags = {}
48 self.kwargs = kwargs
49
50 @property
51 def experiment(self):
52 r"""
53
54 Actual tensorboard object. To use tensorboard features do the following.
55
56 Example::
57
58 self.logger.experiment.some_tensorboard_function()
59
60 """
61 if self._experiment is not None:
62 return self._experiment
63
64 root_dir = os.path.join(self.save_dir, self.name)
65 os.makedirs(root_dir, exist_ok=True)
66 log_dir = os.path.join(root_dir, "version_" + str(self.version))
67 self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)
68 return self._experiment
69
70 @rank_zero_only
71 def log_hyperparams(self, params):
72 if params is None:
73 return
74
75 # in case converting from namespace
76 if isinstance(params, Namespace):
77 params = vars(params)
78 params = dict(params)
79
80 if parse_version(torch.__version__) < parse_version("1.3.0"):
81 warn(
82 f"Hyperparameter logging is not available for Torch version {torch.__version__}."
83 " Skipping log_hyperparams. Upgrade to Torch 1.3.0 or above to enable"
84 " hyperparameter logging."
85 )
86 else:
87 # `add_hparams` requires both - hparams and metric
88 self.experiment.add_hparams(hparam_dict=params, metric_dict={})
89 # some alternative should be added
90 self.tags.update(params)
91
92 @rank_zero_only
93 def log_metrics(self, metrics, step=None):
94 for k, v in metrics.items():
95 if isinstance(v, torch.Tensor):
96 v = v.item()
97 self.experiment.add_scalar(k, v, step)
98
99 @rank_zero_only
100 def save(self):
101 try:
102 self.experiment.flush()
103 except AttributeError:
104 # you are using PT version (<v1.2) which does not have implemented flush
105 self.experiment._get_file_writer().flush()
106
107 # create a preudo standard path ala test-tube
108 dir_path = os.path.join(self.save_dir, self.name, 'version_%s' % self.version)
109 if not os.path.isdir(dir_path):
110 dir_path = self.save_dir
111
112 # prepare the file path
113 meta_tags_path = os.path.join(dir_path, self.NAME_CSV_TAGS)
114
115 # save the metatags file
116 with open(meta_tags_path, 'w', newline='') as csvfile:
117 fieldnames = ['key', 'value']
118 writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
119 writer.writerow({'key': 'key', 'value': 'value'})
120 for k, v in self.tags.items():
121 writer.writerow({'key': k, 'value': v})
122
123 @rank_zero_only
124 def finalize(self, status):
125 self.save()
126
127 @property
128 def name(self):
129 return self._name
130
131 @property
132 def version(self):
133 if self._version is None:
134 self._version = self._get_next_version()
135 return self._version
136
137 def _get_next_version(self):
138 root_dir = os.path.join(self.save_dir, self.name)
139 existing_versions = []
140 for d in os.listdir(root_dir):
141 if os.path.isdir(os.path.join(root_dir, d)) and d.startswith("version_"):
142 existing_versions.append(int(d.split("_")[1]))
143
144 if len(existing_versions) == 0:
145 return 0
146
147 return max(existing_versions) + 1
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_lightning/loggers/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py
--- a/pytorch_lightning/loggers/tensorboard.py
+++ b/pytorch_lightning/loggers/tensorboard.py
@@ -84,8 +84,12 @@
" hyperparameter logging."
)
else:
- # `add_hparams` requires both - hparams and metric
- self.experiment.add_hparams(hparam_dict=params, metric_dict={})
+ from torch.utils.tensorboard.summary import hparams
+ exp, ssi, sei = hparams(params, {})
+ writer = self.experiment._get_file_writer()
+ writer.add_summary(exp)
+ writer.add_summary(ssi)
+ writer.add_summary(sei)
# some alternative should be added
self.tags.update(params)
| {"golden_diff": "diff --git a/pytorch_lightning/loggers/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py\n--- a/pytorch_lightning/loggers/tensorboard.py\n+++ b/pytorch_lightning/loggers/tensorboard.py\n@@ -84,8 +84,12 @@\n \" hyperparameter logging.\"\n )\n else:\n- # `add_hparams` requires both - hparams and metric\n- self.experiment.add_hparams(hparam_dict=params, metric_dict={})\n+ from torch.utils.tensorboard.summary import hparams\n+ exp, ssi, sei = hparams(params, {})\n+ writer = self.experiment._get_file_writer()\n+ writer.add_summary(exp)\n+ writer.add_summary(ssi)\n+ writer.add_summary(sei)\n # some alternative should be added\n self.tags.update(params)\n", "issue": "TensorBoardLogger creates another tfevents file.\n## \ud83d\udc1b Bug\r\nTensorBoardLogger creates another tfevents file when fit() is running.\r\nIt seems that no metrics are logged in the redundant file, but it will be shown in TensorBoard as a `run`.\r\nI don't do anything about loggers in my LightningModules.\r\n\r\n\r\n**Expected file structure:**\r\n```\r\n|\r\n|- save_dir\r\n| |- name\r\n| |- version_0\r\n| |- events.out.tfevents.1579833025.ip-xxx-xxx-xxx-xxx.17584.0\r\n| |- meta_tags.csv\r\n|- train.py\r\n```\r\n\r\n**Observed file structure:**\r\n```\r\n|\r\n|- save_dir\r\n| |- name\r\n| |- version_0\r\n| |- 1579833032\r\n| |- events.out.tfevents.1579833032.ip-xxx-xxx-xxx-xxx.17584.1\r\n| |- events.out.tfevents.1579833025.ip-xxx-xxx-xxx-xxx.17584.0\r\n| |- meta_tags.csv\r\n|- train.py\r\n```\r\n\r\n### To Reproduce / Code sample\r\nBasic training step of PyTorch Lightning:\r\n```python\r\n# define a logger\r\nlogger = TensorBoardLogger(\r\n save_dir='runs',\r\n name=args.model\r\n)\r\n\r\n# define callbacks\r\nckpt_path = Path(logger.experiment.log_dir) / 'ckpts'\r\ncheckpoint_callback = ModelCheckpoint(filepath=ckpt_path)\r\n\r\n# instantiate trainer\r\ntrainer = Trainer(\r\n logger=logger,\r\n checkpoint_callback=checkpoint_callback,\r\n gpus=args.gpus\r\n)\r\n\r\n# define a model\r\nmodel = CoolModel(args)\r\n\r\n# start training!\r\ntrainer.fit(model)\r\n```\r\n\r\n### Environment\r\n\r\n - PyTorch Lightning Version (e.g., 1.0): `0.6.0`\r\n - PyTorch Version (e.g., 1.0): `1.3.1`\r\n - OS (e.g., Linux): `Ubuntu 16.04 LTS`\r\n - How you installed PyTorch (`conda`, `pip`, source): `pip`\r\n - Build command you used (if compiling from source):\r\n - Python version: `3.7.4`\r\n - CUDA/cuDNN version: `10.1`\n", "before_files": [{"content": "import os\nfrom warnings import warn\nfrom argparse import Namespace\nfrom pkg_resources import parse_version\n\nimport torch\nimport csv\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom .base import LightningLoggerBase, rank_zero_only\n\n\nclass TensorBoardLogger(LightningLoggerBase):\n r\"\"\"\n\n Log to local file system in TensorBoard format\n\n Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to\n `os.path.join(save_dir, name, version)`\n\n Example\n --------\n\n .. code-block:: python\n\n logger = TensorBoardLogger(\"tb_logs\", name=\"my_model\")\n trainer = Trainer(logger=logger)\n trainer.train(model)\n\n Args:\n save_dir (str): Save directory\n name (str): Experiment name. Defaults to \"default\".\n version (int): Experiment version. 
If version is not specified the logger inspects the save\n directory for existing versions, then automatically assigns the next available version.\n \\**kwargs (dict): Other arguments are passed directly to the :class:`SummaryWriter` constructor.\n\n \"\"\"\n NAME_CSV_TAGS = 'meta_tags.csv'\n\n def __init__(self, save_dir, name=\"default\", version=None, **kwargs):\n super().__init__()\n self.save_dir = save_dir\n self._name = name\n self._version = version\n\n self._experiment = None\n self.tags = {}\n self.kwargs = kwargs\n\n @property\n def experiment(self):\n r\"\"\"\n\n Actual tensorboard object. To use tensorboard features do the following.\n\n Example::\n\n self.logger.experiment.some_tensorboard_function()\n\n \"\"\"\n if self._experiment is not None:\n return self._experiment\n\n root_dir = os.path.join(self.save_dir, self.name)\n os.makedirs(root_dir, exist_ok=True)\n log_dir = os.path.join(root_dir, \"version_\" + str(self.version))\n self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params):\n if params is None:\n return\n\n # in case converting from namespace\n if isinstance(params, Namespace):\n params = vars(params)\n params = dict(params)\n\n if parse_version(torch.__version__) < parse_version(\"1.3.0\"):\n warn(\n f\"Hyperparameter logging is not available for Torch version {torch.__version__}.\"\n \" Skipping log_hyperparams. Upgrade to Torch 1.3.0 or above to enable\"\n \" hyperparameter logging.\"\n )\n else:\n # `add_hparams` requires both - hparams and metric\n self.experiment.add_hparams(hparam_dict=params, metric_dict={})\n # some alternative should be added\n self.tags.update(params)\n\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n for k, v in metrics.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n self.experiment.add_scalar(k, v, step)\n\n @rank_zero_only\n def save(self):\n try:\n self.experiment.flush()\n except AttributeError:\n # you are using PT version (<v1.2) which does not have implemented flush\n self.experiment._get_file_writer().flush()\n\n # create a preudo standard path ala test-tube\n dir_path = os.path.join(self.save_dir, self.name, 'version_%s' % self.version)\n if not os.path.isdir(dir_path):\n dir_path = self.save_dir\n\n # prepare the file path\n meta_tags_path = os.path.join(dir_path, self.NAME_CSV_TAGS)\n\n # save the metatags file\n with open(meta_tags_path, 'w', newline='') as csvfile:\n fieldnames = ['key', 'value']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writerow({'key': 'key', 'value': 'value'})\n for k, v in self.tags.items():\n writer.writerow({'key': k, 'value': v})\n\n @rank_zero_only\n def finalize(self, status):\n self.save()\n\n @property\n def name(self):\n return self._name\n\n @property\n def version(self):\n if self._version is None:\n self._version = self._get_next_version()\n return self._version\n\n def _get_next_version(self):\n root_dir = os.path.join(self.save_dir, self.name)\n existing_versions = []\n for d in os.listdir(root_dir):\n if os.path.isdir(os.path.join(root_dir, d)) and d.startswith(\"version_\"):\n existing_versions.append(int(d.split(\"_\")[1]))\n\n if len(existing_versions) == 0:\n return 0\n\n return max(existing_versions) + 1\n", "path": "pytorch_lightning/loggers/tensorboard.py"}], "after_files": [{"content": "import os\nfrom warnings import warn\nfrom argparse import Namespace\nfrom pkg_resources import parse_version\n\nimport torch\nimport csv\nfrom 
torch.utils.tensorboard import SummaryWriter\n\nfrom .base import LightningLoggerBase, rank_zero_only\n\n\nclass TensorBoardLogger(LightningLoggerBase):\n r\"\"\"\n\n Log to local file system in TensorBoard format\n\n Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to\n `os.path.join(save_dir, name, version)`\n\n Example\n --------\n\n .. code-block:: python\n\n logger = TensorBoardLogger(\"tb_logs\", name=\"my_model\")\n trainer = Trainer(logger=logger)\n trainer.train(model)\n\n Args:\n save_dir (str): Save directory\n name (str): Experiment name. Defaults to \"default\".\n version (int): Experiment version. If version is not specified the logger inspects the save\n directory for existing versions, then automatically assigns the next available version.\n \\**kwargs (dict): Other arguments are passed directly to the :class:`SummaryWriter` constructor.\n\n \"\"\"\n NAME_CSV_TAGS = 'meta_tags.csv'\n\n def __init__(self, save_dir, name=\"default\", version=None, **kwargs):\n super().__init__()\n self.save_dir = save_dir\n self._name = name\n self._version = version\n\n self._experiment = None\n self.tags = {}\n self.kwargs = kwargs\n\n @property\n def experiment(self):\n r\"\"\"\n\n Actual tensorboard object. To use tensorboard features do the following.\n\n Example::\n\n self.logger.experiment.some_tensorboard_function()\n\n \"\"\"\n if self._experiment is not None:\n return self._experiment\n\n root_dir = os.path.join(self.save_dir, self.name)\n os.makedirs(root_dir, exist_ok=True)\n log_dir = os.path.join(root_dir, \"version_\" + str(self.version))\n self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params):\n if params is None:\n return\n\n # in case converting from namespace\n if isinstance(params, Namespace):\n params = vars(params)\n params = dict(params)\n\n if parse_version(torch.__version__) < parse_version(\"1.3.0\"):\n warn(\n f\"Hyperparameter logging is not available for Torch version {torch.__version__}.\"\n \" Skipping log_hyperparams. 
Upgrade to Torch 1.3.0 or above to enable\"\n \" hyperparameter logging.\"\n )\n else:\n from torch.utils.tensorboard.summary import hparams\n exp, ssi, sei = hparams(params, {})\n writer = self.experiment._get_file_writer()\n writer.add_summary(exp)\n writer.add_summary(ssi)\n writer.add_summary(sei)\n # some alternative should be added\n self.tags.update(params)\n\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n for k, v in metrics.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n self.experiment.add_scalar(k, v, step)\n\n @rank_zero_only\n def save(self):\n try:\n self.experiment.flush()\n except AttributeError:\n # you are using PT version (<v1.2) which does not have implemented flush\n self.experiment._get_file_writer().flush()\n\n # create a preudo standard path ala test-tube\n dir_path = os.path.join(self.save_dir, self.name, 'version_%s' % self.version)\n if not os.path.isdir(dir_path):\n dir_path = self.save_dir\n\n # prepare the file path\n meta_tags_path = os.path.join(dir_path, self.NAME_CSV_TAGS)\n\n # save the metatags file\n with open(meta_tags_path, 'w', newline='') as csvfile:\n fieldnames = ['key', 'value']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writerow({'key': 'key', 'value': 'value'})\n for k, v in self.tags.items():\n writer.writerow({'key': k, 'value': v})\n\n @rank_zero_only\n def finalize(self, status):\n self.save()\n\n @property\n def name(self):\n return self._name\n\n @property\n def version(self):\n if self._version is None:\n self._version = self._get_next_version()\n return self._version\n\n def _get_next_version(self):\n root_dir = os.path.join(self.save_dir, self.name)\n existing_versions = []\n for d in os.listdir(root_dir):\n if os.path.isdir(os.path.join(root_dir, d)) and d.startswith(\"version_\"):\n existing_versions.append(int(d.split(\"_\")[1]))\n\n if len(existing_versions) == 0:\n return 0\n\n return max(existing_versions) + 1\n", "path": "pytorch_lightning/loggers/tensorboard.py"}]} | 2,165 | 184 |
gh_patches_debug_27961 | rasdani/github-patches | git_diff | sunpy__sunpy-6926 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a "How do I..." page to our documentation
<!--
We know asking good questions takes effort, and we appreciate your time.
Thank you.
Please be aware that everyone has to follow our code of conduct:
https://sunpy.org/coc
These comments are hidden when you submit this github issue.
Please have a search on our GitHub repository to see if a similar issue has already been posted.
If a similar issue is closed, have a quick look to see if you are satisfied by the resolution.
If not please go ahead and open an issue!
-->
<!--
Provide a general description of the feature you would like.
If you prefer, you can also suggest a draft design or API.
-->
e.g. this page from the xarray docs: http://xarray.pydata.org/en/stable/howdoi.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/acquiring_data/searching_multiple_wavelengths.py`
Content:
```
1 """
2 ==============================================
3 Searching for multiple wavelengths with Fido
4 ==============================================
5
6 This example shows how you can search for several wavelengths of AIA data with Fido.
7 """
8 from astropy import units as u
9
10 from sunpy.net import Fido
11 from sunpy.net import attrs as a
12
13 ###############################################################################
14 # Here we are demonstrating how you can search for specific wavelengths of
15 # AIA data using `Fido <sunpy.net.fido_factory.UnifiedDownloaderFactory>`
16 # and the `sunpy.net.attrs.AttrOr` function.
17 # For example, you may only want a single wavelength, say 171 Angstrom:
18
19 aia_search = Fido.search(a.Time("2022-02-20 00:00", "2022-02-20 00:01"),
20 a.Instrument("AIA"),
21 a.Wavelength(171*u.angstrom))
22
23 print(aia_search)
24
25 ###############################################################################
26 # But say you actually want to search for several wavelengths, rather than just one.
27 # You could use the "|" operator, or instead you can use the `sunpy.net.attrs.AttrOr`
28 # function.
29
30 wavelengths = [94, 131, 171, 193, 211]*u.angstrom
31 aia_search = Fido.search(a.Time("2022-02-20 00:00", "2022-02-20 00:01"),
32 a.Instrument("AIA"),
33 a.AttrOr([a.Wavelength(wav) for wav in wavelengths]))
34
35 print(aia_search)
36
37 # This returns several searches for each of the wavelengths, which can be indexed.
38 # Here the first index is that of 94 angstrom.
39 print(aia_search[0])
40
41 ###############################################################################
42 # You can then pass the `Fido <sunpy.net.fido_factory.UnifiedDownloaderFactory>`
43 # result to :meth:`Fido.fetch <sunpy.net.fido_factory.UnifiedDownloaderFactory.fetch>`
44 # to download the data, i.e., ``Fido.fetch(aia_search)``.
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/acquiring_data/searching_multiple_wavelengths.py b/examples/acquiring_data/searching_multiple_wavelengths.py
deleted file mode 100644
--- a/examples/acquiring_data/searching_multiple_wavelengths.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""
-==============================================
-Searching for multiple wavelengths with Fido
-==============================================
-
-This example shows how you can search for several wavelengths of AIA data with Fido.
-"""
-from astropy import units as u
-
-from sunpy.net import Fido
-from sunpy.net import attrs as a
-
-###############################################################################
-# Here we are demonstrating how you can search for specific wavelengths of
-# AIA data using `Fido <sunpy.net.fido_factory.UnifiedDownloaderFactory>`
-# and the `sunpy.net.attrs.AttrOr` function.
-# For example, you may only want a single wavelength, say 171 Angstrom:
-
-aia_search = Fido.search(a.Time("2022-02-20 00:00", "2022-02-20 00:01"),
- a.Instrument("AIA"),
- a.Wavelength(171*u.angstrom))
-
-print(aia_search)
-
-###############################################################################
-# But say you actually want to search for several wavelengths, rather than just one.
-# You could use the "|" operator, or instead you can use the `sunpy.net.attrs.AttrOr`
-# function.
-
-wavelengths = [94, 131, 171, 193, 211]*u.angstrom
-aia_search = Fido.search(a.Time("2022-02-20 00:00", "2022-02-20 00:01"),
- a.Instrument("AIA"),
- a.AttrOr([a.Wavelength(wav) for wav in wavelengths]))
-
-print(aia_search)
-
-# This returns several searches for each of the wavelengths, which can be indexed.
-# Here the first index is that of 94 angstrom.
-print(aia_search[0])
-
-###############################################################################
-# You can then pass the `Fido <sunpy.net.fido_factory.UnifiedDownloaderFactory>`
-# result to :meth:`Fido.fetch <sunpy.net.fido_factory.UnifiedDownloaderFactory.fetch>`
-# to download the data, i.e., ``Fido.fetch(aia_search)``.
| {"golden_diff": "diff --git a/examples/acquiring_data/searching_multiple_wavelengths.py b/examples/acquiring_data/searching_multiple_wavelengths.py\ndeleted file mode 100644\n--- a/examples/acquiring_data/searching_multiple_wavelengths.py\n+++ /dev/null\n@@ -1,44 +0,0 @@\n-\"\"\"\n-==============================================\n-Searching for multiple wavelengths with Fido\n-==============================================\n-\n-This example shows how you can search for several wavelengths of AIA data with Fido.\n-\"\"\"\n-from astropy import units as u\n-\n-from sunpy.net import Fido\n-from sunpy.net import attrs as a\n-\n-###############################################################################\n-# Here we are demonstrating how you can search for specific wavelengths of\n-# AIA data using `Fido <sunpy.net.fido_factory.UnifiedDownloaderFactory>`\n-# and the `sunpy.net.attrs.AttrOr` function.\n-# For example, you may only want a single wavelength, say 171 Angstrom:\n-\n-aia_search = Fido.search(a.Time(\"2022-02-20 00:00\", \"2022-02-20 00:01\"),\n- a.Instrument(\"AIA\"),\n- a.Wavelength(171*u.angstrom))\n-\n-print(aia_search)\n-\n-###############################################################################\n-# But say you actually want to search for several wavelengths, rather than just one.\n-# You could use the \"|\" operator, or instead you can use the `sunpy.net.attrs.AttrOr`\n-# function.\n-\n-wavelengths = [94, 131, 171, 193, 211]*u.angstrom\n-aia_search = Fido.search(a.Time(\"2022-02-20 00:00\", \"2022-02-20 00:01\"),\n- a.Instrument(\"AIA\"),\n- a.AttrOr([a.Wavelength(wav) for wav in wavelengths]))\n-\n-print(aia_search)\n-\n-# This returns several searches for each of the wavelengths, which can be indexed.\n-# Here the first index is that of 94 angstrom.\n-print(aia_search[0])\n-\n-###############################################################################\n-# You can then pass the `Fido <sunpy.net.fido_factory.UnifiedDownloaderFactory>`\n-# result to :meth:`Fido.fetch <sunpy.net.fido_factory.UnifiedDownloaderFactory.fetch>`\n-# to download the data, i.e., ``Fido.fetch(aia_search)``.\n", "issue": "Add a \"How do I...\" page to our documentation\n<!--\r\nWe know asking good questions takes effort, and we appreciate your time.\r\nThank you.\r\n\r\nPlease be aware that everyone has to follow our code of conduct:\r\nhttps://sunpy.org/coc\r\n\r\nThese comments are hidden when you submit this github issue.\r\n\r\nPlease have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue!\r\n-->\r\n\r\n\r\n<!--\r\nProvide a general description of the feature you would like.\r\nIf you prefer, you can also suggest a draft design or API.\r\n-->\r\n\r\ne.g. 
this page from the xarray docs: http://xarray.pydata.org/en/stable/howdoi.html\r\n\n", "before_files": [{"content": "\"\"\"\n==============================================\nSearching for multiple wavelengths with Fido\n==============================================\n\nThis example shows how you can search for several wavelengths of AIA data with Fido.\n\"\"\"\nfrom astropy import units as u\n\nfrom sunpy.net import Fido\nfrom sunpy.net import attrs as a\n\n###############################################################################\n# Here we are demonstrating how you can search for specific wavelengths of\n# AIA data using `Fido <sunpy.net.fido_factory.UnifiedDownloaderFactory>`\n# and the `sunpy.net.attrs.AttrOr` function.\n# For example, you may only want a single wavelength, say 171 Angstrom:\n\naia_search = Fido.search(a.Time(\"2022-02-20 00:00\", \"2022-02-20 00:01\"),\n a.Instrument(\"AIA\"),\n a.Wavelength(171*u.angstrom))\n\nprint(aia_search)\n\n###############################################################################\n# But say you actually want to search for several wavelengths, rather than just one.\n# You could use the \"|\" operator, or instead you can use the `sunpy.net.attrs.AttrOr`\n# function.\n\nwavelengths = [94, 131, 171, 193, 211]*u.angstrom\naia_search = Fido.search(a.Time(\"2022-02-20 00:00\", \"2022-02-20 00:01\"),\n a.Instrument(\"AIA\"),\n a.AttrOr([a.Wavelength(wav) for wav in wavelengths]))\n\nprint(aia_search)\n\n# This returns several searches for each of the wavelengths, which can be indexed.\n# Here the first index is that of 94 angstrom.\nprint(aia_search[0])\n\n###############################################################################\n# You can then pass the `Fido <sunpy.net.fido_factory.UnifiedDownloaderFactory>`\n# result to :meth:`Fido.fetch <sunpy.net.fido_factory.UnifiedDownloaderFactory.fetch>`\n# to download the data, i.e., ``Fido.fetch(aia_search)``.\n", "path": "examples/acquiring_data/searching_multiple_wavelengths.py"}], "after_files": [{"content": null, "path": "examples/acquiring_data/searching_multiple_wavelengths.py"}]} | 974 | 551 |
gh_patches_debug_2798 | rasdani/github-patches | git_diff | svthalia__concrexit-1739 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Renewal of membership sends email about references without reason
### Describe the bug
A renewal of a membership to the 'until study end' length triggered the email asking for references.
### How to reproduce
Steps to reproduce the behaviour:
1. Create a registration for a completely new user
2. Create an upgrade for the membership
3. Get the email
### Expected behaviour
No email should be received for references.
### Screenshots
<img width="929" alt="Screenshot 2021-06-20 at 15 20 10" src="https://user-images.githubusercontent.com/1799914/122675750-2a245800-d1db-11eb-93eb-d02abc0f63d1.png">
<img width="656" alt="Screenshot 2021-06-20 at 15 20 20" src="https://user-images.githubusercontent.com/1799914/122675752-2b558500-d1db-11eb-9763-3c1f622826e7.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/registrations/forms.py`
Content:
```
1 """The forms defined by the registrations package."""
2 from django import forms
3 from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
4 from django.forms import TypedChoiceField
5 from django.urls import reverse_lazy
6 from django.utils import timezone
7 from django.utils.safestring import mark_safe
8 from django.utils.text import capfirst
9 from django.utils.translation import gettext_lazy as _
10
11 from members.models import Membership
12 from payments.widgets import SignatureWidget
13 from registrations import services
14 from .models import Registration, Renewal, Reference
15
16
17 class BaseRegistrationForm(forms.ModelForm):
18 """Base form for membership registrations."""
19
20 birthday = forms.DateField(
21 widget=forms.widgets.SelectDateWidget(
22 years=range(timezone.now().year - 50, timezone.now().year - 10)
23 ),
24 label=capfirst(_("birthday")),
25 )
26
27 privacy_policy = forms.BooleanField(required=True,)
28
29 def __init__(self, *args, **kwargs):
30 super().__init__(*args, **kwargs)
31 self.fields["privacy_policy"].label = mark_safe(
32 _('I accept the <a href="{}">privacy policy</a>.').format(
33 reverse_lazy("singlepages:privacy-policy")
34 )
35 )
36
37
38 class RegistrationAdminForm(forms.ModelForm):
39 """Custom admin form for Registration model to add the widget for the signature."""
40
41 class Meta:
42 fields = "__all__"
43 model = Registration
44 widgets = {
45 "signature": SignatureWidget(),
46 }
47
48
49 class MemberRegistrationForm(BaseRegistrationForm):
50 """Form for member registrations."""
51
52 this_year = timezone.now().year
53 years = reversed(
54 [(x, "{} - {}".format(x, x + 1)) for x in range(this_year - 20, this_year + 1)]
55 )
56
57 starting_year = TypedChoiceField(
58 choices=years,
59 coerce=int,
60 empty_value=this_year,
61 required=False,
62 help_text=_("What lecture year did you start studying at Radboud University?"),
63 )
64
65 class Meta:
66 model = Registration
67 widgets = {
68 "signature": SignatureWidget(),
69 }
70 fields = (
71 "length",
72 "first_name",
73 "last_name",
74 "birthday",
75 "email",
76 "phone_number",
77 "student_number",
78 "programme",
79 "starting_year",
80 "address_street",
81 "address_street2",
82 "address_postal_code",
83 "address_city",
84 "address_country",
85 "optin_birthday",
86 "optin_mailinglist",
87 "membership_type",
88 "direct_debit",
89 "initials",
90 "iban",
91 "bic",
92 "signature",
93 )
94
95
96 class BenefactorRegistrationForm(BaseRegistrationForm):
97 """Form for benefactor registrations."""
98
99 icis_employee = forms.BooleanField(
100 required=False, label=_("I am an employee of iCIS")
101 )
102
103 class Meta:
104 model = Registration
105 widgets = {
106 "signature": SignatureWidget(),
107 }
108 fields = (
109 "length",
110 "first_name",
111 "last_name",
112 "birthday",
113 "email",
114 "phone_number",
115 "student_number",
116 "address_street",
117 "address_street2",
118 "address_postal_code",
119 "address_city",
120 "address_country",
121 "optin_birthday",
122 "optin_mailinglist",
123 "contribution",
124 "membership_type",
125 "direct_debit",
126 "initials",
127 "iban",
128 "bic",
129 "signature",
130 )
131
132
133 class RenewalForm(forms.ModelForm):
134 """Form for membership renewals."""
135
136 privacy_policy = forms.BooleanField(required=True,)
137
138 icis_employee = forms.BooleanField(
139 required=False, label=_("I am an employee of iCIS")
140 )
141
142 contribution = forms.IntegerField(required=False,)
143
144 def __init__(self, *args, **kwargs):
145 super().__init__(*args, **kwargs)
146 self.fields["privacy_policy"].label = mark_safe(
147 _('I accept the <a href="{}">privacy policy</a>.').format(
148 reverse_lazy("singlepages:privacy-policy")
149 )
150 )
151
152 class Meta:
153 model = Renewal
154 fields = ("member", "length", "contribution", "membership_type")
155
156
157 class ReferenceForm(forms.ModelForm):
158 def clean(self):
159 super().clean()
160 membership = self.cleaned_data["member"].current_membership
161 if membership and membership.type == Membership.BENEFACTOR:
162 raise ValidationError(_("Benefactors cannot give references."))
163
164 membership = self.cleaned_data["member"].latest_membership
165 if (
166 membership
167 and membership.until
168 and membership.until < services.calculate_membership_since()
169 ):
170 raise ValidationError(
171 _(
172 "It's not possible to give references for "
173 "memberships that start after your own "
174 "membership's end."
175 )
176 )
177
178 class Meta:
179 model = Reference
180 fields = "__all__"
181 error_messages = {
182 NON_FIELD_ERRORS: {
183 "unique_together": _(
184 "You've already given a reference for this person."
185 ),
186 }
187 }
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/registrations/forms.py b/website/registrations/forms.py
--- a/website/registrations/forms.py
+++ b/website/registrations/forms.py
@@ -151,7 +151,14 @@
class Meta:
model = Renewal
- fields = ("member", "length", "contribution", "membership_type")
+ fields = (
+ "member",
+ "length",
+ "contribution",
+ "membership_type",
+ "no_references",
+ "remarks",
+ )
class ReferenceForm(forms.ModelForm):
| {"golden_diff": "diff --git a/website/registrations/forms.py b/website/registrations/forms.py\n--- a/website/registrations/forms.py\n+++ b/website/registrations/forms.py\n@@ -151,7 +151,14 @@\n \n class Meta:\n model = Renewal\n- fields = (\"member\", \"length\", \"contribution\", \"membership_type\")\n+ fields = (\n+ \"member\",\n+ \"length\",\n+ \"contribution\",\n+ \"membership_type\",\n+ \"no_references\",\n+ \"remarks\",\n+ )\n \n \n class ReferenceForm(forms.ModelForm):\n", "issue": "Renewal of membership send email about references without reason\n### Describe the bug\r\nA renewal of a membership to the 'until study end' triggered the email for references.\r\n\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Create a registration for a completely new user\r\n2. Create an upgrade for the membership\r\n3. Get the email\r\n\r\n### Expected behaviour\r\n\r\nNo email should be received for references.\r\n\r\n### Screenshots\r\n<img width=\"929\" alt=\"Screenshot 2021-06-20 at 15 20 10\" src=\"https://user-images.githubusercontent.com/1799914/122675750-2a245800-d1db-11eb-93eb-d02abc0f63d1.png\">\r\n<img width=\"656\" alt=\"Screenshot 2021-06-20 at 15 20 20\" src=\"https://user-images.githubusercontent.com/1799914/122675752-2b558500-d1db-11eb-9763-3c1f622826e7.png\">\r\n\r\n\n", "before_files": [{"content": "\"\"\"The forms defined by the registrations package.\"\"\"\nfrom django import forms\nfrom django.core.exceptions import NON_FIELD_ERRORS, ValidationError\nfrom django.forms import TypedChoiceField\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import capfirst\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Membership\nfrom payments.widgets import SignatureWidget\nfrom registrations import services\nfrom .models import Registration, Renewal, Reference\n\n\nclass BaseRegistrationForm(forms.ModelForm):\n \"\"\"Base form for membership registrations.\"\"\"\n\n birthday = forms.DateField(\n widget=forms.widgets.SelectDateWidget(\n years=range(timezone.now().year - 50, timezone.now().year - 10)\n ),\n label=capfirst(_(\"birthday\")),\n )\n\n privacy_policy = forms.BooleanField(required=True,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n\n\nclass RegistrationAdminForm(forms.ModelForm):\n \"\"\"Custom admin form for Registration model to add the widget for the signature.\"\"\"\n\n class Meta:\n fields = \"__all__\"\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n\n\nclass MemberRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for member registrations.\"\"\"\n\n this_year = timezone.now().year\n years = reversed(\n [(x, \"{} - {}\".format(x, x + 1)) for x in range(this_year - 20, this_year + 1)]\n )\n\n starting_year = TypedChoiceField(\n choices=years,\n coerce=int,\n empty_value=this_year,\n required=False,\n help_text=_(\"What lecture year did you start studying at Radboud University?\"),\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"programme\",\n \"starting_year\",\n \"address_street\",\n \"address_street2\",\n 
\"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass BenefactorRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for benefactor registrations.\"\"\"\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"contribution\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass RenewalForm(forms.ModelForm):\n \"\"\"Form for membership renewals.\"\"\"\n\n privacy_policy = forms.BooleanField(required=True,)\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n contribution = forms.IntegerField(required=False,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n\n class Meta:\n model = Renewal\n fields = (\"member\", \"length\", \"contribution\", \"membership_type\")\n\n\nclass ReferenceForm(forms.ModelForm):\n def clean(self):\n super().clean()\n membership = self.cleaned_data[\"member\"].current_membership\n if membership and membership.type == Membership.BENEFACTOR:\n raise ValidationError(_(\"Benefactors cannot give references.\"))\n\n membership = self.cleaned_data[\"member\"].latest_membership\n if (\n membership\n and membership.until\n and membership.until < services.calculate_membership_since()\n ):\n raise ValidationError(\n _(\n \"It's not possible to give references for \"\n \"memberships that start after your own \"\n \"membership's end.\"\n )\n )\n\n class Meta:\n model = Reference\n fields = \"__all__\"\n error_messages = {\n NON_FIELD_ERRORS: {\n \"unique_together\": _(\n \"You've already given a reference for this person.\"\n ),\n }\n }\n", "path": "website/registrations/forms.py"}], "after_files": [{"content": "\"\"\"The forms defined by the registrations package.\"\"\"\nfrom django import forms\nfrom django.core.exceptions import NON_FIELD_ERRORS, ValidationError\nfrom django.forms import TypedChoiceField\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import capfirst\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Membership\nfrom payments.widgets import SignatureWidget\nfrom registrations import services\nfrom .models import Registration, Renewal, Reference\n\n\nclass BaseRegistrationForm(forms.ModelForm):\n \"\"\"Base form for membership registrations.\"\"\"\n\n birthday = forms.DateField(\n widget=forms.widgets.SelectDateWidget(\n years=range(timezone.now().year - 50, timezone.now().year - 10)\n ),\n label=capfirst(_(\"birthday\")),\n )\n\n privacy_policy = forms.BooleanField(required=True,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the 
<a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n\n\nclass RegistrationAdminForm(forms.ModelForm):\n \"\"\"Custom admin form for Registration model to add the widget for the signature.\"\"\"\n\n class Meta:\n fields = \"__all__\"\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n\n\nclass MemberRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for member registrations.\"\"\"\n\n this_year = timezone.now().year\n years = reversed(\n [(x, \"{} - {}\".format(x, x + 1)) for x in range(this_year - 20, this_year + 1)]\n )\n\n starting_year = TypedChoiceField(\n choices=years,\n coerce=int,\n empty_value=this_year,\n required=False,\n help_text=_(\"What lecture year did you start studying at Radboud University?\"),\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"programme\",\n \"starting_year\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass BenefactorRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for benefactor registrations.\"\"\"\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"contribution\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass RenewalForm(forms.ModelForm):\n \"\"\"Form for membership renewals.\"\"\"\n\n privacy_policy = forms.BooleanField(required=True,)\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n contribution = forms.IntegerField(required=False,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n\n class Meta:\n model = Renewal\n fields = (\n \"member\",\n \"length\",\n \"contribution\",\n \"membership_type\",\n \"no_references\",\n \"remarks\",\n )\n\n\nclass ReferenceForm(forms.ModelForm):\n def clean(self):\n super().clean()\n membership = self.cleaned_data[\"member\"].current_membership\n if membership and membership.type == Membership.BENEFACTOR:\n raise ValidationError(_(\"Benefactors cannot give references.\"))\n\n membership = self.cleaned_data[\"member\"].latest_membership\n if (\n membership\n and membership.until\n and membership.until < services.calculate_membership_since()\n ):\n raise ValidationError(\n _(\n \"It's not possible to give references for \"\n \"memberships that start after your own \"\n \"membership's end.\"\n )\n )\n\n class Meta:\n model = Reference\n fields = \"__all__\"\n error_messages = {\n NON_FIELD_ERRORS: {\n \"unique_together\": _(\n \"You've already given a reference for this person.\"\n ),\n 
}\n }\n", "path": "website/registrations/forms.py"}]} | 2,074 | 132 |
gh_patches_debug_61059 | rasdani/github-patches | git_diff | google__turbinia-602 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Configuration file not behaving as expected
I was struggling a bit today with having the configuration file mapped to what I needed it to be (for launching dftimewolf with a one-off configuration of Turbinia).
My ~/.turbiniarc is set to what I want, but the config is still picked up from "whatever file it can find" in the directory pointed to by `TURBINIA_CONFIG_PATH` (specified in `ENVCONFIGVAR`).
This happens because when importing `evidence` (e.g. `from turbinia import evidence`), `LoadConfig` is called with no parameters, and thus populates the config with whatever file it finds first there. Subsequent calls to `LoadConfig`, even when passing a `config_file`, will still return this first configuration because it has already been loaded.
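A minimal sketch of what I mean (the path below is only a placeholder; `LoadConfig` is the entry point from `turbinia/config/__init__.py` shown further down):

```python
from turbinia import config

# First (implicit) load, e.g. triggered by `from turbinia import evidence`:
config.LoadConfig()  # picks up whatever file it finds via TURBINIA_CONFIG_PATH

# Later, explicit load with a one-off file (placeholder path):
cfg = config.LoadConfig(config_file='/tmp/one_off.turbiniarc')
# cfg is still the first, cached configuration: the module-level CONFIG global
# is already set, so the explicit config_file argument is effectively ignored.
```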
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/config/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2016 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Basic Turbinia config."""
16
17 from __future__ import unicode_literals
18
19 import imp
20 import itertools
21 import logging
22 import os
23 import sys
24
25 from turbinia import TurbiniaException
26
27 DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
28
29 # Look for config files with these names
30 CONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config_tmpl.py']
31 # Look in homedir first, then /etc/turbinia
32 CONFIGPATH = [
33 os.path.expanduser('~'),
34 '/etc/turbinia',
35 os.path.dirname(os.path.abspath(__file__)),
36 ]
37 # Config setup reminder for cleaner error handling on empty configs.
38 CONFIG_MSG = (
39 'Copy turbinia/config/turbinia_config_tmpl.py to ~/.turbiniarc '
40 'or /etc/turbinia/turbinia.conf, edit, and re-run.')
41
42 # Required config vars
43 REQUIRED_VARS = [
44 # Turbinia Config
45 'INSTANCE_ID',
46 'STATE_MANAGER',
47 'TASK_MANAGER',
48 'LOG_FILE',
49 'LOCK_FILE',
50 'OUTPUT_DIR',
51 'TMP_DIR',
52 'SLEEP_TIME',
53 'SINGLE_RUN',
54 'MOUNT_DIR_PREFIX',
55 'SHARED_FILESYSTEM',
56 'DEBUG_TASKS',
57 'DEPENDENCIES',
58 'DOCKER_ENABLED',
59 'DISABLED_JOBS',
60 ]
61
62 # Optional config vars. Some may be mandatory depending on the configuration
63 # (e.g. if TASK_MANAGER is set to 'PSQ', then the GCE Config variables are
64 # required), but these requirements are not enforced.
65 OPTIONAL_VARS = [
66 # GCE CONFIG
67 'TURBINIA_PROJECT',
68 'TURBINIA_ZONE',
69 'TURBINIA_REGION',
70 'BUCKET_NAME',
71 'PSQ_TOPIC',
72 'PUBSUB_TOPIC',
73 'GCS_OUTPUT_PATH',
74 'STACKDRIVER_LOGGING',
75 'STACKDRIVER_TRACEBACK',
76 # REDIS CONFIG
77 'REDIS_HOST',
78 'REDIS_PORT',
79 'REDIS_DB',
80 # Celery config
81 'CELERY_BROKER',
82 'CELERY_BACKEND',
83 'KOMBU_BROKER',
84 'KOMBU_CHANNEL',
85 'KOMBU_DURABLE',
86 # Email config
87 'EMAIL_NOTIFICATIONS',
88 'EMAIL_HOST_ADDRESS',
89 'EMAIL_PORT',
90 'EMAIL_ADDRESS',
91 'EMAIL_PASSWORD',
92 ]
93
94 # Environment variable to look for path data in
95 ENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'
96
97 CONFIG = None
98
99 log = logging.getLogger('turbinia')
100
101
102 def LoadConfig(config_file=None):
103 """Finds Turbinia config file and loads it.
104
105 Args:
106 config_file(str): full path to config file
107 """
108 # TODO(aarontp): Find way to not require global var here. Maybe a singleton
109 # pattern on the config class.
110 # pylint: disable=global-statement
111 global CONFIG
112 if CONFIG:
113 log.debug(
114 'Returning cached config from {0:s} instead of reloading config'.format(
115 CONFIG.configSource))
116 return CONFIG
117
118 if not config_file:
119 log.debug('No config specified. Looking in default locations for config.')
120 # If the environment variable is set, take precedence over the pre-defined
121 # CONFIGPATHs.
122 configpath = CONFIGPATH
123 if ENVCONFIGVAR in os.environ:
124 configpath = os.environ[ENVCONFIGVAR].split(':')
125
126 # Load first file found
127 for _dir, _file in itertools.product(configpath, CONFIGFILES):
128 if os.path.exists(os.path.join(_dir, _file)):
129 config_file = os.path.join(_dir, _file)
130 break
131
132 if config_file is None:
133 raise TurbiniaException('No config files found')
134
135 log.debug('Loading config from {0:s}'.format(config_file))
136 # Warn about using fallback source config, but it's currently necessary for
137 # tests. See issue #446.
138 if 'turbinia_config_tmpl' in config_file:
139 log.warning('Using fallback source config. {0:s}'.format(CONFIG_MSG))
140 try:
141 _config = imp.load_source('config', config_file)
142 except IOError as exception:
143 message = (
144 'Could not load config file {0:s}: {1!s}'.format(
145 config_file, exception))
146 log.error(message)
147 raise TurbiniaException(message)
148
149 _config.configSource = config_file
150 ValidateAndSetConfig(_config)
151
152 # Set the environment var for this so that we don't see the "No project ID
153 # could be determined." warning later.
154 if hasattr(_config, 'TURBINIA_PROJECT') and _config.TURBINIA_PROJECT:
155 os.environ['GOOGLE_CLOUD_PROJECT'] = _config.TURBINIA_PROJECT
156
157 CONFIG = _config
158 log.debug(
159 'Returning parsed config loaded from {0:s}'.format(CONFIG.configSource))
160 return _config
161
162
163 def ValidateAndSetConfig(_config):
164 """Makes sure that the config has the vars loaded and set in the module."""
165 # Explicitly set the config path
166 setattr(sys.modules[__name__], 'configSource', _config.configSource)
167
168 CONFIGVARS = REQUIRED_VARS + OPTIONAL_VARS
169 for var in CONFIGVARS:
170 empty_value = False
171 if not hasattr(_config, var):
172 if var in OPTIONAL_VARS:
173 log.debug(
174 'Setting non-existent but optional config variable {0:s} to '
175 'None'.format(var))
176 empty_value = True
177 else:
178 raise TurbiniaException(
179 'Required config attribute {0:s}:{1:s} not in config'.format(
180 _config.configSource, var))
181 if var in REQUIRED_VARS and getattr(_config, var) is None:
182 raise TurbiniaException(
183 'Config attribute {0:s}:{1:s} is not set'.format(
184 _config.configSource, var))
185
186 # Set the attribute in the current module
187 if empty_value:
188 setattr(sys.modules[__name__], var, None)
189 else:
190 setattr(sys.modules[__name__], var, getattr(_config, var))
191
192
193 def ParseDependencies():
194 """Parses the config file DEPENDENCIES variable.
195
196 Raises:
197 TurbiniaException: If bad config file.
198
199 Returns:
200 dependencies(dict): The parsed dependency values.
201 """
202 dependencies = {}
203 try:
204 for values in CONFIG.DEPENDENCIES:
205 job = values['job'].lower()
206 dependencies[job] = {}
207 dependencies[job]['programs'] = values['programs']
208 dependencies[job]['docker_image'] = values.get('docker_image')
209 except (KeyError, TypeError) as exception:
210 raise TurbiniaException(
211 'An issue has occurred while parsing the '
212 'dependency config: {0!s}'.format(exception))
213 return dependencies
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/turbinia/config/__init__.py b/turbinia/config/__init__.py
--- a/turbinia/config/__init__.py
+++ b/turbinia/config/__init__.py
@@ -109,7 +109,7 @@
# pattern on the config class.
# pylint: disable=global-statement
global CONFIG
- if CONFIG:
+ if CONFIG and not config_file:
log.debug(
'Returning cached config from {0:s} instead of reloading config'.format(
CONFIG.configSource))
| {"golden_diff": "diff --git a/turbinia/config/__init__.py b/turbinia/config/__init__.py\n--- a/turbinia/config/__init__.py\n+++ b/turbinia/config/__init__.py\n@@ -109,7 +109,7 @@\n # pattern on the config class.\n # pylint: disable=global-statement\n global CONFIG\n- if CONFIG:\n+ if CONFIG and not config_file:\n log.debug(\n 'Returning cached config from {0:s} instead of reloading config'.format(\n CONFIG.configSource))\n", "issue": "Configuration file not behaving as expected\nI was struggling a bit today with having the configuration file mapped to what I needed it to be (for launching dftimewolf with a one-off configuration of Turbinia).\r\n\r\nMy ~/.turbiniarc is set to what I want, but the config is still picked up from \"whatever file it can find\" in the directory pointed to by `TURBINIA_CONFIG_PATH` (specified in `ENVCONFIGVAR`)\r\n\r\nThis happens because when importing `evidence` (e.g. `from turbinia import evidence`), `LoadConfig` is called with no parameters, and thus populates the config with whatever files it can find there. Subsequent calls to `LoadConfig`, even when passing a `config_file` will still return this first configuration because it has already been loaded.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic Turbinia config.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport imp\nimport itertools\nimport logging\nimport os\nimport sys\n\nfrom turbinia import TurbiniaException\n\nDATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'\n\n# Look for config files with these names\nCONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config_tmpl.py']\n# Look in homedir first, then /etc/turbinia\nCONFIGPATH = [\n os.path.expanduser('~'),\n '/etc/turbinia',\n os.path.dirname(os.path.abspath(__file__)),\n]\n# Config setup reminder for cleaner error handling on empty configs.\nCONFIG_MSG = (\n 'Copy turbinia/config/turbinia_config_tmpl.py to ~/.turbiniarc '\n 'or /etc/turbinia/turbinia.conf, edit, and re-run.')\n\n# Required config vars\nREQUIRED_VARS = [\n # Turbinia Config\n 'INSTANCE_ID',\n 'STATE_MANAGER',\n 'TASK_MANAGER',\n 'LOG_FILE',\n 'LOCK_FILE',\n 'OUTPUT_DIR',\n 'TMP_DIR',\n 'SLEEP_TIME',\n 'SINGLE_RUN',\n 'MOUNT_DIR_PREFIX',\n 'SHARED_FILESYSTEM',\n 'DEBUG_TASKS',\n 'DEPENDENCIES',\n 'DOCKER_ENABLED',\n 'DISABLED_JOBS',\n]\n\n# Optional config vars. Some may be mandatory depending on the configuration\n# (e.g. 
if TASK_MANAGER is set to 'PSQ', then the GCE Config variables are\n# required), but these requirements are not enforced.\nOPTIONAL_VARS = [\n # GCE CONFIG\n 'TURBINIA_PROJECT',\n 'TURBINIA_ZONE',\n 'TURBINIA_REGION',\n 'BUCKET_NAME',\n 'PSQ_TOPIC',\n 'PUBSUB_TOPIC',\n 'GCS_OUTPUT_PATH',\n 'STACKDRIVER_LOGGING',\n 'STACKDRIVER_TRACEBACK',\n # REDIS CONFIG\n 'REDIS_HOST',\n 'REDIS_PORT',\n 'REDIS_DB',\n # Celery config\n 'CELERY_BROKER',\n 'CELERY_BACKEND',\n 'KOMBU_BROKER',\n 'KOMBU_CHANNEL',\n 'KOMBU_DURABLE',\n # Email config\n 'EMAIL_NOTIFICATIONS',\n 'EMAIL_HOST_ADDRESS',\n 'EMAIL_PORT',\n 'EMAIL_ADDRESS',\n 'EMAIL_PASSWORD',\n]\n\n# Environment variable to look for path data in\nENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'\n\nCONFIG = None\n\nlog = logging.getLogger('turbinia')\n\n\ndef LoadConfig(config_file=None):\n \"\"\"Finds Turbinia config file and loads it.\n\n Args:\n config_file(str): full path to config file\n \"\"\"\n # TODO(aarontp): Find way to not require global var here. Maybe a singleton\n # pattern on the config class.\n # pylint: disable=global-statement\n global CONFIG\n if CONFIG:\n log.debug(\n 'Returning cached config from {0:s} instead of reloading config'.format(\n CONFIG.configSource))\n return CONFIG\n\n if not config_file:\n log.debug('No config specified. Looking in default locations for config.')\n # If the environment variable is set, take precedence over the pre-defined\n # CONFIGPATHs.\n configpath = CONFIGPATH\n if ENVCONFIGVAR in os.environ:\n configpath = os.environ[ENVCONFIGVAR].split(':')\n\n # Load first file found\n for _dir, _file in itertools.product(configpath, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n\n if config_file is None:\n raise TurbiniaException('No config files found')\n\n log.debug('Loading config from {0:s}'.format(config_file))\n # Warn about using fallback source config, but it's currently necessary for\n # tests. See issue #446.\n if 'turbinia_config_tmpl' in config_file:\n log.warning('Using fallback source config. 
{0:s}'.format(CONFIG_MSG))\n try:\n _config = imp.load_source('config', config_file)\n except IOError as exception:\n message = (\n 'Could not load config file {0:s}: {1!s}'.format(\n config_file, exception))\n log.error(message)\n raise TurbiniaException(message)\n\n _config.configSource = config_file\n ValidateAndSetConfig(_config)\n\n # Set the environment var for this so that we don't see the \"No project ID\n # could be determined.\" warning later.\n if hasattr(_config, 'TURBINIA_PROJECT') and _config.TURBINIA_PROJECT:\n os.environ['GOOGLE_CLOUD_PROJECT'] = _config.TURBINIA_PROJECT\n\n CONFIG = _config\n log.debug(\n 'Returning parsed config loaded from {0:s}'.format(CONFIG.configSource))\n return _config\n\n\ndef ValidateAndSetConfig(_config):\n \"\"\"Makes sure that the config has the vars loaded and set in the module.\"\"\"\n # Explicitly set the config path\n setattr(sys.modules[__name__], 'configSource', _config.configSource)\n\n CONFIGVARS = REQUIRED_VARS + OPTIONAL_VARS\n for var in CONFIGVARS:\n empty_value = False\n if not hasattr(_config, var):\n if var in OPTIONAL_VARS:\n log.debug(\n 'Setting non-existent but optional config variable {0:s} to '\n 'None'.format(var))\n empty_value = True\n else:\n raise TurbiniaException(\n 'Required config attribute {0:s}:{1:s} not in config'.format(\n _config.configSource, var))\n if var in REQUIRED_VARS and getattr(_config, var) is None:\n raise TurbiniaException(\n 'Config attribute {0:s}:{1:s} is not set'.format(\n _config.configSource, var))\n\n # Set the attribute in the current module\n if empty_value:\n setattr(sys.modules[__name__], var, None)\n else:\n setattr(sys.modules[__name__], var, getattr(_config, var))\n\n\ndef ParseDependencies():\n \"\"\"Parses the config file DEPENDENCIES variable.\n\n Raises:\n TurbiniaException: If bad config file.\n\n Returns:\n dependencies(dict): The parsed dependency values.\n \"\"\"\n dependencies = {}\n try:\n for values in CONFIG.DEPENDENCIES:\n job = values['job'].lower()\n dependencies[job] = {}\n dependencies[job]['programs'] = values['programs']\n dependencies[job]['docker_image'] = values.get('docker_image')\n except (KeyError, TypeError) as exception:\n raise TurbiniaException(\n 'An issue has occurred while parsing the '\n 'dependency config: {0!s}'.format(exception))\n return dependencies\n", "path": "turbinia/config/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic Turbinia config.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport imp\nimport itertools\nimport logging\nimport os\nimport sys\n\nfrom turbinia import TurbiniaException\n\nDATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'\n\n# Look for config files with these names\nCONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config_tmpl.py']\n# Look in homedir first, then /etc/turbinia\nCONFIGPATH = [\n os.path.expanduser('~'),\n '/etc/turbinia',\n os.path.dirname(os.path.abspath(__file__)),\n]\n# Config setup 
reminder for cleaner error handling on empty configs.\nCONFIG_MSG = (\n 'Copy turbinia/config/turbinia_config_tmpl.py to ~/.turbiniarc '\n 'or /etc/turbinia/turbinia.conf, edit, and re-run.')\n\n# Required config vars\nREQUIRED_VARS = [\n # Turbinia Config\n 'INSTANCE_ID',\n 'STATE_MANAGER',\n 'TASK_MANAGER',\n 'LOG_FILE',\n 'LOCK_FILE',\n 'OUTPUT_DIR',\n 'TMP_DIR',\n 'SLEEP_TIME',\n 'SINGLE_RUN',\n 'MOUNT_DIR_PREFIX',\n 'SHARED_FILESYSTEM',\n 'DEBUG_TASKS',\n 'DEPENDENCIES',\n 'DOCKER_ENABLED',\n 'DISABLED_JOBS',\n]\n\n# Optional config vars. Some may be mandatory depending on the configuration\n# (e.g. if TASK_MANAGER is set to 'PSQ', then the GCE Config variables are\n# required), but these requirements are not enforced.\nOPTIONAL_VARS = [\n # GCE CONFIG\n 'TURBINIA_PROJECT',\n 'TURBINIA_ZONE',\n 'TURBINIA_REGION',\n 'BUCKET_NAME',\n 'PSQ_TOPIC',\n 'PUBSUB_TOPIC',\n 'GCS_OUTPUT_PATH',\n 'STACKDRIVER_LOGGING',\n 'STACKDRIVER_TRACEBACK',\n # REDIS CONFIG\n 'REDIS_HOST',\n 'REDIS_PORT',\n 'REDIS_DB',\n # Celery config\n 'CELERY_BROKER',\n 'CELERY_BACKEND',\n 'KOMBU_BROKER',\n 'KOMBU_CHANNEL',\n 'KOMBU_DURABLE',\n # Email config\n 'EMAIL_NOTIFICATIONS',\n 'EMAIL_HOST_ADDRESS',\n 'EMAIL_PORT',\n 'EMAIL_ADDRESS',\n 'EMAIL_PASSWORD',\n]\n\n# Environment variable to look for path data in\nENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'\n\nCONFIG = None\n\nlog = logging.getLogger('turbinia')\n\n\ndef LoadConfig(config_file=None):\n \"\"\"Finds Turbinia config file and loads it.\n\n Args:\n config_file(str): full path to config file\n \"\"\"\n # TODO(aarontp): Find way to not require global var here. Maybe a singleton\n # pattern on the config class.\n # pylint: disable=global-statement\n global CONFIG\n if CONFIG and not config_file:\n log.debug(\n 'Returning cached config from {0:s} instead of reloading config'.format(\n CONFIG.configSource))\n return CONFIG\n\n if not config_file:\n log.debug('No config specified. Looking in default locations for config.')\n # If the environment variable is set, take precedence over the pre-defined\n # CONFIGPATHs.\n configpath = CONFIGPATH\n if ENVCONFIGVAR in os.environ:\n configpath = os.environ[ENVCONFIGVAR].split(':')\n\n # Load first file found\n for _dir, _file in itertools.product(configpath, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n\n if config_file is None:\n raise TurbiniaException('No config files found')\n\n log.debug('Loading config from {0:s}'.format(config_file))\n # Warn about using fallback source config, but it's currently necessary for\n # tests. See issue #446.\n if 'turbinia_config_tmpl' in config_file:\n log.warning('Using fallback source config. 
{0:s}'.format(CONFIG_MSG))\n try:\n _config = imp.load_source('config', config_file)\n except IOError as exception:\n message = (\n 'Could not load config file {0:s}: {1!s}'.format(\n config_file, exception))\n log.error(message)\n raise TurbiniaException(message)\n\n _config.configSource = config_file\n ValidateAndSetConfig(_config)\n\n # Set the environment var for this so that we don't see the \"No project ID\n # could be determined.\" warning later.\n if hasattr(_config, 'TURBINIA_PROJECT') and _config.TURBINIA_PROJECT:\n os.environ['GOOGLE_CLOUD_PROJECT'] = _config.TURBINIA_PROJECT\n\n CONFIG = _config\n log.debug(\n 'Returning parsed config loaded from {0:s}'.format(CONFIG.configSource))\n return _config\n\n\ndef ValidateAndSetConfig(_config):\n \"\"\"Makes sure that the config has the vars loaded and set in the module.\"\"\"\n # Explicitly set the config path\n setattr(sys.modules[__name__], 'configSource', _config.configSource)\n\n CONFIGVARS = REQUIRED_VARS + OPTIONAL_VARS\n for var in CONFIGVARS:\n empty_value = False\n if not hasattr(_config, var):\n if var in OPTIONAL_VARS:\n log.debug(\n 'Setting non-existent but optional config variable {0:s} to '\n 'None'.format(var))\n empty_value = True\n else:\n raise TurbiniaException(\n 'Required config attribute {0:s}:{1:s} not in config'.format(\n _config.configSource, var))\n if var in REQUIRED_VARS and getattr(_config, var) is None:\n raise TurbiniaException(\n 'Config attribute {0:s}:{1:s} is not set'.format(\n _config.configSource, var))\n\n # Set the attribute in the current module\n if empty_value:\n setattr(sys.modules[__name__], var, None)\n else:\n setattr(sys.modules[__name__], var, getattr(_config, var))\n\n\ndef ParseDependencies():\n \"\"\"Parses the config file DEPENDENCIES variable.\n\n Raises:\n TurbiniaException: If bad config file.\n\n Returns:\n dependencies(dict): The parsed dependency values.\n \"\"\"\n dependencies = {}\n try:\n for values in CONFIG.DEPENDENCIES:\n job = values['job'].lower()\n dependencies[job] = {}\n dependencies[job]['programs'] = values['programs']\n dependencies[job]['docker_image'] = values.get('docker_image')\n except (KeyError, TypeError) as exception:\n raise TurbiniaException(\n 'An issue has occurred while parsing the '\n 'dependency config: {0!s}'.format(exception))\n return dependencies\n", "path": "turbinia/config/__init__.py"}]} | 2,606 | 124 |
gh_patches_debug_1057 | rasdani/github-patches | git_diff | StackStorm__st2-5091 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
St2Stream service broken when using SSL with mongodb
## SUMMARY
This issue is an extension of #4832, but this time it affects the st2stream service. I have looked at the code and can see that the same monkey patch code has not been applied to the st2stream app.
### STACKSTORM VERSION
Paste the output of ``st2 --version``: 3.3.0
##### OS, environment, install method
Docker Compose with the split services, and the mongo db references commented out so that an external db can be used: https://github.com/StackStorm/st2-docker/blob/master/docker-compose.yml
All other services connected correctly to the mongodb.net test instance, with the exception of st2stream.
## Steps to reproduce the problem
Use the docker-compose YAML at https://github.com/StackStorm/st2-docker/blob/master/docker-compose.yml, comment out the mongo container and its references, and adjust files/st2-docker.conf to point to an external DB with SSL = True enabled.
docker-compose up
## Expected Results
What did you expect to happen when running the steps above?
st2stream to operate correctly
## Actual Results
What happened? What output did you get?
2020-11-16 05:48:55,053 WARNING [-] Retry on ConnectionError - Cannot connect to database default :
maximum recursion depth exceeded
Adding the monkey patch code to the st2stream app resolves the issue (manually injected into the container to test).
file: st2stream/cmd/api.py
Code:
from st2common.util.monkey_patch import monkey_patch
monkey_patch()
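
For reference, a sketch of where these two lines would sit at the top of the module, mirroring the placement used by the other st2 services (illustrative only, not the exact upstream change):

```python
# st2stream/st2stream/cmd/api.py (sketch)
from st2common.util.monkey_patch import monkey_patch
monkey_patch()  # patch the stdlib before eventlet/DB-related imports below

import os
import sys

import eventlet
from oslo_config import cfg
from eventlet import wsgi
```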
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2stream/st2stream/cmd/api.py`
Content:
```
1 # Copyright 2020 The StackStorm Authors.
2 # Copyright 2019 Extreme Networks, Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import os
17 import sys
18
19 import eventlet
20 from oslo_config import cfg
21 from eventlet import wsgi
22
23 from st2common import log as logging
24 from st2common.service_setup import setup as common_setup
25 from st2common.service_setup import teardown as common_teardown
26 from st2common.stream.listener import get_listener_if_set
27 from st2common.util.wsgi import shutdown_server_kill_pending_requests
28 from st2stream.signal_handlers import register_stream_signal_handlers
29 from st2stream import config
30 config.register_opts()
31 from st2stream import app
32
33 __all__ = [
34 'main'
35 ]
36
37
38 eventlet.monkey_patch(
39 os=True,
40 select=True,
41 socket=True,
42 thread=False if '--use-debugger' in sys.argv else True,
43 time=True)
44
45 LOG = logging.getLogger(__name__)
46
47 # How much time to give to the request in progress to finish in seconds before killing them
48 WSGI_SERVER_REQUEST_SHUTDOWN_TIME = 2
49
50
51 def _setup():
52 capabilities = {
53 'name': 'stream',
54 'listen_host': cfg.CONF.stream.host,
55 'listen_port': cfg.CONF.stream.port,
56 'type': 'active'
57 }
58 common_setup(service='stream', config=config, setup_db=True, register_mq_exchanges=True,
59 register_signal_handlers=True, register_internal_trigger_types=False,
60 run_migrations=False, service_registry=True, capabilities=capabilities)
61
62
63 def _run_server():
64 host = cfg.CONF.stream.host
65 port = cfg.CONF.stream.port
66
67 LOG.info('(PID=%s) ST2 Stream API is serving on http://%s:%s.', os.getpid(), host, port)
68
69 max_pool_size = eventlet.wsgi.DEFAULT_MAX_SIMULTANEOUS_REQUESTS
70 worker_pool = eventlet.GreenPool(max_pool_size)
71 sock = eventlet.listen((host, port))
72
73 def queue_shutdown(signal_number, stack_frame):
74 eventlet.spawn_n(shutdown_server_kill_pending_requests, sock=sock,
75 worker_pool=worker_pool, wait_time=WSGI_SERVER_REQUEST_SHUTDOWN_TIME)
76
77 # We register a custom SIGINT handler which allows us to kill long running active requests.
78 # Note: Eventually we will support draining (waiting for short-running requests), but we
79 # will still want to kill long running stream requests.
80 register_stream_signal_handlers(handler_func=queue_shutdown)
81
82 wsgi.server(sock, app.setup_app(), custom_pool=worker_pool)
83 return 0
84
85
86 def _teardown():
87 common_teardown()
88
89
90 def main():
91 try:
92 _setup()
93 return _run_server()
94 except SystemExit as exit_code:
95 sys.exit(exit_code)
96 except KeyboardInterrupt:
97 listener = get_listener_if_set(name='stream')
98
99 if listener:
100 listener.shutdown()
101 except Exception:
102 LOG.exception('(PID=%s) ST2 Stream API quit due to exception.', os.getpid())
103 return 1
104 finally:
105 _teardown()
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/st2stream/st2stream/cmd/api.py b/st2stream/st2stream/cmd/api.py
--- a/st2stream/st2stream/cmd/api.py
+++ b/st2stream/st2stream/cmd/api.py
@@ -13,6 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from st2common.util.monkey_patch import monkey_patch
+monkey_patch()
+
import os
import sys
| {"golden_diff": "diff --git a/st2stream/st2stream/cmd/api.py b/st2stream/st2stream/cmd/api.py\n--- a/st2stream/st2stream/cmd/api.py\n+++ b/st2stream/st2stream/cmd/api.py\n@@ -13,6 +13,9 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+from st2common.util.monkey_patch import monkey_patch\n+monkey_patch()\n+\n import os\n import sys\n", "issue": "St2Stream service broken when using SSL with mongodb\n## SUMMARY\r\n\r\nThis issue is an extension to #4832 however this time it is the st2stream service, I have looked that the code and can see the same monkey patch code hasn't been applied to the st2stream app\r\n\r\n### STACKSTORM VERSION\r\n\r\nPaste the output of ``st2 --version``: 3.3.0\r\n\r\n##### OS, environment, install method\r\n\r\nDocker compose with the split services and mongo db references commented out so that an external db can be used https://github.com/StackStorm/st2-docker/blob/master/docker-compose.yml\r\n\r\nAll other services correctly connected to mongodb.net test instance with the exception of st2stream.\r\n\r\n## Steps to reproduce the problem\r\n\r\nuse docker yaml at https://github.com/StackStorm/st2-docker/blob/master/docker-compose.yml, comment out mongo container and references, adjust files/st2-docker.conf to point to external DB with SSL = True enabled.\r\ndocker-compose up\r\n\r\n## Expected Results\r\n\r\nWhat did you expect to happen when running the steps above?\r\n\r\nst2stream to operate correctly\r\n\r\n## Actual Results\r\n\r\nWhat happened? What output did you get?\r\n\r\n2020-11-16 05:48:55,053 WARNING [-] Retry on ConnectionError - Cannot connect to database default :\r\nmaximum recursion depth exceeded\r\n\r\n\r\n\r\nAdding monkey patch code to st2stream app resolves the issue (manually injected into container to test).\r\n\r\nfile: st2stream/cmd/api.py\r\nCode: \r\nfrom st2common.util.monkey_patch import monkey_patch\r\nmonkey_patch()\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\n\nimport eventlet\nfrom oslo_config import cfg\nfrom eventlet import wsgi\n\nfrom st2common import log as logging\nfrom st2common.service_setup import setup as common_setup\nfrom st2common.service_setup import teardown as common_teardown\nfrom st2common.stream.listener import get_listener_if_set\nfrom st2common.util.wsgi import shutdown_server_kill_pending_requests\nfrom st2stream.signal_handlers import register_stream_signal_handlers\nfrom st2stream import config\nconfig.register_opts()\nfrom st2stream import app\n\n__all__ = [\n 'main'\n]\n\n\neventlet.monkey_patch(\n os=True,\n select=True,\n socket=True,\n thread=False if '--use-debugger' in sys.argv else True,\n time=True)\n\nLOG = logging.getLogger(__name__)\n\n# How much time to give to the request in progress to finish in seconds before killing them\nWSGI_SERVER_REQUEST_SHUTDOWN_TIME = 2\n\n\ndef 
_setup():\n capabilities = {\n 'name': 'stream',\n 'listen_host': cfg.CONF.stream.host,\n 'listen_port': cfg.CONF.stream.port,\n 'type': 'active'\n }\n common_setup(service='stream', config=config, setup_db=True, register_mq_exchanges=True,\n register_signal_handlers=True, register_internal_trigger_types=False,\n run_migrations=False, service_registry=True, capabilities=capabilities)\n\n\ndef _run_server():\n host = cfg.CONF.stream.host\n port = cfg.CONF.stream.port\n\n LOG.info('(PID=%s) ST2 Stream API is serving on http://%s:%s.', os.getpid(), host, port)\n\n max_pool_size = eventlet.wsgi.DEFAULT_MAX_SIMULTANEOUS_REQUESTS\n worker_pool = eventlet.GreenPool(max_pool_size)\n sock = eventlet.listen((host, port))\n\n def queue_shutdown(signal_number, stack_frame):\n eventlet.spawn_n(shutdown_server_kill_pending_requests, sock=sock,\n worker_pool=worker_pool, wait_time=WSGI_SERVER_REQUEST_SHUTDOWN_TIME)\n\n # We register a custom SIGINT handler which allows us to kill long running active requests.\n # Note: Eventually we will support draining (waiting for short-running requests), but we\n # will still want to kill long running stream requests.\n register_stream_signal_handlers(handler_func=queue_shutdown)\n\n wsgi.server(sock, app.setup_app(), custom_pool=worker_pool)\n return 0\n\n\ndef _teardown():\n common_teardown()\n\n\ndef main():\n try:\n _setup()\n return _run_server()\n except SystemExit as exit_code:\n sys.exit(exit_code)\n except KeyboardInterrupt:\n listener = get_listener_if_set(name='stream')\n\n if listener:\n listener.shutdown()\n except Exception:\n LOG.exception('(PID=%s) ST2 Stream API quit due to exception.', os.getpid())\n return 1\n finally:\n _teardown()\n", "path": "st2stream/st2stream/cmd/api.py"}], "after_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom st2common.util.monkey_patch import monkey_patch\nmonkey_patch()\n\nimport os\nimport sys\n\nimport eventlet\nfrom oslo_config import cfg\nfrom eventlet import wsgi\n\nfrom st2common import log as logging\nfrom st2common.service_setup import setup as common_setup\nfrom st2common.service_setup import teardown as common_teardown\nfrom st2common.stream.listener import get_listener_if_set\nfrom st2common.util.wsgi import shutdown_server_kill_pending_requests\nfrom st2stream.signal_handlers import register_stream_signal_handlers\nfrom st2stream import config\nconfig.register_opts()\nfrom st2stream import app\n\n__all__ = [\n 'main'\n]\n\n\neventlet.monkey_patch(\n os=True,\n select=True,\n socket=True,\n thread=False if '--use-debugger' in sys.argv else True,\n time=True)\n\nLOG = logging.getLogger(__name__)\n\n# How much time to give to the request in progress to finish in seconds before killing them\nWSGI_SERVER_REQUEST_SHUTDOWN_TIME = 2\n\n\ndef _setup():\n capabilities = {\n 'name': 'stream',\n 'listen_host': cfg.CONF.stream.host,\n 'listen_port': cfg.CONF.stream.port,\n 'type': 'active'\n }\n 
common_setup(service='stream', config=config, setup_db=True, register_mq_exchanges=True,\n register_signal_handlers=True, register_internal_trigger_types=False,\n run_migrations=False, service_registry=True, capabilities=capabilities)\n\n\ndef _run_server():\n host = cfg.CONF.stream.host\n port = cfg.CONF.stream.port\n\n LOG.info('(PID=%s) ST2 Stream API is serving on http://%s:%s.', os.getpid(), host, port)\n\n max_pool_size = eventlet.wsgi.DEFAULT_MAX_SIMULTANEOUS_REQUESTS\n worker_pool = eventlet.GreenPool(max_pool_size)\n sock = eventlet.listen((host, port))\n\n def queue_shutdown(signal_number, stack_frame):\n eventlet.spawn_n(shutdown_server_kill_pending_requests, sock=sock,\n worker_pool=worker_pool, wait_time=WSGI_SERVER_REQUEST_SHUTDOWN_TIME)\n\n # We register a custom SIGINT handler which allows us to kill long running active requests.\n # Note: Eventually we will support draining (waiting for short-running requests), but we\n # will still want to kill long running stream requests.\n register_stream_signal_handlers(handler_func=queue_shutdown)\n\n wsgi.server(sock, app.setup_app(), custom_pool=worker_pool)\n return 0\n\n\ndef _teardown():\n common_teardown()\n\n\ndef main():\n try:\n _setup()\n return _run_server()\n except SystemExit as exit_code:\n sys.exit(exit_code)\n except KeyboardInterrupt:\n listener = get_listener_if_set(name='stream')\n\n if listener:\n listener.shutdown()\n except Exception:\n LOG.exception('(PID=%s) ST2 Stream API quit due to exception.', os.getpid())\n return 1\n finally:\n _teardown()\n", "path": "st2stream/st2stream/cmd/api.py"}]} | 1,590 | 103 |
gh_patches_debug_8235 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1824 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
google-api-python-client 2.6.0, 2.7.0, 2.8.0, 2.9.0 cannot work with google-auth 1.18.0 and earlier
These versions of `google-api-python-client` require `google-auth>=1.16.0,<2dev`. However, some of the code cannot work with `google-auth` 1.18.0 and earlier, because `google.auth.load_credentials_from_file()` and `google.auth.default()` do not accept the `quota_project_id` parameter. I think it would be better to increase the minimum version of `google-auth` for these `google-api-python-client` versions.
Here are the details:
#### Environment details
- OS type and version: Ubuntu 20.04
- Python version: 3.9.7
- pip version: 21.2.4
- `google-api-python-client` version: 2.6.0, 2.7.0, 2.8.0, 2.9.0
#### Steps to reproduce
Create a clean Python environment and install relevant packages
```
conda create -n test python=3.9
conda activate test
pip install --no-cache-dir google-auth==1.18.0 google-api-core==1.21.0 google-api-python-client==2.9.0
```
#### Code example
```
(test) heh@ubuntu:~$ python
Python 3.9.7 (default, Sep 16 2021, 13:09:58)
[GCC 7.5.0] :: Anaconda, Inc. on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import googleapiclient._auth as _auth
>>> _auth.credentials_from_file("a")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/heh/anaconda3/envs/test/lib/python3.9/site-packages/googleapiclient/_auth.py", line 44, in credentials_from_file
credentials, _ = google.auth.load_credentials_from_file(filename, scopes=scopes, quota_project_id=quota_project_id)
TypeError: load_credentials_from_file() got an unexpected keyword argument 'quota_project_id'
>>> _auth.default_credentials("a")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/heh/anaconda3/envs/test/lib/python3.9/site-packages/googleapiclient/_auth.py", line 54, in default_credentials
credentials, _ = google.auth.default(scopes=scopes, quota_project_id=quota_project_id)
TypeError: default() got an unexpected keyword argument 'quota_project_id'
```
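
Until the minimum version is raised, a possible caller-side workaround is to forward `quota_project_id` only when the installed `google-auth` is new enough to accept it. This is only a sketch: it assumes the `packaging` library is available, that Application Default Credentials are configured, and that 1.19.0 is the first release accepting the parameter.

```python
import google.auth
from packaging import version

kwargs = {"scopes": ["https://www.googleapis.com/auth/cloud-platform"]}
if version.parse(google.auth.__version__) >= version.parse("1.19.0"):
    kwargs["quota_project_id"] = "example-project"  # placeholder value

# Only reaches google-auth with arguments it understands on either version.
credentials, project_id = google.auth.default(**kwargs)
```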
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Setup script for Google API Python client.
16
17 Also installs included versions of third party libraries, if those libraries
18 are not already installed.
19 """
20 from __future__ import print_function
21
22 import sys
23
24 if sys.version_info < (3, 6):
25 print("google-api-python-client requires python3 version >= 3.6.", file=sys.stderr)
26 sys.exit(1)
27
28 import io
29 import os
30
31 from setuptools import setup
32
33 packages = ["apiclient", "googleapiclient", "googleapiclient/discovery_cache"]
34
35 install_requires = [
36 "httplib2>=0.15.0,<1dev",
37 # NOTE: Maintainers, please do not require google-auth>=2.x.x
38 # Until this issue is closed
39 # https://github.com/googleapis/google-cloud-python/issues/10566
40 "google-auth>=1.16.0,<3.0.0dev",
41 "google-auth-httplib2>=0.1.0",
42 # NOTE: Maintainers, please do not require google-api-core>=2.x.x
43 # Until this issue is closed
44 # https://github.com/googleapis/google-cloud-python/issues/10566
45 "google-api-core >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0",
46 "uritemplate>=3.0.1,<5",
47 ]
48
49 package_root = os.path.abspath(os.path.dirname(__file__))
50
51 readme_filename = os.path.join(package_root, "README.md")
52 with io.open(readme_filename, encoding="utf-8") as readme_file:
53 readme = readme_file.read()
54
55 package_root = os.path.abspath(os.path.dirname(__file__))
56
57 version = {}
58 with open(os.path.join(package_root, "googleapiclient/version.py")) as fp:
59 exec(fp.read(), version)
60 version = version["__version__"]
61
62 setup(
63 name="google-api-python-client",
64 version=version,
65 description="Google API Client Library for Python",
66 long_description=readme,
67 long_description_content_type="text/markdown",
68 author="Google LLC",
69 author_email="[email protected]",
70 url="https://github.com/googleapis/google-api-python-client/",
71 install_requires=install_requires,
72 python_requires=">=3.6",
73 packages=packages,
74 package_data={"googleapiclient": ["discovery_cache/documents/*.json"]},
75 license="Apache 2.0",
76 keywords="google api client",
77 classifiers=[
78 "Programming Language :: Python :: 3",
79 "Programming Language :: Python :: 3.6",
80 "Programming Language :: Python :: 3.7",
81 "Programming Language :: Python :: 3.8",
82 "Programming Language :: Python :: 3.9",
83 "Programming Language :: Python :: 3.10",
84 "Development Status :: 5 - Production/Stable",
85 "Intended Audience :: Developers",
86 "License :: OSI Approved :: Apache Software License",
87 "Operating System :: OS Independent",
88 "Topic :: Internet :: WWW/HTTP",
89 ],
90 )
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,7 @@
# NOTE: Maintainers, please do not require google-auth>=2.x.x
# Until this issue is closed
# https://github.com/googleapis/google-cloud-python/issues/10566
- "google-auth>=1.16.0,<3.0.0dev",
+ "google-auth>=1.19.0,<3.0.0dev",
"google-auth-httplib2>=0.1.0",
# NOTE: Maintainers, please do not require google-api-core>=2.x.x
# Until this issue is closed
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,7 +37,7 @@\n # NOTE: Maintainers, please do not require google-auth>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n- \"google-auth>=1.16.0,<3.0.0dev\",\n+ \"google-auth>=1.19.0,<3.0.0dev\",\n \"google-auth-httplib2>=0.1.0\",\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n", "issue": "google-api-python-client 2.6.0, 2.7.0, 2.8.0, 2.9.0 cannot work with google-auth 1.18.0 and earlier\nThese versions of `google-api-python-client` requires `google-auth>=1.16.0,<2dev`. However, some of the code cannot work with `google-auth` 1.18.0 and earlier because`google.auth.load_credentials_from_file()` and `google.auth.default()` do not accept the `quota_project_id` parameter. I think it will be better to increase the minimum version of `google-auth` for these `google-api-python-client` versions.\r\n\r\nHere are the details:\r\n\r\n#### Environment details\r\n\r\n - OS type and version: Ubuntu 20.04\r\n - Python version: 3.9.7\r\n - pip version: 21.2.4\r\n - `google-api-python-client` version: 2.6.0, 2.7.0, 2.8.0, 2.9.0\r\n\r\n#### Steps to reproduce\r\n\r\nCreate a clean Python environment and install relevant packages\r\n \r\n```\r\nconda create -n test python=3.9\r\nconda activate test\r\npip install --no-cache-dir google-auth==1.18.0 google-api-core==1.21.0 google-api-python-client==2.9.0\r\n```\r\n\r\n#### Code example\r\n\r\n```\r\n(test) heh@ubuntu:~$ python\r\nPython 3.9.7 (default, Sep 16 2021, 13:09:58)\r\n[GCC 7.5.0] :: Anaconda, Inc. on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import googleapiclient._auth as _auth\r\n>>> _auth.credentials_from_file(\"a\")\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/heh/anaconda3/envs/test/lib/python3.9/site-packages/googleapiclient/_auth.py\", line 44, in credentials_from_file\r\n credentials, _ = google.auth.load_credentials_from_file(filename, scopes=scopes, quota_project_id=quota_project_id)\r\nTypeError: load_credentials_from_file() got an unexpected keyword argument 'quota_project_id'\r\n>>> _auth.default_credentials(\"a\")\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/heh/anaconda3/envs/test/lib/python3.9/site-packages/googleapiclient/_auth.py\", line 54, in default_credentials\r\n credentials, _ = google.auth.default(scopes=scopes, quota_project_id=quota_project_id)\r\nTypeError: default() got an unexpected keyword argument 'quota_project_id'\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (3, 6):\n print(\"google-api-python-client requires python3 version >= 3.6.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\n\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n # NOTE: Maintainers, please do not require google-auth>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-auth>=1.16.0,<3.0.0dev\",\n \"google-auth-httplib2>=0.1.0\",\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0\",\n \"uritemplate>=3.0.1,<5\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"googleapiclient/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n package_data={\"googleapiclient\": [\"discovery_cache/documents/*.json\"]},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (3, 6):\n print(\"google-api-python-client requires python3 version >= 3.6.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\n\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n # NOTE: Maintainers, please do not require google-auth>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-auth>=1.19.0,<3.0.0dev\",\n \"google-auth-httplib2>=0.1.0\",\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0\",\n \"uritemplate>=3.0.1,<5\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"googleapiclient/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n package_data={\"googleapiclient\": [\"discovery_cache/documents/*.json\"]},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 1,852 | 159 |
gh_patches_debug_5447 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-5295 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GET parameter to enable the `summary_large_image` Twitter thumbnail
Add a check for `?twittercard=large` to enable the large Twitter card in the front end.
Minimum: 300 x 157; ideal: 600 x 314
https://developer.twitter.com/en/docs/tweets/optimize-with-cards/overview/summary-card-with-large-image.html
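
One possible shape for the request-side switch (a sketch with hypothetical names — the actual view/template wiring is not specified here), assuming a larger thumbnail alias such as `social_network_large` is added next to the existing `social_network` one:

```python
# Hypothetical helper for picking the card type and thumbnail alias.
def twitter_card_context(request):
    large = request.GET.get("twittercard") == "large"
    return {
        "twitter_card_type": "summary_large_image" if large else "summary",
        # 600 x 314 matches the "ideal" size above; the minimum is 300 x 157.
        "thumbnail_alias": "social_network_large" if large else "social_network",
    }
```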
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/settings/abstract_base/zds.py`
Content:
```
1 from os.path import join
2 from pathlib import Path
3
4 from django.utils.translation import gettext_lazy as _
5
6 from .config import config
7 from .base_dir import BASE_DIR
8
9
10 zds_config = config.get('zds', {})
11
12
13 GEOIP_PATH = join(BASE_DIR, 'geodata')
14
15
16 ES_ENABLED = True
17
18 ES_CONNECTIONS = {
19 'default': {
20 'hosts': ['localhost:9200'],
21 }
22 }
23
24 ES_SEARCH_INDEX = {
25 'name': 'zds_search',
26 'shards': 3,
27 'replicas': 0,
28 }
29
30
31 # Anonymous [Dis]Likes. Authors of [dis]likes before those pk will never be shown
32 VOTES_ID_LIMIT = zds_config.get('VOTES_ID_LIMIT', 0)
33
34
35 THUMBNAIL_ALIASES = {
36 '': {
37 'avatar': {'size': (60, 60), 'crop': True},
38 'avatar_mini': {'size': (24, 24), 'crop': True},
39 'tutorial_illu': {'size': (60, 60), 'crop': True},
40 'article_illu': {'size': (60, 60), 'crop': True},
41 'content_thumb': {'size': (96, 96), 'crop': True},
42 'help_illu': {'size': (48, 48), 'crop': True},
43 'help_mini_illu': {'size': (26, 26), 'crop': True},
44 'gallery': {'size': (120, 120), 'crop': True},
45 'featured': {'size': (228, 228), 'crop': True},
46 'gallery_illu': {'size': (480, 270), 'crop': True},
47 'content': {'size': (960, 960), 'crop': False},
48 'social_network': {'size': (144, 144), 'crop': True},
49 # ^^^ ^^^ -> minimum dimensions of 144x144
50 # https://developer.twitter.com/en/docs/tweets/optimize-with-cards/overview/summary
51 },
52 }
53
54
55 ZDS_APP = {
56 'site': {
57 'name': 'ZesteDeSavoir',
58 'literal_name': 'Zeste de Savoir',
59 'slogan': 'Zeste de Savoir, la connaissance pour tous et sans pépins',
60 'abbr': 'zds',
61 'url': 'https://zestedesavoir.com',
62 'dns': 'zestedesavoir.com',
63 'email_contact': '[email protected]',
64 'email_noreply': '[email protected]',
65 'forum_feedback_users': '/forums/communaute/bug-suggestions/',
66 'contribute_link': 'https://docs.zestedesavoir.com/contributing.html',
67 'short_description': '',
68 'long_description': 'Zeste de Savoir est un site de partage de connaissances '
69 'sur lequel vous trouverez des tutoriels de tous niveaux, '
70 'des articles et des forums d’entraide animés par et pour '
71 'la communauté.',
72 'googleAnalyticsID': zds_config.get('google_analytics_id'),
73 'googleTagManagerID': zds_config.get('google_tag_manager_id'),
74 'association': {
75 'name': 'Zeste de Savoir',
76 'fee': zds_config.get('association_fee', '20 €'),
77 'email': '[email protected]',
78 'email_ca': '[email protected]',
79 'forum_ca_pk': 25
80 },
81 'repository': {
82 'url': 'https://github.com/zestedesavoir/zds-site',
83 'bugtracker': 'https://github.com/zestedesavoir/zds-site/issues',
84 'api': 'https://api.github.com/repos/zestedesavoir/zds-site',
85 'tags': [
86 'C-Back', 'C-Front', 'C-API', 'C-Documentation', 'C-Infra', 'S-Bug', 'S-Régression',
87 'S-Évolution'
88 ]
89 },
90 'licenses': {
91 'logo': {
92 'code': 'CC-BY',
93 'title': 'Creative Commons License',
94 'description': 'Licence Creative Commons Attribution - Pas d’Utilisation Commerciale - '
95 'Partage dans les Mêmes Conditions 4.0 International.',
96 'url_image': 'http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png',
97 'url_license': 'http://creativecommons.org/licenses/by-nc-sa/4.0/',
98 'author': 'MaxRoyo'
99 },
100 'cookies': {
101 'code': 'CC-BY',
102 'title': 'Licence Creative Commons',
103 'description': 'licence Creative Commons Attribution 4.0 International',
104 'url_image': 'http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png',
105 'url_license': 'http://creativecommons.org/licenses/by-nc-sa/4.0/'
106 },
107 'source': {
108 'code': 'GPL v3',
109 'url_license': 'http://www.gnu.org/licenses/gpl-3.0.html',
110 'provider_name': 'Progdupeupl',
111 'provider_url': 'http://pdp.microjoe.org',
112 },
113 'licence_info_title': 'http://zestedesavoir.com/tutoriels/281/le-droit-dauteur-creative-commons-et-les-lic'
114 'ences-sur-zeste-de-savoir/',
115 'licence_info_link': 'Le droit d\'auteur, Creative Commons et les licences sur Zeste de Savoir'
116 },
117 'hosting': {
118 'name': 'GANDI SAS',
119 'address': '63-65 boulevard Massena - 75013 Paris - France'
120 },
121 'social': {
122 'mastodon': 'https://framapiaf.org/@ZesteDeSavoir',
123 'facebook': 'https://www.facebook.com/ZesteDeSavoir',
124 'twitter': 'https://twitter.com/ZesteDeSavoir'
125 },
126 'cnil': '1771020',
127 },
128 'member': {
129 'bot_account': zds_config.get('member_bot_username', 'admin'),
130 'anonymous_account': zds_config.get('member_anonymous_username', 'anonymous'),
131 'external_account': zds_config.get('member_external_username', 'external'),
132 'bot_group': 'bot',
133 'dev_group': 'devs',
134 'members_per_page': 100,
135 'providers_per_page': 100,
136 'hats_per_page': 50,
137 'users_in_hats_list': 5,
138 'requested_hats_per_page': 100,
139 'update_last_visit_interval': 600, # seconds
140 'old_smileys_allowed': False,
141 'old_smileys_cookie_key': 'use_old_smileys',
142 },
143 'hats': {
144 'moderation': 'Staff',
145 'validation': 'Staff',
146 'hats_management': 'Staff',
147 },
148 'gallery': {
149 'image_max_size': 1024 * 1024,
150 'gallery_per_page': 21,
151 'images_per_page': 21,
152 },
153 'tutorial': {
154 'home_number': 4,
155 },
156 'article': {
157 'home_number': 3
158 },
159 'opinions': {
160 'home_number': 5
161 },
162 'content': {
163 'repo_private_path': join(BASE_DIR, 'contents-private'),
164 'repo_public_path': join(BASE_DIR, 'contents-public'),
165 'extra_contents_dirname': 'extra_contents',
166 # can also be 'extra_content_generation_policy': 'WATCHDOG'
167 # or 'extra_content_generation_policy': 'NOTHING'
168 'extra_content_generation_policy': 'SYNC',
169 'extra_content_watchdog_dir': join(BASE_DIR, 'watchdog-build'),
170 'max_tree_depth': 3,
171 'default_licence_pk': 7,
172 'content_per_page': 42,
173 'max_last_publications_level_1': 6,
174 'max_last_publications_level_2': 12,
175 'max_last_publications_level_3': 12,
176 'notes_per_page': 25,
177 'helps_per_page': 20,
178 'commits_per_page': 20,
179 'feed_length': 5,
180 'user_page_number': 5,
181 'default_image': join(BASE_DIR, 'fixtures', 'noir_black.png'),
182 'import_image_prefix': 'archive',
183 'build_pdf_when_published': True,
184 'maximum_slug_size': 150,
185 'characters_per_minute': 1500,
186 'editorial_line_link':
187 'https://zestedesavoir.com/articles/222/la-ligne-editoriale-officielle-de-zeste-de-savoir/',
188 'epub_stylesheets': {
189 'toc': Path('toc.css'),
190 'full': Path(BASE_DIR) / 'dist' / 'css' / 'zmd.css',
191 'katex': Path(BASE_DIR) / 'dist' / 'css' / 'katex.min.css'
192 },
193 'latex_template_repo': 'NOT_EXISTING_DIR'
194 },
195 'forum': {
196 'posts_per_page': 21,
197 'topics_per_page': 21,
198 'spam_limit_seconds': 60 * 15,
199 'spam_limit_participant': 2,
200 'beta_forum_id': zds_config.get('publications_being_written_forum_id', 1),
201 'max_post_length': 1000000,
202 'top_tag_max': 5,
203 'home_number': 5,
204 'old_post_limit_days': 90,
205 # Exclude tags from top tags list. Tags listed here should not be relevant for most of users.
206 # Be warned exclude too much tags can restrict performance
207 'top_tag_exclu': ['bug', 'suggestion', 'tutoriel', 'beta', 'article']
208 },
209 'topic': {
210 'home_number': 5,
211 },
212 'comment': {
213 'max_pings': 15,
214 # allow to mention (and notify) members in messages
215 'enable_pings': True,
216 },
217 'featured_resource': {
218 'featured_per_page': 100,
219 'home_number': 5,
220 },
221 'notification': {
222 'per_page': 50,
223 },
224 'paginator': {
225 'folding_limit': 4
226 },
227 'search': {
228 'mark_keywords': ['javafx', 'haskell', 'groovy', 'powershell', 'latex', 'linux', 'windows'],
229 'results_per_page': 20,
230 'search_groups': {
231 'content': (
232 _('Contenus publiés'), ['publishedcontent', 'chapter']
233 ),
234 'topic': (
235 _('Sujets du forum'), ['topic']
236 ),
237 'post': (
238 _('Messages du forum'), ['post']
239 ),
240 },
241 'boosts': {
242 'publishedcontent': {
243 'global': 3.0,
244 'if_article': 1.0,
245 'if_tutorial': 1.0,
246 'if_medium_or_big_tutorial': 1.5,
247 'if_opinion': 0.66,
248 'if_opinion_not_picked': 0.5
249 },
250 'topic': {
251 'global': 2.0,
252 'if_solved': 1.1,
253 'if_sticky': 1.2,
254 'if_locked': 0.1,
255 },
256 'chapter': {
257 'global': 1.5,
258 },
259 'post': {
260 'global': 1.0,
261 'if_first': 1.2,
262 'if_useful': 1.5,
263 'ld_ratio_above_1': 1.05,
264 'ld_ratio_below_1': 0.95,
265 }
266 }
267 },
268 'visual_changes': [],
269 'display_search_bar': True,
270 'zmd': {
271 'server': 'http://127.0.0.1:27272',
272 'disable_pings': False
273 },
274 'stats_ga_viewid': 'ga:86962671'
275 }
276
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/settings/abstract_base/zds.py b/zds/settings/abstract_base/zds.py
--- a/zds/settings/abstract_base/zds.py
+++ b/zds/settings/abstract_base/zds.py
@@ -48,6 +48,7 @@
'social_network': {'size': (144, 144), 'crop': True},
# ^^^ ^^^ -> minimum dimensions of 144x144
# https://developer.twitter.com/en/docs/tweets/optimize-with-cards/overview/summary
+ 'social_network_large': {'size': (600, 314), 'crop': True}
},
}
| {"golden_diff": "diff --git a/zds/settings/abstract_base/zds.py b/zds/settings/abstract_base/zds.py\n--- a/zds/settings/abstract_base/zds.py\n+++ b/zds/settings/abstract_base/zds.py\n@@ -48,6 +48,7 @@\n 'social_network': {'size': (144, 144), 'crop': True},\n # ^^^ ^^^ -> minimum dimensions of 144x144\n # https://developer.twitter.com/en/docs/tweets/optimize-with-cards/overview/summary\n+ 'social_network_large': {'size': (600, 314), 'crop': True}\n },\n }\n", "issue": "Param\u00e8tre GET pour activer la miniature twitter en `summary_large_image`\nAjouter une v\u00e9rification `?twittercard=large` pour activer la twittercard en large dans le front.\r\n\r\nMinimum : 300 x 157 id\u00e9al : 600 x 314\r\n\r\nhttps://developer.twitter.com/en/docs/tweets/optimize-with-cards/overview/summary-card-with-large-image.html\n", "before_files": [{"content": "from os.path import join\nfrom pathlib import Path\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .config import config\nfrom .base_dir import BASE_DIR\n\n\nzds_config = config.get('zds', {})\n\n\nGEOIP_PATH = join(BASE_DIR, 'geodata')\n\n\nES_ENABLED = True\n\nES_CONNECTIONS = {\n 'default': {\n 'hosts': ['localhost:9200'],\n }\n}\n\nES_SEARCH_INDEX = {\n 'name': 'zds_search',\n 'shards': 3,\n 'replicas': 0,\n}\n\n\n# Anonymous [Dis]Likes. Authors of [dis]likes before those pk will never be shown\nVOTES_ID_LIMIT = zds_config.get('VOTES_ID_LIMIT', 0)\n\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'avatar': {'size': (60, 60), 'crop': True},\n 'avatar_mini': {'size': (24, 24), 'crop': True},\n 'tutorial_illu': {'size': (60, 60), 'crop': True},\n 'article_illu': {'size': (60, 60), 'crop': True},\n 'content_thumb': {'size': (96, 96), 'crop': True},\n 'help_illu': {'size': (48, 48), 'crop': True},\n 'help_mini_illu': {'size': (26, 26), 'crop': True},\n 'gallery': {'size': (120, 120), 'crop': True},\n 'featured': {'size': (228, 228), 'crop': True},\n 'gallery_illu': {'size': (480, 270), 'crop': True},\n 'content': {'size': (960, 960), 'crop': False},\n 'social_network': {'size': (144, 144), 'crop': True},\n # ^^^ ^^^ -> minimum dimensions of 144x144\n # https://developer.twitter.com/en/docs/tweets/optimize-with-cards/overview/summary\n },\n}\n\n\nZDS_APP = {\n 'site': {\n 'name': 'ZesteDeSavoir',\n 'literal_name': 'Zeste de Savoir',\n 'slogan': 'Zeste de Savoir, la connaissance pour tous et sans p\u00e9pins',\n 'abbr': 'zds',\n 'url': 'https://zestedesavoir.com',\n 'dns': 'zestedesavoir.com',\n 'email_contact': '[email protected]',\n 'email_noreply': '[email protected]',\n 'forum_feedback_users': '/forums/communaute/bug-suggestions/',\n 'contribute_link': 'https://docs.zestedesavoir.com/contributing.html',\n 'short_description': '',\n 'long_description': 'Zeste de Savoir est un site de partage de connaissances '\n 'sur lequel vous trouverez des tutoriels de tous niveaux, '\n 'des articles et des forums d\u2019entraide anim\u00e9s par et pour '\n 'la communaut\u00e9.',\n 'googleAnalyticsID': zds_config.get('google_analytics_id'),\n 'googleTagManagerID': zds_config.get('google_tag_manager_id'),\n 'association': {\n 'name': 'Zeste de Savoir',\n 'fee': zds_config.get('association_fee', '20\u00a0\u20ac'),\n 'email': '[email protected]',\n 'email_ca': '[email protected]',\n 'forum_ca_pk': 25\n },\n 'repository': {\n 'url': 'https://github.com/zestedesavoir/zds-site',\n 'bugtracker': 'https://github.com/zestedesavoir/zds-site/issues',\n 'api': 'https://api.github.com/repos/zestedesavoir/zds-site',\n 'tags': [\n 'C-Back', 'C-Front', 'C-API', 
'C-Documentation', 'C-Infra', 'S-Bug', 'S-R\u00e9gression',\n 'S-\u00c9volution'\n ]\n },\n 'licenses': {\n 'logo': {\n 'code': 'CC-BY',\n 'title': 'Creative Commons License',\n 'description': 'Licence Creative Commons Attribution - Pas d\u2019Utilisation Commerciale - '\n 'Partage dans les M\u00eames Conditions 4.0 International.',\n 'url_image': 'http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png',\n 'url_license': 'http://creativecommons.org/licenses/by-nc-sa/4.0/',\n 'author': 'MaxRoyo'\n },\n 'cookies': {\n 'code': 'CC-BY',\n 'title': 'Licence Creative Commons',\n 'description': 'licence Creative Commons Attribution 4.0 International',\n 'url_image': 'http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png',\n 'url_license': 'http://creativecommons.org/licenses/by-nc-sa/4.0/'\n },\n 'source': {\n 'code': 'GPL v3',\n 'url_license': 'http://www.gnu.org/licenses/gpl-3.0.html',\n 'provider_name': 'Progdupeupl',\n 'provider_url': 'http://pdp.microjoe.org',\n },\n 'licence_info_title': 'http://zestedesavoir.com/tutoriels/281/le-droit-dauteur-creative-commons-et-les-lic'\n 'ences-sur-zeste-de-savoir/',\n 'licence_info_link': 'Le droit d\\'auteur, Creative Commons et les licences sur Zeste de Savoir'\n },\n 'hosting': {\n 'name': 'GANDI SAS',\n 'address': '63-65 boulevard Massena - 75013 Paris - France'\n },\n 'social': {\n 'mastodon': 'https://framapiaf.org/@ZesteDeSavoir',\n 'facebook': 'https://www.facebook.com/ZesteDeSavoir',\n 'twitter': 'https://twitter.com/ZesteDeSavoir'\n },\n 'cnil': '1771020',\n },\n 'member': {\n 'bot_account': zds_config.get('member_bot_username', 'admin'),\n 'anonymous_account': zds_config.get('member_anonymous_username', 'anonymous'),\n 'external_account': zds_config.get('member_external_username', 'external'),\n 'bot_group': 'bot',\n 'dev_group': 'devs',\n 'members_per_page': 100,\n 'providers_per_page': 100,\n 'hats_per_page': 50,\n 'users_in_hats_list': 5,\n 'requested_hats_per_page': 100,\n 'update_last_visit_interval': 600, # seconds\n 'old_smileys_allowed': False,\n 'old_smileys_cookie_key': 'use_old_smileys',\n },\n 'hats': {\n 'moderation': 'Staff',\n 'validation': 'Staff',\n 'hats_management': 'Staff',\n },\n 'gallery': {\n 'image_max_size': 1024 * 1024,\n 'gallery_per_page': 21,\n 'images_per_page': 21,\n },\n 'tutorial': {\n 'home_number': 4,\n },\n 'article': {\n 'home_number': 3\n },\n 'opinions': {\n 'home_number': 5\n },\n 'content': {\n 'repo_private_path': join(BASE_DIR, 'contents-private'),\n 'repo_public_path': join(BASE_DIR, 'contents-public'),\n 'extra_contents_dirname': 'extra_contents',\n # can also be 'extra_content_generation_policy': 'WATCHDOG'\n # or 'extra_content_generation_policy': 'NOTHING'\n 'extra_content_generation_policy': 'SYNC',\n 'extra_content_watchdog_dir': join(BASE_DIR, 'watchdog-build'),\n 'max_tree_depth': 3,\n 'default_licence_pk': 7,\n 'content_per_page': 42,\n 'max_last_publications_level_1': 6,\n 'max_last_publications_level_2': 12,\n 'max_last_publications_level_3': 12,\n 'notes_per_page': 25,\n 'helps_per_page': 20,\n 'commits_per_page': 20,\n 'feed_length': 5,\n 'user_page_number': 5,\n 'default_image': join(BASE_DIR, 'fixtures', 'noir_black.png'),\n 'import_image_prefix': 'archive',\n 'build_pdf_when_published': True,\n 'maximum_slug_size': 150,\n 'characters_per_minute': 1500,\n 'editorial_line_link':\n 'https://zestedesavoir.com/articles/222/la-ligne-editoriale-officielle-de-zeste-de-savoir/',\n 'epub_stylesheets': {\n 'toc': Path('toc.css'),\n 'full': Path(BASE_DIR) / 'dist' / 'css' / 'zmd.css',\n 'katex': 
Path(BASE_DIR) / 'dist' / 'css' / 'katex.min.css'\n },\n 'latex_template_repo': 'NOT_EXISTING_DIR'\n },\n 'forum': {\n 'posts_per_page': 21,\n 'topics_per_page': 21,\n 'spam_limit_seconds': 60 * 15,\n 'spam_limit_participant': 2,\n 'beta_forum_id': zds_config.get('publications_being_written_forum_id', 1),\n 'max_post_length': 1000000,\n 'top_tag_max': 5,\n 'home_number': 5,\n 'old_post_limit_days': 90,\n # Exclude tags from top tags list. Tags listed here should not be relevant for most of users.\n # Be warned exclude too much tags can restrict performance\n 'top_tag_exclu': ['bug', 'suggestion', 'tutoriel', 'beta', 'article']\n },\n 'topic': {\n 'home_number': 5,\n },\n 'comment': {\n 'max_pings': 15,\n # allow to mention (and notify) members in messages\n 'enable_pings': True,\n },\n 'featured_resource': {\n 'featured_per_page': 100,\n 'home_number': 5,\n },\n 'notification': {\n 'per_page': 50,\n },\n 'paginator': {\n 'folding_limit': 4\n },\n 'search': {\n 'mark_keywords': ['javafx', 'haskell', 'groovy', 'powershell', 'latex', 'linux', 'windows'],\n 'results_per_page': 20,\n 'search_groups': {\n 'content': (\n _('Contenus publi\u00e9s'), ['publishedcontent', 'chapter']\n ),\n 'topic': (\n _('Sujets du forum'), ['topic']\n ),\n 'post': (\n _('Messages du forum'), ['post']\n ),\n },\n 'boosts': {\n 'publishedcontent': {\n 'global': 3.0,\n 'if_article': 1.0,\n 'if_tutorial': 1.0,\n 'if_medium_or_big_tutorial': 1.5,\n 'if_opinion': 0.66,\n 'if_opinion_not_picked': 0.5\n },\n 'topic': {\n 'global': 2.0,\n 'if_solved': 1.1,\n 'if_sticky': 1.2,\n 'if_locked': 0.1,\n },\n 'chapter': {\n 'global': 1.5,\n },\n 'post': {\n 'global': 1.0,\n 'if_first': 1.2,\n 'if_useful': 1.5,\n 'ld_ratio_above_1': 1.05,\n 'ld_ratio_below_1': 0.95,\n }\n }\n },\n 'visual_changes': [],\n 'display_search_bar': True,\n 'zmd': {\n 'server': 'http://127.0.0.1:27272',\n 'disable_pings': False\n },\n 'stats_ga_viewid': 'ga:86962671'\n}\n", "path": "zds/settings/abstract_base/zds.py"}], "after_files": [{"content": "from os.path import join\nfrom pathlib import Path\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .config import config\nfrom .base_dir import BASE_DIR\n\n\nzds_config = config.get('zds', {})\n\n\nGEOIP_PATH = join(BASE_DIR, 'geodata')\n\n\nES_ENABLED = True\n\nES_CONNECTIONS = {\n 'default': {\n 'hosts': ['localhost:9200'],\n }\n}\n\nES_SEARCH_INDEX = {\n 'name': 'zds_search',\n 'shards': 3,\n 'replicas': 0,\n}\n\n\n# Anonymous [Dis]Likes. 
Authors of [dis]likes before those pk will never be shown\nVOTES_ID_LIMIT = zds_config.get('VOTES_ID_LIMIT', 0)\n\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'avatar': {'size': (60, 60), 'crop': True},\n 'avatar_mini': {'size': (24, 24), 'crop': True},\n 'tutorial_illu': {'size': (60, 60), 'crop': True},\n 'article_illu': {'size': (60, 60), 'crop': True},\n 'content_thumb': {'size': (96, 96), 'crop': True},\n 'help_illu': {'size': (48, 48), 'crop': True},\n 'help_mini_illu': {'size': (26, 26), 'crop': True},\n 'gallery': {'size': (120, 120), 'crop': True},\n 'featured': {'size': (228, 228), 'crop': True},\n 'gallery_illu': {'size': (480, 270), 'crop': True},\n 'content': {'size': (960, 960), 'crop': False},\n 'social_network': {'size': (144, 144), 'crop': True},\n # ^^^ ^^^ -> minimum dimensions of 144x144\n # https://developer.twitter.com/en/docs/tweets/optimize-with-cards/overview/summary\n 'social_network_large': {'size': (600, 314), 'crop': True}\n },\n}\n\n\nZDS_APP = {\n 'site': {\n 'name': 'ZesteDeSavoir',\n 'literal_name': 'Zeste de Savoir',\n 'slogan': 'Zeste de Savoir, la connaissance pour tous et sans p\u00e9pins',\n 'abbr': 'zds',\n 'url': 'https://zestedesavoir.com',\n 'dns': 'zestedesavoir.com',\n 'email_contact': '[email protected]',\n 'email_noreply': '[email protected]',\n 'forum_feedback_users': '/forums/communaute/bug-suggestions/',\n 'contribute_link': 'https://docs.zestedesavoir.com/contributing.html',\n 'short_description': '',\n 'long_description': 'Zeste de Savoir est un site de partage de connaissances '\n 'sur lequel vous trouverez des tutoriels de tous niveaux, '\n 'des articles et des forums d\u2019entraide anim\u00e9s par et pour '\n 'la communaut\u00e9.',\n 'googleAnalyticsID': zds_config.get('google_analytics_id'),\n 'googleTagManagerID': zds_config.get('google_tag_manager_id'),\n 'association': {\n 'name': 'Zeste de Savoir',\n 'fee': zds_config.get('association_fee', '20\u00a0\u20ac'),\n 'email': '[email protected]',\n 'email_ca': '[email protected]',\n 'forum_ca_pk': 25\n },\n 'repository': {\n 'url': 'https://github.com/zestedesavoir/zds-site',\n 'bugtracker': 'https://github.com/zestedesavoir/zds-site/issues',\n 'api': 'https://api.github.com/repos/zestedesavoir/zds-site',\n 'tags': [\n 'C-Back', 'C-Front', 'C-API', 'C-Documentation', 'C-Infra', 'S-Bug', 'S-R\u00e9gression',\n 'S-\u00c9volution'\n ]\n },\n 'licenses': {\n 'logo': {\n 'code': 'CC-BY',\n 'title': 'Creative Commons License',\n 'description': 'Licence Creative Commons Attribution - Pas d\u2019Utilisation Commerciale - '\n 'Partage dans les M\u00eames Conditions 4.0 International.',\n 'url_image': 'http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png',\n 'url_license': 'http://creativecommons.org/licenses/by-nc-sa/4.0/',\n 'author': 'MaxRoyo'\n },\n 'cookies': {\n 'code': 'CC-BY',\n 'title': 'Licence Creative Commons',\n 'description': 'licence Creative Commons Attribution 4.0 International',\n 'url_image': 'http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png',\n 'url_license': 'http://creativecommons.org/licenses/by-nc-sa/4.0/'\n },\n 'source': {\n 'code': 'GPL v3',\n 'url_license': 'http://www.gnu.org/licenses/gpl-3.0.html',\n 'provider_name': 'Progdupeupl',\n 'provider_url': 'http://pdp.microjoe.org',\n },\n 'licence_info_title': 'http://zestedesavoir.com/tutoriels/281/le-droit-dauteur-creative-commons-et-les-lic'\n 'ences-sur-zeste-de-savoir/',\n 'licence_info_link': 'Le droit d\\'auteur, Creative Commons et les licences sur Zeste de Savoir'\n },\n 'hosting': {\n 'name': 'GANDI SAS',\n 
'address': '63-65 boulevard Massena - 75013 Paris - France'\n },\n 'social': {\n 'mastodon': 'https://framapiaf.org/@ZesteDeSavoir',\n 'facebook': 'https://www.facebook.com/ZesteDeSavoir',\n 'twitter': 'https://twitter.com/ZesteDeSavoir'\n },\n 'cnil': '1771020',\n },\n 'member': {\n 'bot_account': zds_config.get('member_bot_username', 'admin'),\n 'anonymous_account': zds_config.get('member_anonymous_username', 'anonymous'),\n 'external_account': zds_config.get('member_external_username', 'external'),\n 'bot_group': 'bot',\n 'dev_group': 'devs',\n 'members_per_page': 100,\n 'providers_per_page': 100,\n 'hats_per_page': 50,\n 'users_in_hats_list': 5,\n 'requested_hats_per_page': 100,\n 'update_last_visit_interval': 600, # seconds\n 'old_smileys_allowed': False,\n 'old_smileys_cookie_key': 'use_old_smileys',\n },\n 'hats': {\n 'moderation': 'Staff',\n 'validation': 'Staff',\n 'hats_management': 'Staff',\n },\n 'gallery': {\n 'image_max_size': 1024 * 1024,\n 'gallery_per_page': 21,\n 'images_per_page': 21,\n },\n 'tutorial': {\n 'home_number': 4,\n },\n 'article': {\n 'home_number': 3\n },\n 'opinions': {\n 'home_number': 5\n },\n 'content': {\n 'repo_private_path': join(BASE_DIR, 'contents-private'),\n 'repo_public_path': join(BASE_DIR, 'contents-public'),\n 'extra_contents_dirname': 'extra_contents',\n # can also be 'extra_content_generation_policy': 'WATCHDOG'\n # or 'extra_content_generation_policy': 'NOTHING'\n 'extra_content_generation_policy': 'SYNC',\n 'extra_content_watchdog_dir': join(BASE_DIR, 'watchdog-build'),\n 'max_tree_depth': 3,\n 'default_licence_pk': 7,\n 'content_per_page': 42,\n 'max_last_publications_level_1': 6,\n 'max_last_publications_level_2': 12,\n 'max_last_publications_level_3': 12,\n 'notes_per_page': 25,\n 'helps_per_page': 20,\n 'commits_per_page': 20,\n 'feed_length': 5,\n 'user_page_number': 5,\n 'default_image': join(BASE_DIR, 'fixtures', 'noir_black.png'),\n 'import_image_prefix': 'archive',\n 'build_pdf_when_published': True,\n 'maximum_slug_size': 150,\n 'characters_per_minute': 1500,\n 'editorial_line_link':\n 'https://zestedesavoir.com/articles/222/la-ligne-editoriale-officielle-de-zeste-de-savoir/',\n 'epub_stylesheets': {\n 'toc': Path('toc.css'),\n 'full': Path(BASE_DIR) / 'dist' / 'css' / 'zmd.css',\n 'katex': Path(BASE_DIR) / 'dist' / 'css' / 'katex.min.css'\n },\n 'latex_template_repo': 'NOT_EXISTING_DIR'\n },\n 'forum': {\n 'posts_per_page': 21,\n 'topics_per_page': 21,\n 'spam_limit_seconds': 60 * 15,\n 'spam_limit_participant': 2,\n 'beta_forum_id': zds_config.get('publications_being_written_forum_id', 1),\n 'max_post_length': 1000000,\n 'top_tag_max': 5,\n 'home_number': 5,\n 'old_post_limit_days': 90,\n # Exclude tags from top tags list. 
Tags listed here should not be relevant for most of users.\n # Be warned exclude too much tags can restrict performance\n 'top_tag_exclu': ['bug', 'suggestion', 'tutoriel', 'beta', 'article']\n },\n 'topic': {\n 'home_number': 5,\n },\n 'comment': {\n 'max_pings': 15,\n # allow to mention (and notify) members in messages\n 'enable_pings': True,\n },\n 'featured_resource': {\n 'featured_per_page': 100,\n 'home_number': 5,\n },\n 'notification': {\n 'per_page': 50,\n },\n 'paginator': {\n 'folding_limit': 4\n },\n 'search': {\n 'mark_keywords': ['javafx', 'haskell', 'groovy', 'powershell', 'latex', 'linux', 'windows'],\n 'results_per_page': 20,\n 'search_groups': {\n 'content': (\n _('Contenus publi\u00e9s'), ['publishedcontent', 'chapter']\n ),\n 'topic': (\n _('Sujets du forum'), ['topic']\n ),\n 'post': (\n _('Messages du forum'), ['post']\n ),\n },\n 'boosts': {\n 'publishedcontent': {\n 'global': 3.0,\n 'if_article': 1.0,\n 'if_tutorial': 1.0,\n 'if_medium_or_big_tutorial': 1.5,\n 'if_opinion': 0.66,\n 'if_opinion_not_picked': 0.5\n },\n 'topic': {\n 'global': 2.0,\n 'if_solved': 1.1,\n 'if_sticky': 1.2,\n 'if_locked': 0.1,\n },\n 'chapter': {\n 'global': 1.5,\n },\n 'post': {\n 'global': 1.0,\n 'if_first': 1.2,\n 'if_useful': 1.5,\n 'ld_ratio_above_1': 1.05,\n 'ld_ratio_below_1': 0.95,\n }\n }\n },\n 'visual_changes': [],\n 'display_search_bar': True,\n 'zmd': {\n 'server': 'http://127.0.0.1:27272',\n 'disable_pings': False\n },\n 'stats_ga_viewid': 'ga:86962671'\n}\n", "path": "zds/settings/abstract_base/zds.py"}]} | 3,886 | 153 |
gh_patches_debug_14503 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-3525 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flyout links incorrect on Mkdocs projects
GitHub links in the sidebar footer contain an extraneous path. For example, on http://tracks.readthedocs.org/en/latest/upgrading/ the "View" link points to https://github.com/TracksApp/tracks/blob/master/home/docs/checkouts/readthedocs.org/user_builds/tracks/checkouts/latest/doc/Upgrading.md.
--- END ISSUE ---
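Before the code, a short aside on where the stray segment most likely comes from: the builder writes a `docroot` value into `readthedocs-data.js`, and the flyout JavaScript later splices that value into the GitHub "View"/"Edit" URLs. If `docroot` ever carries the absolute build checkout path instead of a repo-relative directory, the link gains the extra `/home/docs/checkouts/...` prefix seen above. A minimal sketch of the difference, using made-up paths rather than anything taken from a real build:

```python
import os

# Illustrative checkout layout (hypothetical paths, not from an actual build).
root_path = "/home/docs/checkouts/readthedocs.org/user_builds/tracks/checkouts/latest"
yaml_file = os.path.join(root_path, "mkdocs.yml")

# The docs_dir handed around internally may already be an absolute path.
docs_dir = os.path.join(root_path, "doc")

# os.path.join with an absolute second argument just returns that argument,
# so using it directly as `docroot` embeds the whole build path in the URL.
print(os.path.join(root_path, docs_dir))

# Re-expressing the docs path relative to the checkout root (anchored on the
# mkdocs.yml location) yields the repo-relative "doc" that GitHub links need.
docs_path = os.path.join(os.path.dirname(yaml_file), docs_dir)
print(os.path.relpath(docs_path, root_path))
```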
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/doc_builder/backends/mkdocs.py`
Content:
```
1 """
2 MkDocs_ backend for building docs.
3
4 .. _MkDocs: http://www.mkdocs.org/
5 """
6 from __future__ import (
7 absolute_import, division, print_function, unicode_literals)
8
9 import json
10 import logging
11 import os
12
13 import yaml
14 from django.conf import settings
15 from django.template import loader as template_loader
16
17 from readthedocs.doc_builder.base import BaseBuilder
18 from readthedocs.doc_builder.exceptions import BuildEnvironmentError
19
20 log = logging.getLogger(__name__)
21
22
23 def get_absolute_static_url():
24 """
25 Get the fully qualified static URL from settings.
26
27 Mkdocs needs a full domain because it tries to link to local files.
28 """
29 static_url = settings.STATIC_URL
30
31 if not static_url.startswith('http'):
32 domain = getattr(settings, 'PRODUCTION_DOMAIN')
33 static_url = 'http://{}{}'.format(domain, static_url)
34
35 return static_url
36
37
38 class BaseMkdocs(BaseBuilder):
39
40 """Mkdocs builder."""
41
42 # The default theme for mkdocs is the 'mkdocs' theme
43 DEFAULT_THEME_NAME = 'mkdocs'
44
45 def __init__(self, *args, **kwargs):
46 super(BaseMkdocs, self).__init__(*args, **kwargs)
47 self.old_artifact_path = os.path.join(
48 self.version.project.checkout_path(self.version.slug),
49 self.build_dir)
50 self.root_path = self.version.project.checkout_path(self.version.slug)
51 self.yaml_file = self.get_yaml_config()
52
53 def get_yaml_config(self):
54 """Find the ``mkdocs.yml`` file in the project root."""
55 mkdoc_path = self.config.mkdocs.configuration
56 if not mkdoc_path:
57 mkdoc_path = os.path.join(
58 self.project.checkout_path(self.version.slug),
59 'mkdocs.yml'
60 )
61 if not os.path.exists(mkdoc_path):
62 return None
63 return mkdoc_path
64
65 def load_yaml_config(self):
66 """
67 Load a YAML config.
68
69 Raise BuildEnvironmentError if failed due to syntax errors.
70 """
71 try:
72 return yaml.safe_load(
73 open(self.yaml_file, 'r')
74 )
75 except IOError:
76 return {
77 'site_name': self.version.project.name,
78 }
79 except yaml.YAMLError as exc:
80 note = ''
81 if hasattr(exc, 'problem_mark'):
82 mark = exc.problem_mark
83 note = ' (line %d, column %d)' % (mark.line + 1, mark.column + 1)
84 raise BuildEnvironmentError(
85 'Your mkdocs.yml could not be loaded, '
86 'possibly due to a syntax error{note}'.format(note=note)
87 )
88
89 def append_conf(self, **__):
90 """Set mkdocs config values."""
91 if not self.yaml_file:
92 self.yaml_file = os.path.join(self.root_path, 'mkdocs.yml')
93
94 user_config = self.load_yaml_config()
95
96 # Handle custom docs dirs
97 user_docs_dir = user_config.get('docs_dir')
98 docs_dir = self.docs_dir(docs_dir=user_docs_dir)
99 self.create_index(extension='md')
100 user_config['docs_dir'] = docs_dir
101
102 # Set mkdocs config values
103 static_url = get_absolute_static_url()
104 user_config.setdefault('extra_javascript', []).extend([
105 'readthedocs-data.js',
106 '%score/js/readthedocs-doc-embed.js' % static_url,
107 '%sjavascript/readthedocs-analytics.js' % static_url,
108 ])
109 user_config.setdefault('extra_css', []).extend([
110 '%scss/badge_only.css' % static_url,
111 '%scss/readthedocs-doc-embed.css' % static_url,
112 ])
113
114 docs_path = os.path.join(self.root_path, docs_dir)
115
116 # RTD javascript writing
117 rtd_data = self.generate_rtd_data(
118 docs_dir=docs_dir,
119 mkdocs_config=user_config
120 )
121 with open(os.path.join(docs_path, 'readthedocs-data.js'), 'w') as f:
122 f.write(rtd_data)
123
124 # Use Read the Docs' analytics setup rather than mkdocs'
125 # This supports using RTD's privacy improvements around analytics
126 user_config['google_analytics'] = None
127
128 # Write the modified mkdocs configuration
129 yaml.safe_dump(
130 user_config,
131 open(self.yaml_file, 'w')
132 )
133
134 # Write the mkdocs.yml to the build logs
135 self.run(
136 'cat',
137 os.path.relpath(self.yaml_file, self.root_path),
138 cwd=self.root_path,
139 )
140
141 def generate_rtd_data(self, docs_dir, mkdocs_config):
142 """Generate template properties and render readthedocs-data.js."""
143 # Use the analytics code from mkdocs.yml
144 # if it isn't set already by Read the Docs,
145 analytics_code = self.version.project.analytics_code
146 if not analytics_code and mkdocs_config.get('google_analytics'):
147 # http://www.mkdocs.org/user-guide/configuration/#google_analytics
148 analytics_code = mkdocs_config['google_analytics'][0]
149
150 # Will be available in the JavaScript as READTHEDOCS_DATA.
151 readthedocs_data = {
152 'project': self.version.project.slug,
153 'version': self.version.slug,
154 'language': self.version.project.language,
155 'programming_language': self.version.project.programming_language,
156 'page': None,
157 'theme': self.get_theme_name(mkdocs_config),
158 'builder': "mkdocs",
159 'docroot': docs_dir,
160 'source_suffix': ".md",
161 'api_host': getattr(settings, 'PUBLIC_API_URL', 'https://readthedocs.org'),
162 'ad_free': not self.project.show_advertising,
163 'commit': self.version.project.vcs_repo(self.version.slug).commit,
164 'global_analytics_code': getattr(settings, 'GLOBAL_ANALYTICS_CODE', 'UA-17997319-1'),
165 'user_analytics_code': analytics_code,
166 }
167 data_json = json.dumps(readthedocs_data, indent=4)
168 data_ctx = {
169 'data_json': data_json,
170 'current_version': readthedocs_data['version'],
171 'slug': readthedocs_data['project'],
172 'html_theme': readthedocs_data['theme'],
173 'pagename': None,
174 }
175 tmpl = template_loader.get_template('doc_builder/data.js.tmpl')
176 return tmpl.render(data_ctx)
177
178 def build(self):
179 checkout_path = self.project.checkout_path(self.version.slug)
180 build_command = [
181 'python',
182 self.python_env.venv_bin(filename='mkdocs'),
183 self.builder,
184 '--clean',
185 '--site-dir', self.build_dir,
186 '--config-file', self.yaml_file,
187 ]
188 if self.config.mkdocs.fail_on_warning:
189 build_command.append('--strict')
190 cmd_ret = self.run(
191 *build_command,
192 cwd=checkout_path,
193 bin_path=self.python_env.venv_bin()
194 )
195 return cmd_ret.successful
196
197 def get_theme_name(self, mkdocs_config):
198 """
199 Get the theme configuration in the mkdocs_config
200
201 In v0.17.0, the theme configuration switched
202 from two separate configs (both optional) to a nested directive.
203
204 :see: http://www.mkdocs.org/about/release-notes/#theme-customization-1164
205 :returns: the name of the theme RTD will use
206 """
207 theme_setting = mkdocs_config.get('theme')
208 if isinstance(theme_setting, dict):
209 # Full nested theme config (the new configuration)
210 return theme_setting.get('name') or self.DEFAULT_THEME_NAME
211
212 if theme_setting:
213 # A string which is the name of the theme
214 return theme_setting
215
216 theme_dir = mkdocs_config.get('theme_dir')
217 if theme_dir:
218 # Use the name of the directory in this project's custom theme directory
219 return theme_dir.rstrip('/').split('/')[-1]
220
221 return self.DEFAULT_THEME_NAME
222
223
224 class MkdocsHTML(BaseMkdocs):
225 type = 'mkdocs'
226 builder = 'build'
227 build_dir = '_build/html'
228
229
230 class MkdocsJSON(BaseMkdocs):
231 type = 'mkdocs_json'
232 builder = 'json'
233 build_dir = '_build/json'
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/doc_builder/backends/mkdocs.py b/readthedocs/doc_builder/backends/mkdocs.py
--- a/readthedocs/doc_builder/backends/mkdocs.py
+++ b/readthedocs/doc_builder/backends/mkdocs.py
@@ -111,11 +111,16 @@
'%scss/readthedocs-doc-embed.css' % static_url,
])
- docs_path = os.path.join(self.root_path, docs_dir)
+ # The docs path is relative to the location
+ # of the mkdocs configuration file.
+ docs_path = os.path.join(
+ os.path.dirname(self.yaml_file),
+ docs_dir
+ )
# RTD javascript writing
rtd_data = self.generate_rtd_data(
- docs_dir=docs_dir,
+ docs_dir=os.path.relpath(docs_path, self.root_path),
mkdocs_config=user_config
)
with open(os.path.join(docs_path, 'readthedocs-data.js'), 'w') as f:
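This mirrors MkDocs' own convention that `docs_dir` is resolved relative to the configuration file. Anchoring `docs_path` on the `mkdocs.yml` location and passing `os.path.relpath(docs_path, self.root_path)` as the docroot keeps the value repo-relative no matter where the checkout lives on disk, which is what the flyout's GitHub links require.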
| {"golden_diff": "diff --git a/readthedocs/doc_builder/backends/mkdocs.py b/readthedocs/doc_builder/backends/mkdocs.py\n--- a/readthedocs/doc_builder/backends/mkdocs.py\n+++ b/readthedocs/doc_builder/backends/mkdocs.py\n@@ -111,11 +111,16 @@\n '%scss/readthedocs-doc-embed.css' % static_url,\n ])\n \n- docs_path = os.path.join(self.root_path, docs_dir)\n+ # The docs path is relative to the location\n+ # of the mkdocs configuration file.\n+ docs_path = os.path.join(\n+ os.path.dirname(self.yaml_file),\n+ docs_dir\n+ )\n \n # RTD javascript writing\n rtd_data = self.generate_rtd_data(\n- docs_dir=docs_dir,\n+ docs_dir=os.path.relpath(docs_path, self.root_path),\n mkdocs_config=user_config\n )\n with open(os.path.join(docs_path, 'readthedocs-data.js'), 'w') as f:\n", "issue": "Flyout links incorrect on Mkdocs projects\nGitHub links in the sidebar footer contain an extraneous path. For example, on http://tracks.readthedocs.org/en/latest/upgrading/ the \"View\" link points to https://github.com/TracksApp/tracks/blob/master/home/docs/checkouts/readthedocs.org/user_builds/tracks/checkouts/latest/doc/Upgrading.md.\n\n", "before_files": [{"content": "\"\"\"\nMkDocs_ backend for building docs.\n\n.. _MkDocs: http://www.mkdocs.org/\n\"\"\"\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport json\nimport logging\nimport os\n\nimport yaml\nfrom django.conf import settings\nfrom django.template import loader as template_loader\n\nfrom readthedocs.doc_builder.base import BaseBuilder\nfrom readthedocs.doc_builder.exceptions import BuildEnvironmentError\n\nlog = logging.getLogger(__name__)\n\n\ndef get_absolute_static_url():\n \"\"\"\n Get the fully qualified static URL from settings.\n\n Mkdocs needs a full domain because it tries to link to local files.\n \"\"\"\n static_url = settings.STATIC_URL\n\n if not static_url.startswith('http'):\n domain = getattr(settings, 'PRODUCTION_DOMAIN')\n static_url = 'http://{}{}'.format(domain, static_url)\n\n return static_url\n\n\nclass BaseMkdocs(BaseBuilder):\n\n \"\"\"Mkdocs builder.\"\"\"\n\n # The default theme for mkdocs is the 'mkdocs' theme\n DEFAULT_THEME_NAME = 'mkdocs'\n\n def __init__(self, *args, **kwargs):\n super(BaseMkdocs, self).__init__(*args, **kwargs)\n self.old_artifact_path = os.path.join(\n self.version.project.checkout_path(self.version.slug),\n self.build_dir)\n self.root_path = self.version.project.checkout_path(self.version.slug)\n self.yaml_file = self.get_yaml_config()\n\n def get_yaml_config(self):\n \"\"\"Find the ``mkdocs.yml`` file in the project root.\"\"\"\n mkdoc_path = self.config.mkdocs.configuration\n if not mkdoc_path:\n mkdoc_path = os.path.join(\n self.project.checkout_path(self.version.slug),\n 'mkdocs.yml'\n )\n if not os.path.exists(mkdoc_path):\n return None\n return mkdoc_path\n\n def load_yaml_config(self):\n \"\"\"\n Load a YAML config.\n\n Raise BuildEnvironmentError if failed due to syntax errors.\n \"\"\"\n try:\n return yaml.safe_load(\n open(self.yaml_file, 'r')\n )\n except IOError:\n return {\n 'site_name': self.version.project.name,\n }\n except yaml.YAMLError as exc:\n note = ''\n if hasattr(exc, 'problem_mark'):\n mark = exc.problem_mark\n note = ' (line %d, column %d)' % (mark.line + 1, mark.column + 1)\n raise BuildEnvironmentError(\n 'Your mkdocs.yml could not be loaded, '\n 'possibly due to a syntax error{note}'.format(note=note)\n )\n\n def append_conf(self, **__):\n \"\"\"Set mkdocs config values.\"\"\"\n if not self.yaml_file:\n self.yaml_file = 
os.path.join(self.root_path, 'mkdocs.yml')\n\n user_config = self.load_yaml_config()\n\n # Handle custom docs dirs\n user_docs_dir = user_config.get('docs_dir')\n docs_dir = self.docs_dir(docs_dir=user_docs_dir)\n self.create_index(extension='md')\n user_config['docs_dir'] = docs_dir\n\n # Set mkdocs config values\n static_url = get_absolute_static_url()\n user_config.setdefault('extra_javascript', []).extend([\n 'readthedocs-data.js',\n '%score/js/readthedocs-doc-embed.js' % static_url,\n '%sjavascript/readthedocs-analytics.js' % static_url,\n ])\n user_config.setdefault('extra_css', []).extend([\n '%scss/badge_only.css' % static_url,\n '%scss/readthedocs-doc-embed.css' % static_url,\n ])\n\n docs_path = os.path.join(self.root_path, docs_dir)\n\n # RTD javascript writing\n rtd_data = self.generate_rtd_data(\n docs_dir=docs_dir,\n mkdocs_config=user_config\n )\n with open(os.path.join(docs_path, 'readthedocs-data.js'), 'w') as f:\n f.write(rtd_data)\n\n # Use Read the Docs' analytics setup rather than mkdocs'\n # This supports using RTD's privacy improvements around analytics\n user_config['google_analytics'] = None\n\n # Write the modified mkdocs configuration\n yaml.safe_dump(\n user_config,\n open(self.yaml_file, 'w')\n )\n\n # Write the mkdocs.yml to the build logs\n self.run(\n 'cat',\n os.path.relpath(self.yaml_file, self.root_path),\n cwd=self.root_path,\n )\n\n def generate_rtd_data(self, docs_dir, mkdocs_config):\n \"\"\"Generate template properties and render readthedocs-data.js.\"\"\"\n # Use the analytics code from mkdocs.yml\n # if it isn't set already by Read the Docs,\n analytics_code = self.version.project.analytics_code\n if not analytics_code and mkdocs_config.get('google_analytics'):\n # http://www.mkdocs.org/user-guide/configuration/#google_analytics\n analytics_code = mkdocs_config['google_analytics'][0]\n\n # Will be available in the JavaScript as READTHEDOCS_DATA.\n readthedocs_data = {\n 'project': self.version.project.slug,\n 'version': self.version.slug,\n 'language': self.version.project.language,\n 'programming_language': self.version.project.programming_language,\n 'page': None,\n 'theme': self.get_theme_name(mkdocs_config),\n 'builder': \"mkdocs\",\n 'docroot': docs_dir,\n 'source_suffix': \".md\",\n 'api_host': getattr(settings, 'PUBLIC_API_URL', 'https://readthedocs.org'),\n 'ad_free': not self.project.show_advertising,\n 'commit': self.version.project.vcs_repo(self.version.slug).commit,\n 'global_analytics_code': getattr(settings, 'GLOBAL_ANALYTICS_CODE', 'UA-17997319-1'),\n 'user_analytics_code': analytics_code,\n }\n data_json = json.dumps(readthedocs_data, indent=4)\n data_ctx = {\n 'data_json': data_json,\n 'current_version': readthedocs_data['version'],\n 'slug': readthedocs_data['project'],\n 'html_theme': readthedocs_data['theme'],\n 'pagename': None,\n }\n tmpl = template_loader.get_template('doc_builder/data.js.tmpl')\n return tmpl.render(data_ctx)\n\n def build(self):\n checkout_path = self.project.checkout_path(self.version.slug)\n build_command = [\n 'python',\n self.python_env.venv_bin(filename='mkdocs'),\n self.builder,\n '--clean',\n '--site-dir', self.build_dir,\n '--config-file', self.yaml_file,\n ]\n if self.config.mkdocs.fail_on_warning:\n build_command.append('--strict')\n cmd_ret = self.run(\n *build_command,\n cwd=checkout_path,\n bin_path=self.python_env.venv_bin()\n )\n return cmd_ret.successful\n\n def get_theme_name(self, mkdocs_config):\n \"\"\"\n Get the theme configuration in the mkdocs_config\n\n In v0.17.0, the theme 
configuration switched\n from two separate configs (both optional) to a nested directive.\n\n :see: http://www.mkdocs.org/about/release-notes/#theme-customization-1164\n :returns: the name of the theme RTD will use\n \"\"\"\n theme_setting = mkdocs_config.get('theme')\n if isinstance(theme_setting, dict):\n # Full nested theme config (the new configuration)\n return theme_setting.get('name') or self.DEFAULT_THEME_NAME\n\n if theme_setting:\n # A string which is the name of the theme\n return theme_setting\n\n theme_dir = mkdocs_config.get('theme_dir')\n if theme_dir:\n # Use the name of the directory in this project's custom theme directory\n return theme_dir.rstrip('/').split('/')[-1]\n\n return self.DEFAULT_THEME_NAME\n\n\nclass MkdocsHTML(BaseMkdocs):\n type = 'mkdocs'\n builder = 'build'\n build_dir = '_build/html'\n\n\nclass MkdocsJSON(BaseMkdocs):\n type = 'mkdocs_json'\n builder = 'json'\n build_dir = '_build/json'\n", "path": "readthedocs/doc_builder/backends/mkdocs.py"}], "after_files": [{"content": "\"\"\"\nMkDocs_ backend for building docs.\n\n.. _MkDocs: http://www.mkdocs.org/\n\"\"\"\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport json\nimport logging\nimport os\n\nimport yaml\nfrom django.conf import settings\nfrom django.template import loader as template_loader\n\nfrom readthedocs.doc_builder.base import BaseBuilder\nfrom readthedocs.doc_builder.exceptions import BuildEnvironmentError\n\nlog = logging.getLogger(__name__)\n\n\ndef get_absolute_static_url():\n \"\"\"\n Get the fully qualified static URL from settings.\n\n Mkdocs needs a full domain because it tries to link to local files.\n \"\"\"\n static_url = settings.STATIC_URL\n\n if not static_url.startswith('http'):\n domain = getattr(settings, 'PRODUCTION_DOMAIN')\n static_url = 'http://{}{}'.format(domain, static_url)\n\n return static_url\n\n\nclass BaseMkdocs(BaseBuilder):\n\n \"\"\"Mkdocs builder.\"\"\"\n\n # The default theme for mkdocs is the 'mkdocs' theme\n DEFAULT_THEME_NAME = 'mkdocs'\n\n def __init__(self, *args, **kwargs):\n super(BaseMkdocs, self).__init__(*args, **kwargs)\n self.old_artifact_path = os.path.join(\n self.version.project.checkout_path(self.version.slug),\n self.build_dir)\n self.root_path = self.version.project.checkout_path(self.version.slug)\n self.yaml_file = self.get_yaml_config()\n\n def get_yaml_config(self):\n \"\"\"Find the ``mkdocs.yml`` file in the project root.\"\"\"\n mkdoc_path = self.config.mkdocs.configuration\n if not mkdoc_path:\n mkdoc_path = os.path.join(\n self.project.checkout_path(self.version.slug),\n 'mkdocs.yml'\n )\n if not os.path.exists(mkdoc_path):\n return None\n return mkdoc_path\n\n def load_yaml_config(self):\n \"\"\"\n Load a YAML config.\n\n Raise BuildEnvironmentError if failed due to syntax errors.\n \"\"\"\n try:\n return yaml.safe_load(\n open(self.yaml_file, 'r')\n )\n except IOError:\n return {\n 'site_name': self.version.project.name,\n }\n except yaml.YAMLError as exc:\n note = ''\n if hasattr(exc, 'problem_mark'):\n mark = exc.problem_mark\n note = ' (line %d, column %d)' % (mark.line + 1, mark.column + 1)\n raise BuildEnvironmentError(\n 'Your mkdocs.yml could not be loaded, '\n 'possibly due to a syntax error{note}'.format(note=note)\n )\n\n def append_conf(self, **__):\n \"\"\"Set mkdocs config values.\"\"\"\n if not self.yaml_file:\n self.yaml_file = os.path.join(self.root_path, 'mkdocs.yml')\n\n user_config = self.load_yaml_config()\n\n # Handle custom docs dirs\n user_docs_dir = 
user_config.get('docs_dir')\n docs_dir = self.docs_dir(docs_dir=user_docs_dir)\n self.create_index(extension='md')\n user_config['docs_dir'] = docs_dir\n\n # Set mkdocs config values\n static_url = get_absolute_static_url()\n user_config.setdefault('extra_javascript', []).extend([\n 'readthedocs-data.js',\n '%score/js/readthedocs-doc-embed.js' % static_url,\n '%sjavascript/readthedocs-analytics.js' % static_url,\n ])\n user_config.setdefault('extra_css', []).extend([\n '%scss/badge_only.css' % static_url,\n '%scss/readthedocs-doc-embed.css' % static_url,\n ])\n\n # The docs path is relative to the location\n # of the mkdocs configuration file.\n docs_path = os.path.join(\n os.path.dirname(self.yaml_file),\n docs_dir\n )\n\n # RTD javascript writing\n rtd_data = self.generate_rtd_data(\n docs_dir=os.path.relpath(docs_path, self.root_path),\n mkdocs_config=user_config\n )\n with open(os.path.join(docs_path, 'readthedocs-data.js'), 'w') as f:\n f.write(rtd_data)\n\n # Use Read the Docs' analytics setup rather than mkdocs'\n # This supports using RTD's privacy improvements around analytics\n user_config['google_analytics'] = None\n\n # Write the modified mkdocs configuration\n yaml.safe_dump(\n user_config,\n open(self.yaml_file, 'w')\n )\n\n # Write the mkdocs.yml to the build logs\n self.run(\n 'cat',\n os.path.relpath(self.yaml_file, self.root_path),\n cwd=self.root_path,\n )\n\n def generate_rtd_data(self, docs_dir, mkdocs_config):\n \"\"\"Generate template properties and render readthedocs-data.js.\"\"\"\n # Use the analytics code from mkdocs.yml\n # if it isn't set already by Read the Docs,\n analytics_code = self.version.project.analytics_code\n if not analytics_code and mkdocs_config.get('google_analytics'):\n # http://www.mkdocs.org/user-guide/configuration/#google_analytics\n analytics_code = mkdocs_config['google_analytics'][0]\n\n # Will be available in the JavaScript as READTHEDOCS_DATA.\n readthedocs_data = {\n 'project': self.version.project.slug,\n 'version': self.version.slug,\n 'language': self.version.project.language,\n 'programming_language': self.version.project.programming_language,\n 'page': None,\n 'theme': self.get_theme_name(mkdocs_config),\n 'builder': \"mkdocs\",\n 'docroot': docs_dir,\n 'source_suffix': \".md\",\n 'api_host': getattr(settings, 'PUBLIC_API_URL', 'https://readthedocs.org'),\n 'ad_free': not self.project.show_advertising,\n 'commit': self.version.project.vcs_repo(self.version.slug).commit,\n 'global_analytics_code': getattr(settings, 'GLOBAL_ANALYTICS_CODE', 'UA-17997319-1'),\n 'user_analytics_code': analytics_code,\n }\n data_json = json.dumps(readthedocs_data, indent=4)\n data_ctx = {\n 'data_json': data_json,\n 'current_version': readthedocs_data['version'],\n 'slug': readthedocs_data['project'],\n 'html_theme': readthedocs_data['theme'],\n 'pagename': None,\n }\n tmpl = template_loader.get_template('doc_builder/data.js.tmpl')\n return tmpl.render(data_ctx)\n\n def build(self):\n checkout_path = self.project.checkout_path(self.version.slug)\n build_command = [\n 'python',\n self.python_env.venv_bin(filename='mkdocs'),\n self.builder,\n '--clean',\n '--site-dir', self.build_dir,\n '--config-file', self.yaml_file,\n ]\n if self.config.mkdocs.fail_on_warning:\n build_command.append('--strict')\n cmd_ret = self.run(\n *build_command,\n cwd=checkout_path,\n bin_path=self.python_env.venv_bin()\n )\n return cmd_ret.successful\n\n def get_theme_name(self, mkdocs_config):\n \"\"\"\n Get the theme configuration in the mkdocs_config\n\n In v0.17.0, the theme 
configuration switched\n from two separate configs (both optional) to a nested directive.\n\n :see: http://www.mkdocs.org/about/release-notes/#theme-customization-1164\n :returns: the name of the theme RTD will use\n \"\"\"\n theme_setting = mkdocs_config.get('theme')\n if isinstance(theme_setting, dict):\n # Full nested theme config (the new configuration)\n return theme_setting.get('name') or self.DEFAULT_THEME_NAME\n\n if theme_setting:\n # A string which is the name of the theme\n return theme_setting\n\n theme_dir = mkdocs_config.get('theme_dir')\n if theme_dir:\n # Use the name of the directory in this project's custom theme directory\n return theme_dir.rstrip('/').split('/')[-1]\n\n return self.DEFAULT_THEME_NAME\n\n\nclass MkdocsHTML(BaseMkdocs):\n type = 'mkdocs'\n builder = 'build'\n build_dir = '_build/html'\n\n\nclass MkdocsJSON(BaseMkdocs):\n type = 'mkdocs_json'\n builder = 'json'\n build_dir = '_build/json'\n", "path": "readthedocs/doc_builder/backends/mkdocs.py"}]} | 2,713 | 226 |
gh_patches_debug_38482 | rasdani/github-patches | git_diff | great-expectations__great_expectations-2749 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use a cleaner solution for non-truncating division in Python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
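To make the preference concrete, here is a minimal, self-contained illustration of the two idioms (the numbers are arbitrary):

```python
# Old workaround: coerce to float so / does not truncate on Python 2.
# ratio = 1. * numerator / denominator

# Cleaner: opt the whole module into true division, as in Python 3.
from __future__ import division

numerator, denominator = 7, 2
print(numerator / denominator)    # 3.5 on both Python 2 and Python 3
print(numerator // denominator)   # 3, when truncating division is intended
```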
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/datasource/data_connector/batch_filter.py`
Content:
```
1 import itertools
2 import logging
3 from typing import Callable, Dict, Optional, Union
4
5 import great_expectations.exceptions as ge_exceptions
6 from great_expectations.core.id_dict import IDDict
7 from great_expectations.util import is_int
8
9 logger = logging.getLogger(__name__)
10
11
12 def build_batch_filter(
13 data_connector_query_dict: Optional[
14 Dict[
15 str,
16 Optional[
17 Union[
18 int,
19 list,
20 tuple,
21 slice,
22 str,
23 Union[Dict, IDDict],
24 Callable,
25 ]
26 ],
27 ]
28 ] = None
29 ):
30 if not data_connector_query_dict:
31 return BatchFilter(
32 custom_filter_function=None,
33 batch_filter_parameters=None,
34 index=None,
35 limit=None,
36 )
37 data_connector_query_keys: set = set(data_connector_query_dict.keys())
38 if not data_connector_query_keys <= BatchFilter.RECOGNIZED_KEYS:
39 raise ge_exceptions.BatchFilterError(
40 f"""Unrecognized data_connector_query key(s):
41 "{str(data_connector_query_keys - BatchFilter.RECOGNIZED_KEYS)}" detected.
42 """
43 )
44 custom_filter_function: Callable = data_connector_query_dict.get(
45 "custom_filter_function"
46 )
47 if custom_filter_function and not isinstance(custom_filter_function, Callable):
48 raise ge_exceptions.BatchFilterError(
49 f"""The type of a custom_filter must be a function (Python "Callable"). The type given is
50 "{str(type(custom_filter_function))}", which is illegal.
51 """
52 )
53 batch_filter_parameters: Optional[dict] = data_connector_query_dict.get(
54 "batch_filter_parameters"
55 )
56 if batch_filter_parameters:
57 if not isinstance(batch_filter_parameters, dict):
58 raise ge_exceptions.BatchFilterError(
59 f"""The type of batch_filter_parameters must be a dictionary (Python "dict"). The type given is
60 "{str(type(batch_filter_parameters))}", which is illegal.
61 """
62 )
63 if not all([isinstance(key, str) for key in batch_filter_parameters.keys()]):
64 raise ge_exceptions.BatchFilterError(
65 'All batch_filter_parameters keys must strings (Python "str").'
66 )
67 if batch_filter_parameters is not None:
68 batch_filter_parameters: IDDict = IDDict(batch_filter_parameters)
69 index: Optional[
70 Union[int, list, tuple, slice, str]
71 ] = data_connector_query_dict.get("index")
72 limit: Optional[int] = data_connector_query_dict.get("limit")
73 if limit and (not isinstance(limit, int) or limit < 0):
74 raise ge_exceptions.BatchFilterError(
75 f"""The type of a limit must be an integer (Python "int") that is greater than or equal to 0. The
76 type and value given are "{str(type(limit))}" and "{limit}", respectively, which is illegal.
77 """
78 )
79 if index is not None and limit is not None:
80 raise ge_exceptions.BatchFilterError(
81 "Only one of index or limit, but not both, can be specified (specifying both is illegal)."
82 )
83 index = _parse_index(index=index)
84 return BatchFilter(
85 custom_filter_function=custom_filter_function,
86 batch_filter_parameters=batch_filter_parameters,
87 index=index,
88 limit=limit,
89 )
90
91
92 def _parse_index(
93 index: Optional[Union[int, list, tuple, slice, str]] = None
94 ) -> Optional[Union[int, slice]]:
95 if index is None:
96 return None
97 elif isinstance(index, (int, slice)):
98 return index
99 elif isinstance(index, (list, tuple)):
100 if len(index) > 3:
101 raise ge_exceptions.BatchFilterError(
102 f"""The number of index slice components must be between 1 and 3 (the given number is
103 {len(index)}).
104 """
105 )
106 if len(index) == 1:
107 return index[0]
108 if len(index) == 2:
109 return slice(index[0], index[1], None)
110 if len(index) == 3:
111 return slice(index[0], index[1], index[2])
112 elif isinstance(index, str):
113 if is_int(value=index):
114 return _parse_index(index=int(index))
115 return _parse_index(index=[int(idx_str) for idx_str in index.split(":")])
116 else:
117 raise ge_exceptions.BatchFilterError(
118 f"""The type of index must be an integer (Python "int"), or a list (Python "list") or a tuple
119 (Python "tuple"), or a Python "slice" object, or a string that has the format of a single integer or a slice argument.
120 The type given is "{str(type(index))}", which is illegal.
121 """
122 )
123
124
125 class BatchFilter:
126 RECOGNIZED_KEYS: set = {
127 "custom_filter_function",
128 "batch_filter_parameters",
129 "index",
130 "limit",
131 }
132
133 def __init__(
134 self,
135 custom_filter_function: Callable = None,
136 batch_filter_parameters: Optional[IDDict] = None,
137 index: Optional[Union[int, slice]] = None,
138 limit: int = None,
139 ):
140 self._custom_filter_function = custom_filter_function
141 self._batch_filter_parameters = batch_filter_parameters
142 self._index = index
143 self._limit = limit
144
145 @property
146 def custom_filter_function(self) -> Callable:
147 return self._custom_filter_function
148
149 @property
150 def batch_filter_parameters(self) -> Optional[IDDict]:
151 return self._batch_filter_parameters
152
153 @property
154 def index(self) -> Optional[Union[int, slice]]:
155 return self._index
156
157 @property
158 def limit(self) -> int:
159 return self._limit
160
161 def __repr__(self) -> str:
162 doc_fields_dict: dict = {
163 "custom_filter_function": self._custom_filter_function,
164 "batch_filter_parameters": self.batch_filter_parameters,
165 "index": self.index,
166 "limit": self.limit,
167 }
168 return str(doc_fields_dict)
169
170 def select_from_data_connector_query(self, batch_definition_list=None):
171 if batch_definition_list is None:
172 return []
173 filter_function: Callable
174 if self.custom_filter_function:
175 filter_function = self.custom_filter_function
176 else:
177 filter_function = self.best_effort_batch_definition_matcher()
178 selected_batch_definitions = list(
179 filter(
180 lambda batch_definition: filter_function(
181 batch_identifiers=batch_definition.batch_identifiers,
182 ),
183 batch_definition_list,
184 )
185 )
186 if self.index is None:
187 selected_batch_definitions = selected_batch_definitions[: self.limit]
188 else:
189 if isinstance(self.index, int):
190 selected_batch_definitions = [selected_batch_definitions[self.index]]
191 else:
192 selected_batch_definitions = list(
193 itertools.chain.from_iterable(
194 [selected_batch_definitions[self.index]]
195 )
196 )
197 return selected_batch_definitions
198
199 def best_effort_batch_definition_matcher(self) -> Callable:
200 def match_batch_identifiers_to_batch_filter_params(
201 batch_identifiers: dict,
202 ) -> bool:
203 if self.batch_filter_parameters:
204 if not batch_identifiers:
205 return False
206
207 for batch_filter_parameter, val in self.batch_filter_parameters.items():
208 if not (
209 batch_filter_parameter in batch_identifiers
210 and batch_identifiers[batch_filter_parameter] == val
211 ):
212 return False
213
214 return True
215
216 return match_batch_identifiers_to_batch_filter_params
217
```
--- END FILES ---
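For orientation, a small usage sketch of the module above; it assumes `great_expectations` is importable and the query values are purely illustrative:

```python
from great_expectations.datasource.data_connector.batch_filter import build_batch_filter

# "0:3" is parsed by _parse_index into slice(0, 3), and batch_filter_parameters
# must be a dict with string keys, as enforced by build_batch_filter above.
batch_filter = build_batch_filter(
    data_connector_query_dict={
        "batch_filter_parameters": {"year": "2020"},
        "index": "0:3",
    }
)
print(batch_filter)  # repr shows the filter function, parameters, index and limit
```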
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/great_expectations/datasource/data_connector/batch_filter.py b/great_expectations/datasource/data_connector/batch_filter.py
--- a/great_expectations/datasource/data_connector/batch_filter.py
+++ b/great_expectations/datasource/data_connector/batch_filter.py
@@ -1,8 +1,9 @@
import itertools
import logging
-from typing import Callable, Dict, Optional, Union
+from typing import Callable, Dict, List, Optional, Union
import great_expectations.exceptions as ge_exceptions
+from great_expectations.core.batch import BatchDefinition
from great_expectations.core.id_dict import IDDict
from great_expectations.util import is_int
@@ -50,9 +51,9 @@
"{str(type(custom_filter_function))}", which is illegal.
"""
)
- batch_filter_parameters: Optional[dict] = data_connector_query_dict.get(
- "batch_filter_parameters"
- )
+ batch_filter_parameters: Optional[
+ Union[dict, IDDict]
+ ] = data_connector_query_dict.get("batch_filter_parameters")
if batch_filter_parameters:
if not isinstance(batch_filter_parameters, dict):
raise ge_exceptions.BatchFilterError(
@@ -64,8 +65,7 @@
raise ge_exceptions.BatchFilterError(
'All batch_filter_parameters keys must strings (Python "str").'
)
- if batch_filter_parameters is not None:
- batch_filter_parameters: IDDict = IDDict(batch_filter_parameters)
+ batch_filter_parameters = IDDict(batch_filter_parameters)
index: Optional[
Union[int, list, tuple, slice, str]
] = data_connector_query_dict.get("index")
@@ -167,7 +167,9 @@
}
return str(doc_fields_dict)
- def select_from_data_connector_query(self, batch_definition_list=None):
+ def select_from_data_connector_query(
+ self, batch_definition_list: Optional[List[BatchDefinition]] = None
+ ) -> List[BatchDefinition]:
if batch_definition_list is None:
return []
filter_function: Callable
@@ -175,6 +177,7 @@
filter_function = self.custom_filter_function
else:
filter_function = self.best_effort_batch_definition_matcher()
+ selected_batch_definitions: List[BatchDefinition]
selected_batch_definitions = list(
filter(
lambda batch_definition: filter_function(
| {"golden_diff": "diff --git a/great_expectations/datasource/data_connector/batch_filter.py b/great_expectations/datasource/data_connector/batch_filter.py\n--- a/great_expectations/datasource/data_connector/batch_filter.py\n+++ b/great_expectations/datasource/data_connector/batch_filter.py\n@@ -1,8 +1,9 @@\n import itertools\n import logging\n-from typing import Callable, Dict, Optional, Union\n+from typing import Callable, Dict, List, Optional, Union\n \n import great_expectations.exceptions as ge_exceptions\n+from great_expectations.core.batch import BatchDefinition\n from great_expectations.core.id_dict import IDDict\n from great_expectations.util import is_int\n \n@@ -50,9 +51,9 @@\n \"{str(type(custom_filter_function))}\", which is illegal.\n \"\"\"\n )\n- batch_filter_parameters: Optional[dict] = data_connector_query_dict.get(\n- \"batch_filter_parameters\"\n- )\n+ batch_filter_parameters: Optional[\n+ Union[dict, IDDict]\n+ ] = data_connector_query_dict.get(\"batch_filter_parameters\")\n if batch_filter_parameters:\n if not isinstance(batch_filter_parameters, dict):\n raise ge_exceptions.BatchFilterError(\n@@ -64,8 +65,7 @@\n raise ge_exceptions.BatchFilterError(\n 'All batch_filter_parameters keys must strings (Python \"str\").'\n )\n- if batch_filter_parameters is not None:\n- batch_filter_parameters: IDDict = IDDict(batch_filter_parameters)\n+ batch_filter_parameters = IDDict(batch_filter_parameters)\n index: Optional[\n Union[int, list, tuple, slice, str]\n ] = data_connector_query_dict.get(\"index\")\n@@ -167,7 +167,9 @@\n }\n return str(doc_fields_dict)\n \n- def select_from_data_connector_query(self, batch_definition_list=None):\n+ def select_from_data_connector_query(\n+ self, batch_definition_list: Optional[List[BatchDefinition]] = None\n+ ) -> List[BatchDefinition]:\n if batch_definition_list is None:\n return []\n filter_function: Callable\n@@ -175,6 +177,7 @@\n filter_function = self.custom_filter_function\n else:\n filter_function = self.best_effort_batch_definition_matcher()\n+ selected_batch_definitions: List[BatchDefinition]\n selected_batch_definitions = list(\n filter(\n lambda batch_definition: filter_function(\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import itertools\nimport logging\nfrom typing import Callable, Dict, Optional, Union\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.core.id_dict import IDDict\nfrom great_expectations.util import is_int\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_batch_filter(\n data_connector_query_dict: Optional[\n Dict[\n str,\n Optional[\n Union[\n int,\n list,\n tuple,\n slice,\n str,\n Union[Dict, IDDict],\n Callable,\n ]\n ],\n ]\n ] = None\n):\n if not data_connector_query_dict:\n return BatchFilter(\n custom_filter_function=None,\n batch_filter_parameters=None,\n index=None,\n limit=None,\n )\n data_connector_query_keys: set = set(data_connector_query_dict.keys())\n if not data_connector_query_keys <= BatchFilter.RECOGNIZED_KEYS:\n raise ge_exceptions.BatchFilterError(\n f\"\"\"Unrecognized data_connector_query key(s):\n\"{str(data_connector_query_keys - BatchFilter.RECOGNIZED_KEYS)}\" detected.\n \"\"\"\n )\n custom_filter_function: Callable = data_connector_query_dict.get(\n \"custom_filter_function\"\n )\n if custom_filter_function and not isinstance(custom_filter_function, Callable):\n raise ge_exceptions.BatchFilterError(\n f\"\"\"The type of a 
custom_filter must be a function (Python \"Callable\"). The type given is\n\"{str(type(custom_filter_function))}\", which is illegal.\n \"\"\"\n )\n batch_filter_parameters: Optional[dict] = data_connector_query_dict.get(\n \"batch_filter_parameters\"\n )\n if batch_filter_parameters:\n if not isinstance(batch_filter_parameters, dict):\n raise ge_exceptions.BatchFilterError(\n f\"\"\"The type of batch_filter_parameters must be a dictionary (Python \"dict\"). The type given is\n\"{str(type(batch_filter_parameters))}\", which is illegal.\n \"\"\"\n )\n if not all([isinstance(key, str) for key in batch_filter_parameters.keys()]):\n raise ge_exceptions.BatchFilterError(\n 'All batch_filter_parameters keys must strings (Python \"str\").'\n )\n if batch_filter_parameters is not None:\n batch_filter_parameters: IDDict = IDDict(batch_filter_parameters)\n index: Optional[\n Union[int, list, tuple, slice, str]\n ] = data_connector_query_dict.get(\"index\")\n limit: Optional[int] = data_connector_query_dict.get(\"limit\")\n if limit and (not isinstance(limit, int) or limit < 0):\n raise ge_exceptions.BatchFilterError(\n f\"\"\"The type of a limit must be an integer (Python \"int\") that is greater than or equal to 0. The\ntype and value given are \"{str(type(limit))}\" and \"{limit}\", respectively, which is illegal.\n \"\"\"\n )\n if index is not None and limit is not None:\n raise ge_exceptions.BatchFilterError(\n \"Only one of index or limit, but not both, can be specified (specifying both is illegal).\"\n )\n index = _parse_index(index=index)\n return BatchFilter(\n custom_filter_function=custom_filter_function,\n batch_filter_parameters=batch_filter_parameters,\n index=index,\n limit=limit,\n )\n\n\ndef _parse_index(\n index: Optional[Union[int, list, tuple, slice, str]] = None\n) -> Optional[Union[int, slice]]:\n if index is None:\n return None\n elif isinstance(index, (int, slice)):\n return index\n elif isinstance(index, (list, tuple)):\n if len(index) > 3:\n raise ge_exceptions.BatchFilterError(\n f\"\"\"The number of index slice components must be between 1 and 3 (the given number is\n{len(index)}).\n \"\"\"\n )\n if len(index) == 1:\n return index[0]\n if len(index) == 2:\n return slice(index[0], index[1], None)\n if len(index) == 3:\n return slice(index[0], index[1], index[2])\n elif isinstance(index, str):\n if is_int(value=index):\n return _parse_index(index=int(index))\n return _parse_index(index=[int(idx_str) for idx_str in index.split(\":\")])\n else:\n raise ge_exceptions.BatchFilterError(\n f\"\"\"The type of index must be an integer (Python \"int\"), or a list (Python \"list\") or a tuple\n(Python \"tuple\"), or a Python \"slice\" object, or a string that has the format of a single integer or a slice argument.\nThe type given is \"{str(type(index))}\", which is illegal.\n \"\"\"\n )\n\n\nclass BatchFilter:\n RECOGNIZED_KEYS: set = {\n \"custom_filter_function\",\n \"batch_filter_parameters\",\n \"index\",\n \"limit\",\n }\n\n def __init__(\n self,\n custom_filter_function: Callable = None,\n batch_filter_parameters: Optional[IDDict] = None,\n index: Optional[Union[int, slice]] = None,\n limit: int = None,\n ):\n self._custom_filter_function = custom_filter_function\n self._batch_filter_parameters = batch_filter_parameters\n self._index = index\n self._limit = limit\n\n @property\n def custom_filter_function(self) -> Callable:\n return self._custom_filter_function\n\n @property\n def batch_filter_parameters(self) -> Optional[IDDict]:\n return self._batch_filter_parameters\n\n 
@property\n def index(self) -> Optional[Union[int, slice]]:\n return self._index\n\n @property\n def limit(self) -> int:\n return self._limit\n\n def __repr__(self) -> str:\n doc_fields_dict: dict = {\n \"custom_filter_function\": self._custom_filter_function,\n \"batch_filter_parameters\": self.batch_filter_parameters,\n \"index\": self.index,\n \"limit\": self.limit,\n }\n return str(doc_fields_dict)\n\n def select_from_data_connector_query(self, batch_definition_list=None):\n if batch_definition_list is None:\n return []\n filter_function: Callable\n if self.custom_filter_function:\n filter_function = self.custom_filter_function\n else:\n filter_function = self.best_effort_batch_definition_matcher()\n selected_batch_definitions = list(\n filter(\n lambda batch_definition: filter_function(\n batch_identifiers=batch_definition.batch_identifiers,\n ),\n batch_definition_list,\n )\n )\n if self.index is None:\n selected_batch_definitions = selected_batch_definitions[: self.limit]\n else:\n if isinstance(self.index, int):\n selected_batch_definitions = [selected_batch_definitions[self.index]]\n else:\n selected_batch_definitions = list(\n itertools.chain.from_iterable(\n [selected_batch_definitions[self.index]]\n )\n )\n return selected_batch_definitions\n\n def best_effort_batch_definition_matcher(self) -> Callable:\n def match_batch_identifiers_to_batch_filter_params(\n batch_identifiers: dict,\n ) -> bool:\n if self.batch_filter_parameters:\n if not batch_identifiers:\n return False\n\n for batch_filter_parameter, val in self.batch_filter_parameters.items():\n if not (\n batch_filter_parameter in batch_identifiers\n and batch_identifiers[batch_filter_parameter] == val\n ):\n return False\n\n return True\n\n return match_batch_identifiers_to_batch_filter_params\n", "path": "great_expectations/datasource/data_connector/batch_filter.py"}], "after_files": [{"content": "import itertools\nimport logging\nfrom typing import Callable, Dict, List, Optional, Union\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.core.batch import BatchDefinition\nfrom great_expectations.core.id_dict import IDDict\nfrom great_expectations.util import is_int\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_batch_filter(\n data_connector_query_dict: Optional[\n Dict[\n str,\n Optional[\n Union[\n int,\n list,\n tuple,\n slice,\n str,\n Union[Dict, IDDict],\n Callable,\n ]\n ],\n ]\n ] = None\n):\n if not data_connector_query_dict:\n return BatchFilter(\n custom_filter_function=None,\n batch_filter_parameters=None,\n index=None,\n limit=None,\n )\n data_connector_query_keys: set = set(data_connector_query_dict.keys())\n if not data_connector_query_keys <= BatchFilter.RECOGNIZED_KEYS:\n raise ge_exceptions.BatchFilterError(\n f\"\"\"Unrecognized data_connector_query key(s):\n\"{str(data_connector_query_keys - BatchFilter.RECOGNIZED_KEYS)}\" detected.\n \"\"\"\n )\n custom_filter_function: Callable = data_connector_query_dict.get(\n \"custom_filter_function\"\n )\n if custom_filter_function and not isinstance(custom_filter_function, Callable):\n raise ge_exceptions.BatchFilterError(\n f\"\"\"The type of a custom_filter must be a function (Python \"Callable\"). 
The type given is\n\"{str(type(custom_filter_function))}\", which is illegal.\n \"\"\"\n )\n batch_filter_parameters: Optional[\n Union[dict, IDDict]\n ] = data_connector_query_dict.get(\"batch_filter_parameters\")\n if batch_filter_parameters:\n if not isinstance(batch_filter_parameters, dict):\n raise ge_exceptions.BatchFilterError(\n f\"\"\"The type of batch_filter_parameters must be a dictionary (Python \"dict\"). The type given is\n\"{str(type(batch_filter_parameters))}\", which is illegal.\n \"\"\"\n )\n if not all([isinstance(key, str) for key in batch_filter_parameters.keys()]):\n raise ge_exceptions.BatchFilterError(\n 'All batch_filter_parameters keys must strings (Python \"str\").'\n )\n batch_filter_parameters = IDDict(batch_filter_parameters)\n index: Optional[\n Union[int, list, tuple, slice, str]\n ] = data_connector_query_dict.get(\"index\")\n limit: Optional[int] = data_connector_query_dict.get(\"limit\")\n if limit and (not isinstance(limit, int) or limit < 0):\n raise ge_exceptions.BatchFilterError(\n f\"\"\"The type of a limit must be an integer (Python \"int\") that is greater than or equal to 0. The\ntype and value given are \"{str(type(limit))}\" and \"{limit}\", respectively, which is illegal.\n \"\"\"\n )\n if index is not None and limit is not None:\n raise ge_exceptions.BatchFilterError(\n \"Only one of index or limit, but not both, can be specified (specifying both is illegal).\"\n )\n index = _parse_index(index=index)\n return BatchFilter(\n custom_filter_function=custom_filter_function,\n batch_filter_parameters=batch_filter_parameters,\n index=index,\n limit=limit,\n )\n\n\ndef _parse_index(\n index: Optional[Union[int, list, tuple, slice, str]] = None\n) -> Optional[Union[int, slice]]:\n if index is None:\n return None\n elif isinstance(index, (int, slice)):\n return index\n elif isinstance(index, (list, tuple)):\n if len(index) > 3:\n raise ge_exceptions.BatchFilterError(\n f\"\"\"The number of index slice components must be between 1 and 3 (the given number is\n{len(index)}).\n \"\"\"\n )\n if len(index) == 1:\n return index[0]\n if len(index) == 2:\n return slice(index[0], index[1], None)\n if len(index) == 3:\n return slice(index[0], index[1], index[2])\n elif isinstance(index, str):\n if is_int(value=index):\n return _parse_index(index=int(index))\n return _parse_index(index=[int(idx_str) for idx_str in index.split(\":\")])\n else:\n raise ge_exceptions.BatchFilterError(\n f\"\"\"The type of index must be an integer (Python \"int\"), or a list (Python \"list\") or a tuple\n(Python \"tuple\"), or a Python \"slice\" object, or a string that has the format of a single integer or a slice argument.\nThe type given is \"{str(type(index))}\", which is illegal.\n \"\"\"\n )\n\n\nclass BatchFilter:\n RECOGNIZED_KEYS: set = {\n \"custom_filter_function\",\n \"batch_filter_parameters\",\n \"index\",\n \"limit\",\n }\n\n def __init__(\n self,\n custom_filter_function: Callable = None,\n batch_filter_parameters: Optional[IDDict] = None,\n index: Optional[Union[int, slice]] = None,\n limit: int = None,\n ):\n self._custom_filter_function = custom_filter_function\n self._batch_filter_parameters = batch_filter_parameters\n self._index = index\n self._limit = limit\n\n @property\n def custom_filter_function(self) -> Callable:\n return self._custom_filter_function\n\n @property\n def batch_filter_parameters(self) -> Optional[IDDict]:\n return self._batch_filter_parameters\n\n @property\n def index(self) -> Optional[Union[int, slice]]:\n return self._index\n\n @property\n 
def limit(self) -> int:\n return self._limit\n\n def __repr__(self) -> str:\n doc_fields_dict: dict = {\n \"custom_filter_function\": self._custom_filter_function,\n \"batch_filter_parameters\": self.batch_filter_parameters,\n \"index\": self.index,\n \"limit\": self.limit,\n }\n return str(doc_fields_dict)\n\n def select_from_data_connector_query(\n self, batch_definition_list: Optional[List[BatchDefinition]] = None\n ) -> List[BatchDefinition]:\n if batch_definition_list is None:\n return []\n filter_function: Callable\n if self.custom_filter_function:\n filter_function = self.custom_filter_function\n else:\n filter_function = self.best_effort_batch_definition_matcher()\n selected_batch_definitions: List[BatchDefinition]\n selected_batch_definitions = list(\n filter(\n lambda batch_definition: filter_function(\n batch_identifiers=batch_definition.batch_identifiers,\n ),\n batch_definition_list,\n )\n )\n if self.index is None:\n selected_batch_definitions = selected_batch_definitions[: self.limit]\n else:\n if isinstance(self.index, int):\n selected_batch_definitions = [selected_batch_definitions[self.index]]\n else:\n selected_batch_definitions = list(\n itertools.chain.from_iterable(\n [selected_batch_definitions[self.index]]\n )\n )\n return selected_batch_definitions\n\n def best_effort_batch_definition_matcher(self) -> Callable:\n def match_batch_identifiers_to_batch_filter_params(\n batch_identifiers: dict,\n ) -> bool:\n if self.batch_filter_parameters:\n if not batch_identifiers:\n return False\n\n for batch_filter_parameter, val in self.batch_filter_parameters.items():\n if not (\n batch_filter_parameter in batch_identifiers\n and batch_identifiers[batch_filter_parameter] == val\n ):\n return False\n\n return True\n\n return match_batch_identifiers_to_batch_filter_params\n", "path": "great_expectations/datasource/data_connector/batch_filter.py"}]} | 2,407 | 516 |
gh_patches_debug_31205 | rasdani/github-patches | git_diff | jazzband__pip-tools-614 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip-compile gives misleading error message if no internet
If there is no internet access, `pip-compile` will fail with a misleading message. This is with pip-compile 1.6.1, pip 8.1.1, and Python 3.5.1.
##### Steps to replicate
1. Create a `requirements.in` file with a single project name.
2. Disable internet access (e.g. by disabling wi-fi).
3. Run `pip-compile --verbose requirements.in`.
##### Expected result
The command should fail with an error message indicating that there is no internet connection.
##### Actual result
Instead, the output looks something like this:
```
Using indexes:
https://pypi.python.org/simple
ROUND 1
Current constraints:
MyPackage
Finding the best candidates:
Could not find a version that matches MyPackage
Tried: (no version found at all)
```
--- END ISSUE ---
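A short aside on why the output is misleading rather than wrong: when the index cannot be reached, the finder simply comes back with no candidates, which at this level is indistinguishable from a package that has no releases at all. A deliberately simplified stand-in for that control flow, not pip-tools' actual code:

```python
def find_all_candidates(name, index_reachable):
    if not index_reachable:
        return []   # network failure: nothing found, but no error is raised
    return []       # unknown package: also nothing found

def find_best_match(name, index_reachable):
    candidates = find_all_candidates(name, index_reachable)
    if not candidates:
        raise RuntimeError(
            "Could not find a version that matches {}\n"
            "Tried: (no version found at all)".format(name)
        )

try:
    find_best_match("MyPackage", index_reachable=False)
except RuntimeError as exc:
    print(exc)  # blames the package even though the real problem is connectivity
```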
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/repositories/pypi.py`
Content:
```
1 # coding: utf-8
2 from __future__ import (absolute_import, division, print_function,
3 unicode_literals)
4
5 import hashlib
6 import os
7 from contextlib import contextmanager
8 from shutil import rmtree
9
10 from pip.download import is_file_url, url_to_path
11 from pip.index import PackageFinder
12 from pip.req.req_set import RequirementSet
13 from pip.wheel import Wheel
14 try:
15 from pip.utils.hashes import FAVORITE_HASH
16 except ImportError:
17 FAVORITE_HASH = 'sha256'
18
19 from ..cache import CACHE_DIR
20 from ..exceptions import NoCandidateFound
21 from ..utils import (fs_str, is_pinned_requirement, lookup_table,
22 make_install_requirement, pip_version_info)
23 from .base import BaseRepository
24
25 try:
26 from tempfile import TemporaryDirectory # added in 3.2
27 except ImportError:
28 from .._compat import TemporaryDirectory
29
30
31 class PyPIRepository(BaseRepository):
32 DEFAULT_INDEX_URL = 'https://pypi.python.org/simple'
33
34 """
35 The PyPIRepository will use the provided Finder instance to lookup
36 packages. Typically, it looks up packages on PyPI (the default implicit
37 config), but any other PyPI mirror can be used if index_urls is
38 changed/configured on the Finder.
39 """
40 def __init__(self, pip_options, session):
41 self.session = session
42
43 index_urls = [pip_options.index_url] + pip_options.extra_index_urls
44 if pip_options.no_index:
45 index_urls = []
46
47 self.finder = PackageFinder(
48 find_links=pip_options.find_links,
49 index_urls=index_urls,
50 trusted_hosts=pip_options.trusted_hosts,
51 allow_all_prereleases=pip_options.pre,
52 process_dependency_links=pip_options.process_dependency_links,
53 session=self.session,
54 )
55
56 # Caches
57 # stores project_name => InstallationCandidate mappings for all
58 # versions reported by PyPI, so we only have to ask once for each
59 # project
60 self._available_candidates_cache = {}
61
62 # stores InstallRequirement => list(InstallRequirement) mappings
63 # of all secondary dependencies for the given requirement, so we
64 # only have to go to disk once for each requirement
65 self._dependencies_cache = {}
66
67 # Setup file paths
68 self.freshen_build_caches()
69 self._download_dir = fs_str(os.path.join(CACHE_DIR, 'pkgs'))
70 self._wheel_download_dir = fs_str(os.path.join(CACHE_DIR, 'wheels'))
71
72 def freshen_build_caches(self):
73 """
74 Start with fresh build/source caches. Will remove any old build
75 caches from disk automatically.
76 """
77 self._build_dir = TemporaryDirectory(fs_str('build'))
78 self._source_dir = TemporaryDirectory(fs_str('source'))
79
80 @property
81 def build_dir(self):
82 return self._build_dir.name
83
84 @property
85 def source_dir(self):
86 return self._source_dir.name
87
88 def clear_caches(self):
89 rmtree(self._download_dir, ignore_errors=True)
90 rmtree(self._wheel_download_dir, ignore_errors=True)
91
92 def find_all_candidates(self, req_name):
93 if req_name not in self._available_candidates_cache:
94 # pip 8 changed the internal API, making this a public method
95 if pip_version_info >= (8, 0):
96 candidates = self.finder.find_all_candidates(req_name)
97 else:
98 candidates = self.finder._find_all_versions(req_name)
99 self._available_candidates_cache[req_name] = candidates
100 return self._available_candidates_cache[req_name]
101
102 def find_best_match(self, ireq, prereleases=None):
103 """
104 Returns a Version object that indicates the best match for the given
105 InstallRequirement according to the external repository.
106 """
107 if ireq.editable:
108 return ireq # return itself as the best match
109
110 all_candidates = self.find_all_candidates(ireq.name)
111 candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version, unique=True)
112 matching_versions = ireq.specifier.filter((candidate.version for candidate in all_candidates),
113 prereleases=prereleases)
114
115 # Reuses pip's internal candidate sort key to sort
116 matching_candidates = [candidates_by_version[ver] for ver in matching_versions]
117 if not matching_candidates:
118 raise NoCandidateFound(ireq, all_candidates)
119 best_candidate = max(matching_candidates, key=self.finder._candidate_sort_key)
120
121 # Turn the candidate into a pinned InstallRequirement
122 return make_install_requirement(
123 best_candidate.project, best_candidate.version, ireq.extras, constraint=ireq.constraint
124 )
125
126 def get_dependencies(self, ireq):
127 """
128 Given a pinned or an editable InstallRequirement, returns a set of
129 dependencies (also InstallRequirements, but not necessarily pinned).
130 They indicate the secondary dependencies for the given requirement.
131 """
132 if not (ireq.editable or is_pinned_requirement(ireq)):
133 raise TypeError('Expected pinned or editable InstallRequirement, got {}'.format(ireq))
134
135 if ireq not in self._dependencies_cache:
136 if ireq.editable and (ireq.source_dir and os.path.exists(ireq.source_dir)):
137 # No download_dir for locally available editable requirements.
138 # If a download_dir is passed, pip will unnecessarely
139 # archive the entire source directory
140 download_dir = None
141 elif ireq.link and not ireq.link.is_artifact:
142 # No download_dir for VCS sources. This also works around pip
143 # using git-checkout-index, which gets rid of the .git dir.
144 download_dir = None
145 else:
146 download_dir = self._download_dir
147 if not os.path.isdir(download_dir):
148 os.makedirs(download_dir)
149 if not os.path.isdir(self._wheel_download_dir):
150 os.makedirs(self._wheel_download_dir)
151
152 reqset = RequirementSet(self.build_dir,
153 self.source_dir,
154 download_dir=download_dir,
155 wheel_download_dir=self._wheel_download_dir,
156 session=self.session)
157 self._dependencies_cache[ireq] = reqset._prepare_file(self.finder, ireq)
158 return set(self._dependencies_cache[ireq])
159
160 def get_hashes(self, ireq):
161 """
162 Given a pinned InstallRequire, returns a set of hashes that represent
163 all of the files for a given requirement. It is not acceptable for an
164 editable or unpinned requirement to be passed to this function.
165 """
166 if not is_pinned_requirement(ireq):
167 raise TypeError(
168 "Expected pinned requirement, not unpinned or editable, got {}".format(ireq))
169
170 # We need to get all of the candidates that match our current version
171 # pin, these will represent all of the files that could possibly
172 # satisfy this constraint.
173 all_candidates = self.find_all_candidates(ireq.name)
174 candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version)
175 matching_versions = list(
176 ireq.specifier.filter((candidate.version for candidate in all_candidates)))
177 matching_candidates = candidates_by_version[matching_versions[0]]
178
179 return {
180 self._get_file_hash(candidate.location)
181 for candidate in matching_candidates
182 }
183
184 def _get_file_hash(self, location):
185 h = hashlib.new(FAVORITE_HASH)
186 with open_local_or_remote_file(location, self.session) as fp:
187 for chunk in iter(lambda: fp.read(8096), b""):
188 h.update(chunk)
189 return ":".join([FAVORITE_HASH, h.hexdigest()])
190
191 @contextmanager
192 def allow_all_wheels(self):
193 """
194 Monkey patches pip.Wheel to allow wheels from all platforms and Python versions.
195
196 This also saves the candidate cache and set a new one, or else the results from the
197 previous non-patched calls will interfere.
198 """
199 def _wheel_supported(self, tags=None):
200 # Ignore current platform. Support everything.
201 return True
202
203 def _wheel_support_index_min(self, tags=None):
204 # All wheels are equal priority for sorting.
205 return 0
206
207 original_wheel_supported = Wheel.supported
208 original_support_index_min = Wheel.support_index_min
209 original_cache = self._available_candidates_cache
210
211 Wheel.supported = _wheel_supported
212 Wheel.support_index_min = _wheel_support_index_min
213 self._available_candidates_cache = {}
214
215 try:
216 yield
217 finally:
218 Wheel.supported = original_wheel_supported
219 Wheel.support_index_min = original_support_index_min
220 self._available_candidates_cache = original_cache
221
222
223 @contextmanager
224 def open_local_or_remote_file(link, session):
225 """
226 Open local or remote file for reading.
227
228 :type link: pip.index.Link
229 :type session: requests.Session
230 :raises ValueError: If link points to a local directory.
231 :return: a context manager to the opened file-like object
232 """
233 url = link.url_without_fragment
234
235 if is_file_url(link):
236 # Local URL
237 local_path = url_to_path(url)
238 if os.path.isdir(local_path):
239 raise ValueError("Cannot open directory for read: {}".format(url))
240 else:
241 with open(local_path, 'rb') as local_file:
242 yield local_file
243 else:
244 # Remote URL
245 headers = {"Accept-Encoding": "identity"}
246 response = session.get(url, headers=headers, stream=True)
247 try:
248 yield response.raw
249 finally:
250 response.close()
251
```
Path: `piptools/exceptions.py`
Content:
```
1 class PipToolsError(Exception):
2 pass
3
4
5 class NoCandidateFound(PipToolsError):
6 def __init__(self, ireq, candidates_tried):
7 self.ireq = ireq
8 self.candidates_tried = candidates_tried
9
10 def __str__(self):
11 sorted_versions = sorted(c.version for c in self.candidates_tried)
12 lines = [
13 'Could not find a version that matches {}'.format(self.ireq),
14 'Tried: {}'.format(', '.join(str(version) for version in sorted_versions) or '(no version found at all)')
15 ]
16 return '\n'.join(lines)
17
18
19 class UnsupportedConstraint(PipToolsError):
20 def __init__(self, message, constraint):
21 super(UnsupportedConstraint, self).__init__(message)
22 self.constraint = constraint
23
24 def __str__(self):
25 message = super(UnsupportedConstraint, self).__str__()
26 return '{} (constraint was: {})'.format(message, str(self.constraint))
27
28
29 class IncompatibleRequirements(PipToolsError):
30 def __init__(self, ireq_a, ireq_b):
31 self.ireq_a = ireq_a
32 self.ireq_b = ireq_b
33
34 def __str__(self):
35 message = "Incompatible requirements found: {} and {}"
36 return message.format(self.ireq_a, self.ireq_b)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/piptools/exceptions.py b/piptools/exceptions.py
--- a/piptools/exceptions.py
+++ b/piptools/exceptions.py
@@ -3,9 +3,10 @@
class NoCandidateFound(PipToolsError):
- def __init__(self, ireq, candidates_tried):
+ def __init__(self, ireq, candidates_tried, index_urls):
self.ireq = ireq
self.candidates_tried = candidates_tried
+ self.index_urls = index_urls
def __str__(self):
sorted_versions = sorted(c.version for c in self.candidates_tried)
@@ -13,6 +14,12 @@
'Could not find a version that matches {}'.format(self.ireq),
'Tried: {}'.format(', '.join(str(version) for version in sorted_versions) or '(no version found at all)')
]
+ if sorted_versions:
+ lines.append('There are incompatible versions in the resolved dependencies.')
+ else:
+ lines.append('{} {} reachable?'.format(
+ 'Were' if len(self.index_urls) > 1 else 'Was', ' or '.join(self.index_urls))
+ )
return '\n'.join(lines)
diff --git a/piptools/repositories/pypi.py b/piptools/repositories/pypi.py
--- a/piptools/repositories/pypi.py
+++ b/piptools/repositories/pypi.py
@@ -115,7 +115,7 @@
# Reuses pip's internal candidate sort key to sort
matching_candidates = [candidates_by_version[ver] for ver in matching_versions]
if not matching_candidates:
- raise NoCandidateFound(ireq, all_candidates)
+ raise NoCandidateFound(ireq, all_candidates, self.finder.index_urls)
best_candidate = max(matching_candidates, key=self.finder._candidate_sort_key)
# Turn the candidate into a pinned InstallRequirement
| {"golden_diff": "diff --git a/piptools/exceptions.py b/piptools/exceptions.py\n--- a/piptools/exceptions.py\n+++ b/piptools/exceptions.py\n@@ -3,9 +3,10 @@\n \n \n class NoCandidateFound(PipToolsError):\n- def __init__(self, ireq, candidates_tried):\n+ def __init__(self, ireq, candidates_tried, index_urls):\n self.ireq = ireq\n self.candidates_tried = candidates_tried\n+ self.index_urls = index_urls\n \n def __str__(self):\n sorted_versions = sorted(c.version for c in self.candidates_tried)\n@@ -13,6 +14,12 @@\n 'Could not find a version that matches {}'.format(self.ireq),\n 'Tried: {}'.format(', '.join(str(version) for version in sorted_versions) or '(no version found at all)')\n ]\n+ if sorted_versions:\n+ lines.append('There are incompatible versions in the resolved dependencies.')\n+ else:\n+ lines.append('{} {} reachable?'.format(\n+ 'Were' if len(self.index_urls) > 1 else 'Was', ' or '.join(self.index_urls))\n+ )\n return '\\n'.join(lines)\n \n \ndiff --git a/piptools/repositories/pypi.py b/piptools/repositories/pypi.py\n--- a/piptools/repositories/pypi.py\n+++ b/piptools/repositories/pypi.py\n@@ -115,7 +115,7 @@\n # Reuses pip's internal candidate sort key to sort\n matching_candidates = [candidates_by_version[ver] for ver in matching_versions]\n if not matching_candidates:\n- raise NoCandidateFound(ireq, all_candidates)\n+ raise NoCandidateFound(ireq, all_candidates, self.finder.index_urls)\n best_candidate = max(matching_candidates, key=self.finder._candidate_sort_key)\n \n # Turn the candidate into a pinned InstallRequirement\n", "issue": "pip-compile gives misleading error message if no internet\nIf there is no internet access, `pip-compile` will fail with a misleading message. This is with pip-compile 1.6.1, pip 8.1.1, and Python 3.5.1.\n##### Steps to replicate\n1. Create a `requirements.in` file with a single project name.\n2. Disable internet access (e.g. by disabling wi-fi).\n3. Run `pip-compile --verbose requirements.in`.\n##### Expected result\n\nThe command should fail with an error message relating to the fact that there is no internet connection.\n##### Actual result\n\nInstead, the output looks something like this:\n\n```\nUsing indexes:\n https://pypi.python.org/simple\n\n ROUND 1 \nCurrent constraints:\n MyPackage\n\nFinding the best candidates:\nCould not find a version that matches MyPackage\nTried: (no version found at all)\n```\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport hashlib\nimport os\nfrom contextlib import contextmanager\nfrom shutil import rmtree\n\nfrom pip.download import is_file_url, url_to_path\nfrom pip.index import PackageFinder\nfrom pip.req.req_set import RequirementSet\nfrom pip.wheel import Wheel\ntry:\n from pip.utils.hashes import FAVORITE_HASH\nexcept ImportError:\n FAVORITE_HASH = 'sha256'\n\nfrom ..cache import CACHE_DIR\nfrom ..exceptions import NoCandidateFound\nfrom ..utils import (fs_str, is_pinned_requirement, lookup_table,\n make_install_requirement, pip_version_info)\nfrom .base import BaseRepository\n\ntry:\n from tempfile import TemporaryDirectory # added in 3.2\nexcept ImportError:\n from .._compat import TemporaryDirectory\n\n\nclass PyPIRepository(BaseRepository):\n DEFAULT_INDEX_URL = 'https://pypi.python.org/simple'\n\n \"\"\"\n The PyPIRepository will use the provided Finder instance to lookup\n packages. 
Typically, it looks up packages on PyPI (the default implicit\n config), but any other PyPI mirror can be used if index_urls is\n changed/configured on the Finder.\n \"\"\"\n def __init__(self, pip_options, session):\n self.session = session\n\n index_urls = [pip_options.index_url] + pip_options.extra_index_urls\n if pip_options.no_index:\n index_urls = []\n\n self.finder = PackageFinder(\n find_links=pip_options.find_links,\n index_urls=index_urls,\n trusted_hosts=pip_options.trusted_hosts,\n allow_all_prereleases=pip_options.pre,\n process_dependency_links=pip_options.process_dependency_links,\n session=self.session,\n )\n\n # Caches\n # stores project_name => InstallationCandidate mappings for all\n # versions reported by PyPI, so we only have to ask once for each\n # project\n self._available_candidates_cache = {}\n\n # stores InstallRequirement => list(InstallRequirement) mappings\n # of all secondary dependencies for the given requirement, so we\n # only have to go to disk once for each requirement\n self._dependencies_cache = {}\n\n # Setup file paths\n self.freshen_build_caches()\n self._download_dir = fs_str(os.path.join(CACHE_DIR, 'pkgs'))\n self._wheel_download_dir = fs_str(os.path.join(CACHE_DIR, 'wheels'))\n\n def freshen_build_caches(self):\n \"\"\"\n Start with fresh build/source caches. Will remove any old build\n caches from disk automatically.\n \"\"\"\n self._build_dir = TemporaryDirectory(fs_str('build'))\n self._source_dir = TemporaryDirectory(fs_str('source'))\n\n @property\n def build_dir(self):\n return self._build_dir.name\n\n @property\n def source_dir(self):\n return self._source_dir.name\n\n def clear_caches(self):\n rmtree(self._download_dir, ignore_errors=True)\n rmtree(self._wheel_download_dir, ignore_errors=True)\n\n def find_all_candidates(self, req_name):\n if req_name not in self._available_candidates_cache:\n # pip 8 changed the internal API, making this a public method\n if pip_version_info >= (8, 0):\n candidates = self.finder.find_all_candidates(req_name)\n else:\n candidates = self.finder._find_all_versions(req_name)\n self._available_candidates_cache[req_name] = candidates\n return self._available_candidates_cache[req_name]\n\n def find_best_match(self, ireq, prereleases=None):\n \"\"\"\n Returns a Version object that indicates the best match for the given\n InstallRequirement according to the external repository.\n \"\"\"\n if ireq.editable:\n return ireq # return itself as the best match\n\n all_candidates = self.find_all_candidates(ireq.name)\n candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version, unique=True)\n matching_versions = ireq.specifier.filter((candidate.version for candidate in all_candidates),\n prereleases=prereleases)\n\n # Reuses pip's internal candidate sort key to sort\n matching_candidates = [candidates_by_version[ver] for ver in matching_versions]\n if not matching_candidates:\n raise NoCandidateFound(ireq, all_candidates)\n best_candidate = max(matching_candidates, key=self.finder._candidate_sort_key)\n\n # Turn the candidate into a pinned InstallRequirement\n return make_install_requirement(\n best_candidate.project, best_candidate.version, ireq.extras, constraint=ireq.constraint\n )\n\n def get_dependencies(self, ireq):\n \"\"\"\n Given a pinned or an editable InstallRequirement, returns a set of\n dependencies (also InstallRequirements, but not necessarily pinned).\n They indicate the secondary dependencies for the given requirement.\n \"\"\"\n if not (ireq.editable or is_pinned_requirement(ireq)):\n 
raise TypeError('Expected pinned or editable InstallRequirement, got {}'.format(ireq))\n\n if ireq not in self._dependencies_cache:\n if ireq.editable and (ireq.source_dir and os.path.exists(ireq.source_dir)):\n # No download_dir for locally available editable requirements.\n # If a download_dir is passed, pip will unnecessarely\n # archive the entire source directory\n download_dir = None\n elif ireq.link and not ireq.link.is_artifact:\n # No download_dir for VCS sources. This also works around pip\n # using git-checkout-index, which gets rid of the .git dir.\n download_dir = None\n else:\n download_dir = self._download_dir\n if not os.path.isdir(download_dir):\n os.makedirs(download_dir)\n if not os.path.isdir(self._wheel_download_dir):\n os.makedirs(self._wheel_download_dir)\n\n reqset = RequirementSet(self.build_dir,\n self.source_dir,\n download_dir=download_dir,\n wheel_download_dir=self._wheel_download_dir,\n session=self.session)\n self._dependencies_cache[ireq] = reqset._prepare_file(self.finder, ireq)\n return set(self._dependencies_cache[ireq])\n\n def get_hashes(self, ireq):\n \"\"\"\n Given a pinned InstallRequire, returns a set of hashes that represent\n all of the files for a given requirement. It is not acceptable for an\n editable or unpinned requirement to be passed to this function.\n \"\"\"\n if not is_pinned_requirement(ireq):\n raise TypeError(\n \"Expected pinned requirement, not unpinned or editable, got {}\".format(ireq))\n\n # We need to get all of the candidates that match our current version\n # pin, these will represent all of the files that could possibly\n # satisfy this constraint.\n all_candidates = self.find_all_candidates(ireq.name)\n candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version)\n matching_versions = list(\n ireq.specifier.filter((candidate.version for candidate in all_candidates)))\n matching_candidates = candidates_by_version[matching_versions[0]]\n\n return {\n self._get_file_hash(candidate.location)\n for candidate in matching_candidates\n }\n\n def _get_file_hash(self, location):\n h = hashlib.new(FAVORITE_HASH)\n with open_local_or_remote_file(location, self.session) as fp:\n for chunk in iter(lambda: fp.read(8096), b\"\"):\n h.update(chunk)\n return \":\".join([FAVORITE_HASH, h.hexdigest()])\n\n @contextmanager\n def allow_all_wheels(self):\n \"\"\"\n Monkey patches pip.Wheel to allow wheels from all platforms and Python versions.\n\n This also saves the candidate cache and set a new one, or else the results from the\n previous non-patched calls will interfere.\n \"\"\"\n def _wheel_supported(self, tags=None):\n # Ignore current platform. 
Support everything.\n return True\n\n def _wheel_support_index_min(self, tags=None):\n # All wheels are equal priority for sorting.\n return 0\n\n original_wheel_supported = Wheel.supported\n original_support_index_min = Wheel.support_index_min\n original_cache = self._available_candidates_cache\n\n Wheel.supported = _wheel_supported\n Wheel.support_index_min = _wheel_support_index_min\n self._available_candidates_cache = {}\n\n try:\n yield\n finally:\n Wheel.supported = original_wheel_supported\n Wheel.support_index_min = original_support_index_min\n self._available_candidates_cache = original_cache\n\n\n@contextmanager\ndef open_local_or_remote_file(link, session):\n \"\"\"\n Open local or remote file for reading.\n\n :type link: pip.index.Link\n :type session: requests.Session\n :raises ValueError: If link points to a local directory.\n :return: a context manager to the opened file-like object\n \"\"\"\n url = link.url_without_fragment\n\n if is_file_url(link):\n # Local URL\n local_path = url_to_path(url)\n if os.path.isdir(local_path):\n raise ValueError(\"Cannot open directory for read: {}\".format(url))\n else:\n with open(local_path, 'rb') as local_file:\n yield local_file\n else:\n # Remote URL\n headers = {\"Accept-Encoding\": \"identity\"}\n response = session.get(url, headers=headers, stream=True)\n try:\n yield response.raw\n finally:\n response.close()\n", "path": "piptools/repositories/pypi.py"}, {"content": "class PipToolsError(Exception):\n pass\n\n\nclass NoCandidateFound(PipToolsError):\n def __init__(self, ireq, candidates_tried):\n self.ireq = ireq\n self.candidates_tried = candidates_tried\n\n def __str__(self):\n sorted_versions = sorted(c.version for c in self.candidates_tried)\n lines = [\n 'Could not find a version that matches {}'.format(self.ireq),\n 'Tried: {}'.format(', '.join(str(version) for version in sorted_versions) or '(no version found at all)')\n ]\n return '\\n'.join(lines)\n\n\nclass UnsupportedConstraint(PipToolsError):\n def __init__(self, message, constraint):\n super(UnsupportedConstraint, self).__init__(message)\n self.constraint = constraint\n\n def __str__(self):\n message = super(UnsupportedConstraint, self).__str__()\n return '{} (constraint was: {})'.format(message, str(self.constraint))\n\n\nclass IncompatibleRequirements(PipToolsError):\n def __init__(self, ireq_a, ireq_b):\n self.ireq_a = ireq_a\n self.ireq_b = ireq_b\n\n def __str__(self):\n message = \"Incompatible requirements found: {} and {}\"\n return message.format(self.ireq_a, self.ireq_b)\n", "path": "piptools/exceptions.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport hashlib\nimport os\nfrom contextlib import contextmanager\nfrom shutil import rmtree\n\nfrom pip.download import is_file_url, url_to_path\nfrom pip.index import PackageFinder\nfrom pip.req.req_set import RequirementSet\nfrom pip.wheel import Wheel\ntry:\n from pip.utils.hashes import FAVORITE_HASH\nexcept ImportError:\n FAVORITE_HASH = 'sha256'\n\nfrom ..cache import CACHE_DIR\nfrom ..exceptions import NoCandidateFound\nfrom ..utils import (fs_str, is_pinned_requirement, lookup_table,\n make_install_requirement, pip_version_info)\nfrom .base import BaseRepository\n\ntry:\n from tempfile import TemporaryDirectory # added in 3.2\nexcept ImportError:\n from .._compat import TemporaryDirectory\n\n\nclass PyPIRepository(BaseRepository):\n DEFAULT_INDEX_URL = 'https://pypi.python.org/simple'\n\n \"\"\"\n The 
PyPIRepository will use the provided Finder instance to lookup\n packages. Typically, it looks up packages on PyPI (the default implicit\n config), but any other PyPI mirror can be used if index_urls is\n changed/configured on the Finder.\n \"\"\"\n def __init__(self, pip_options, session):\n self.session = session\n\n index_urls = [pip_options.index_url] + pip_options.extra_index_urls\n if pip_options.no_index:\n index_urls = []\n\n self.finder = PackageFinder(\n find_links=pip_options.find_links,\n index_urls=index_urls,\n trusted_hosts=pip_options.trusted_hosts,\n allow_all_prereleases=pip_options.pre,\n process_dependency_links=pip_options.process_dependency_links,\n session=self.session,\n )\n\n # Caches\n # stores project_name => InstallationCandidate mappings for all\n # versions reported by PyPI, so we only have to ask once for each\n # project\n self._available_candidates_cache = {}\n\n # stores InstallRequirement => list(InstallRequirement) mappings\n # of all secondary dependencies for the given requirement, so we\n # only have to go to disk once for each requirement\n self._dependencies_cache = {}\n\n # Setup file paths\n self.freshen_build_caches()\n self._download_dir = fs_str(os.path.join(CACHE_DIR, 'pkgs'))\n self._wheel_download_dir = fs_str(os.path.join(CACHE_DIR, 'wheels'))\n\n def freshen_build_caches(self):\n \"\"\"\n Start with fresh build/source caches. Will remove any old build\n caches from disk automatically.\n \"\"\"\n self._build_dir = TemporaryDirectory(fs_str('build'))\n self._source_dir = TemporaryDirectory(fs_str('source'))\n\n @property\n def build_dir(self):\n return self._build_dir.name\n\n @property\n def source_dir(self):\n return self._source_dir.name\n\n def clear_caches(self):\n rmtree(self._download_dir, ignore_errors=True)\n rmtree(self._wheel_download_dir, ignore_errors=True)\n\n def find_all_candidates(self, req_name):\n if req_name not in self._available_candidates_cache:\n # pip 8 changed the internal API, making this a public method\n if pip_version_info >= (8, 0):\n candidates = self.finder.find_all_candidates(req_name)\n else:\n candidates = self.finder._find_all_versions(req_name)\n self._available_candidates_cache[req_name] = candidates\n return self._available_candidates_cache[req_name]\n\n def find_best_match(self, ireq, prereleases=None):\n \"\"\"\n Returns a Version object that indicates the best match for the given\n InstallRequirement according to the external repository.\n \"\"\"\n if ireq.editable:\n return ireq # return itself as the best match\n\n all_candidates = self.find_all_candidates(ireq.name)\n candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version, unique=True)\n matching_versions = ireq.specifier.filter((candidate.version for candidate in all_candidates),\n prereleases=prereleases)\n\n # Reuses pip's internal candidate sort key to sort\n matching_candidates = [candidates_by_version[ver] for ver in matching_versions]\n if not matching_candidates:\n raise NoCandidateFound(ireq, all_candidates, self.finder.index_urls)\n best_candidate = max(matching_candidates, key=self.finder._candidate_sort_key)\n\n # Turn the candidate into a pinned InstallRequirement\n return make_install_requirement(\n best_candidate.project, best_candidate.version, ireq.extras, constraint=ireq.constraint\n )\n\n def get_dependencies(self, ireq):\n \"\"\"\n Given a pinned or an editable InstallRequirement, returns a set of\n dependencies (also InstallRequirements, but not necessarily pinned).\n They indicate the secondary 
dependencies for the given requirement.\n \"\"\"\n if not (ireq.editable or is_pinned_requirement(ireq)):\n raise TypeError('Expected pinned or editable InstallRequirement, got {}'.format(ireq))\n\n if ireq not in self._dependencies_cache:\n if ireq.editable and (ireq.source_dir and os.path.exists(ireq.source_dir)):\n # No download_dir for locally available editable requirements.\n # If a download_dir is passed, pip will unnecessarely\n # archive the entire source directory\n download_dir = None\n elif ireq.link and not ireq.link.is_artifact:\n # No download_dir for VCS sources. This also works around pip\n # using git-checkout-index, which gets rid of the .git dir.\n download_dir = None\n else:\n download_dir = self._download_dir\n if not os.path.isdir(download_dir):\n os.makedirs(download_dir)\n if not os.path.isdir(self._wheel_download_dir):\n os.makedirs(self._wheel_download_dir)\n\n reqset = RequirementSet(self.build_dir,\n self.source_dir,\n download_dir=download_dir,\n wheel_download_dir=self._wheel_download_dir,\n session=self.session)\n self._dependencies_cache[ireq] = reqset._prepare_file(self.finder, ireq)\n return set(self._dependencies_cache[ireq])\n\n def get_hashes(self, ireq):\n \"\"\"\n Given a pinned InstallRequire, returns a set of hashes that represent\n all of the files for a given requirement. It is not acceptable for an\n editable or unpinned requirement to be passed to this function.\n \"\"\"\n if not is_pinned_requirement(ireq):\n raise TypeError(\n \"Expected pinned requirement, not unpinned or editable, got {}\".format(ireq))\n\n # We need to get all of the candidates that match our current version\n # pin, these will represent all of the files that could possibly\n # satisfy this constraint.\n all_candidates = self.find_all_candidates(ireq.name)\n candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version)\n matching_versions = list(\n ireq.specifier.filter((candidate.version for candidate in all_candidates)))\n matching_candidates = candidates_by_version[matching_versions[0]]\n\n return {\n self._get_file_hash(candidate.location)\n for candidate in matching_candidates\n }\n\n def _get_file_hash(self, location):\n h = hashlib.new(FAVORITE_HASH)\n with open_local_or_remote_file(location, self.session) as fp:\n for chunk in iter(lambda: fp.read(8096), b\"\"):\n h.update(chunk)\n return \":\".join([FAVORITE_HASH, h.hexdigest()])\n\n @contextmanager\n def allow_all_wheels(self):\n \"\"\"\n Monkey patches pip.Wheel to allow wheels from all platforms and Python versions.\n\n This also saves the candidate cache and set a new one, or else the results from the\n previous non-patched calls will interfere.\n \"\"\"\n def _wheel_supported(self, tags=None):\n # Ignore current platform. 
Support everything.\n return True\n\n def _wheel_support_index_min(self, tags=None):\n # All wheels are equal priority for sorting.\n return 0\n\n original_wheel_supported = Wheel.supported\n original_support_index_min = Wheel.support_index_min\n original_cache = self._available_candidates_cache\n\n Wheel.supported = _wheel_supported\n Wheel.support_index_min = _wheel_support_index_min\n self._available_candidates_cache = {}\n\n try:\n yield\n finally:\n Wheel.supported = original_wheel_supported\n Wheel.support_index_min = original_support_index_min\n self._available_candidates_cache = original_cache\n\n\n@contextmanager\ndef open_local_or_remote_file(link, session):\n \"\"\"\n Open local or remote file for reading.\n\n :type link: pip.index.Link\n :type session: requests.Session\n :raises ValueError: If link points to a local directory.\n :return: a context manager to the opened file-like object\n \"\"\"\n url = link.url_without_fragment\n\n if is_file_url(link):\n # Local URL\n local_path = url_to_path(url)\n if os.path.isdir(local_path):\n raise ValueError(\"Cannot open directory for read: {}\".format(url))\n else:\n with open(local_path, 'rb') as local_file:\n yield local_file\n else:\n # Remote URL\n headers = {\"Accept-Encoding\": \"identity\"}\n response = session.get(url, headers=headers, stream=True)\n try:\n yield response.raw\n finally:\n response.close()\n", "path": "piptools/repositories/pypi.py"}, {"content": "class PipToolsError(Exception):\n pass\n\n\nclass NoCandidateFound(PipToolsError):\n def __init__(self, ireq, candidates_tried, index_urls):\n self.ireq = ireq\n self.candidates_tried = candidates_tried\n self.index_urls = index_urls\n\n def __str__(self):\n sorted_versions = sorted(c.version for c in self.candidates_tried)\n lines = [\n 'Could not find a version that matches {}'.format(self.ireq),\n 'Tried: {}'.format(', '.join(str(version) for version in sorted_versions) or '(no version found at all)')\n ]\n if sorted_versions:\n lines.append('There are incompatible versions in the resolved dependencies.')\n else:\n lines.append('{} {} reachable?'.format(\n 'Were' if len(self.index_urls) > 1 else 'Was', ' or '.join(self.index_urls))\n )\n return '\\n'.join(lines)\n\n\nclass UnsupportedConstraint(PipToolsError):\n def __init__(self, message, constraint):\n super(UnsupportedConstraint, self).__init__(message)\n self.constraint = constraint\n\n def __str__(self):\n message = super(UnsupportedConstraint, self).__str__()\n return '{} (constraint was: {})'.format(message, str(self.constraint))\n\n\nclass IncompatibleRequirements(PipToolsError):\n def __init__(self, ireq_a, ireq_b):\n self.ireq_a = ireq_a\n self.ireq_b = ireq_b\n\n def __str__(self):\n message = \"Incompatible requirements found: {} and {}\"\n return message.format(self.ireq_a, self.ireq_b)\n", "path": "piptools/exceptions.py"}]} | 3,522 | 429 |
gh_patches_debug_11620 | rasdani/github-patches | git_diff | spack__spack-37500 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect checksum for CRTM fix files @ version 2.4.0_emc
### Steps to reproduce
```console
% cat spack/var/spack/repos/builtin/packages/crtm-fix/package.py
[...]
class CrtmFix(Package):
"""CRTM coefficient files"""
homepage = "https://github.com/NOAA-EMC/crtm"
url = "ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-2.3.0_emc.tgz"
maintainers = [
"BenjaminTJohnson",
"edwardhartnett",
"AlexanderRichert-NOAA",
"Hang-Lei-NOAA",
"climbfuji",
]
version("2.4.0_emc", sha256="88d659ae5bc4434f7fafa232ff65b4c48442d2d1a25f8fc96078094fa572ac1a")
version("2.3.0_emc", sha256="1452af2d1d11d57ef3c57b6b861646541e7042a9b0f3c230f9a82854d7e90924")
[...]
% spack checksum [email protected]_emc
==> Found 1 version of crtm-fix:
2.4.0_emc ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-2.4.0_emc.tgz
==> Fetching ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-2.4.0_emc.tgz
version("2.4.0_emc", sha256="d0f1b2ae2905457f4c3731746892aaa8f6b84ee0691f6228dfbe48917df1e85e")
```
### Error message
```console
==> Installing crtm-fix-2.4.0_emc-ftl3fxdnhmvw43lgf74lunrcms7dxmqf
==> No binary for crtm-fix-2.4.0_emc-ftl3fxdnhmvw43lgf74lunrcms7dxmqf found: installing from source
==> Fetching ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-2.4.0_emc.tgz
==> Error: ChecksumError: sha256 checksum failed for /path/to/cache/build_stage/spack-stage-crtm-fix-2.4.0_emc-ftl3fxdnhmvw43lgf74lunrcms7dxmqf/fix_REL-2.4.0_emc.tgz
Expected 88d659ae5bc4434f7fafa232ff65b4c48442d2d1a25f8fc96078094fa572ac1a but got d0f1b2ae2905457f4c3731746892aaa8f6b84ee0691f6228dfbe48917df1e85e
```
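Assuming the upstream archive was simply re-released (so the new hash can be trusted), the fix reduces to adopting the checksum that `spack checksum` printed in the reproduction steps above:

```python
# Corrected line for crtm-fix/package.py, taken verbatim from the spack checksum output above
version("2.4.0_emc", sha256="d0f1b2ae2905457f4c3731746892aaa8f6b84ee0691f6228dfbe48917df1e85e")
```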
### Information on your system
% spack debug report
* **Spack:** 0.20.0.dev0
* **Python:** 3.9.15
* **Platform:** linux-rhel7-broadwell
* **Concretizer:** clingo
### General information
- [X] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [X] I have searched the issues of this repo and believe this is not a duplicate
- [X] I have run the failing commands in debug mode and reported the output
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/crtm-fix/package.py`
Content:
```
1 # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 import os
7
8 from spack.package import *
9
10
11 class CrtmFix(Package):
12 """CRTM coefficient files"""
13
14 homepage = "https://github.com/NOAA-EMC/crtm"
15 url = "ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-2.3.0_emc.tgz"
16
17 maintainers(
18 "BenjaminTJohnson", "edwardhartnett", "AlexanderRichert-NOAA", "Hang-Lei-NOAA", "climbfuji"
19 )
20
21 version("2.4.0_emc", sha256="88d659ae5bc4434f7fafa232ff65b4c48442d2d1a25f8fc96078094fa572ac1a")
22 version("2.3.0_emc", sha256="1452af2d1d11d57ef3c57b6b861646541e7042a9b0f3c230f9a82854d7e90924")
23
24 variant("big_endian", default=True, description="Install big_endian fix files")
25 variant("little_endian", default=False, description="Install little endian fix files")
26 variant("netcdf", default=True, description="Install netcdf fix files")
27
28 conflicts("+big_endian", when="+little_endian", msg="big_endian and little_endian conflict")
29
30 def url_for_version(self, version):
31 url = "ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-{}.tgz"
32 return url.format(version)
33
34 def install(self, spec, prefix):
35 spec = self.spec
36 mkdir(self.prefix.fix)
37
38 endian_dirs = []
39 if "+big_endian" in spec:
40 endian_dirs.append("Big_Endian")
41 elif "+little_endian" in spec:
42 endian_dirs.append("Little_Endian")
43
44 if "+netcdf" in spec:
45 endian_dirs.extend(["netcdf", "netCDF"])
46
47 fix_files = []
48 for d in endian_dirs:
49 fix_files = fix_files + find(".", "*/{}/*".format(d))
50
51 # Big_Endian amsua_metop-c.SpcCoeff.bin is incorrect
52 # Little_Endian amsua_metop-c_v2.SpcCoeff.bin is what it's supposed to be.
53 # Remove the incorrect file, and install it as noACC,, then install
54 # correct file under new name.
55 if "+big_endian" in spec and spec.version == Version("2.4.0_emc"):
56 remove_path = join_path(
57 os.getcwd(), "fix", "SpcCoeff", "Big_Endian", "amsua_metop-c.SpcCoeff.bin"
58 )
59 fix_files.remove(remove_path)
60
61 # This file is incorrect, install it as a different name.
62 install(
63 join_path("fix", "SpcCoeff", "Big_Endian", "amsua_metop-c.SpcCoeff.bin"),
64 join_path(self.prefix.fix, "amsua_metop-c.SpcCoeff.noACC.bin"),
65 )
66
67 # This "Little_Endian" file is actually the correct one.
68 install(
69 join_path("fix", "SpcCoeff", "Little_Endian", "amsua_metop-c_v2.SpcCoeff.bin"),
70 join_path(self.prefix.fix, "amsua_metop-c.SpcCoeff.bin"),
71 )
72
73 for f in fix_files:
74 install(f, self.prefix.fix)
75
76 def setup_run_environment(self, env):
77 env.set("CRTM_FIX", self.prefix.fix)
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/crtm-fix/package.py b/var/spack/repos/builtin/packages/crtm-fix/package.py
--- a/var/spack/repos/builtin/packages/crtm-fix/package.py
+++ b/var/spack/repos/builtin/packages/crtm-fix/package.py
@@ -18,7 +18,7 @@
"BenjaminTJohnson", "edwardhartnett", "AlexanderRichert-NOAA", "Hang-Lei-NOAA", "climbfuji"
)
- version("2.4.0_emc", sha256="88d659ae5bc4434f7fafa232ff65b4c48442d2d1a25f8fc96078094fa572ac1a")
+ version("2.4.0_emc", sha256="d0f1b2ae2905457f4c3731746892aaa8f6b84ee0691f6228dfbe48917df1e85e")
version("2.3.0_emc", sha256="1452af2d1d11d57ef3c57b6b861646541e7042a9b0f3c230f9a82854d7e90924")
variant("big_endian", default=True, description="Install big_endian fix files")
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/crtm-fix/package.py b/var/spack/repos/builtin/packages/crtm-fix/package.py\n--- a/var/spack/repos/builtin/packages/crtm-fix/package.py\n+++ b/var/spack/repos/builtin/packages/crtm-fix/package.py\n@@ -18,7 +18,7 @@\n \"BenjaminTJohnson\", \"edwardhartnett\", \"AlexanderRichert-NOAA\", \"Hang-Lei-NOAA\", \"climbfuji\"\n )\n \n- version(\"2.4.0_emc\", sha256=\"88d659ae5bc4434f7fafa232ff65b4c48442d2d1a25f8fc96078094fa572ac1a\")\n+ version(\"2.4.0_emc\", sha256=\"d0f1b2ae2905457f4c3731746892aaa8f6b84ee0691f6228dfbe48917df1e85e\")\n version(\"2.3.0_emc\", sha256=\"1452af2d1d11d57ef3c57b6b861646541e7042a9b0f3c230f9a82854d7e90924\")\n \n variant(\"big_endian\", default=True, description=\"Install big_endian fix files\")\n", "issue": "Incorrect checksum for CRTM fix files @ version 2.4.0_emc\n### Steps to reproduce\n\n```console\r\n% cat spack/var/spack/repos/builtin/packages/crtm-fix/package.py \r\n[...]\r\nclass CrtmFix(Package):\r\n \"\"\"CRTM coefficient files\"\"\"\r\n\r\n homepage = \"https://github.com/NOAA-EMC/crtm\"\r\n url = \"ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-2.3.0_emc.tgz\"\r\n\r\n maintainers = [\r\n \"BenjaminTJohnson\",\r\n \"edwardhartnett\",\r\n \"AlexanderRichert-NOAA\",\r\n \"Hang-Lei-NOAA\",\r\n \"climbfuji\",\r\n ]\r\n\r\n version(\"2.4.0_emc\", sha256=\"88d659ae5bc4434f7fafa232ff65b4c48442d2d1a25f8fc96078094fa572ac1a\")\r\n version(\"2.3.0_emc\", sha256=\"1452af2d1d11d57ef3c57b6b861646541e7042a9b0f3c230f9a82854d7e90924\")\r\n[...]\r\n\r\n% spack checksum [email protected]_emc\r\n==> Found 1 version of crtm-fix:\r\n \r\n 2.4.0_emc ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-2.4.0_emc.tgz\r\n\r\n==> Fetching ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-2.4.0_emc.tgz\r\n\r\n version(\"2.4.0_emc\", sha256=\"d0f1b2ae2905457f4c3731746892aaa8f6b84ee0691f6228dfbe48917df1e85e\")\r\n```\n\n### Error message\n\n```console\r\n==> Installing crtm-fix-2.4.0_emc-ftl3fxdnhmvw43lgf74lunrcms7dxmqf\r\n==> No binary for crtm-fix-2.4.0_emc-ftl3fxdnhmvw43lgf74lunrcms7dxmqf found: installing from source\r\n==> Fetching ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-2.4.0_emc.tgz\r\n==> Error: ChecksumError: sha256 checksum failed for /path/to/cache/build_stage/spack-stage-crtm-fix-2.4.0_emc-ftl3fxdnhmvw43lgf74lunrcms7dxmqf/fix_REL-2.4.0_emc.tgz\r\n Expected 88d659ae5bc4434f7fafa232ff65b4c48442d2d1a25f8fc96078094fa572ac1a but got d0f1b2ae2905457f4c3731746892aaa8f6b84ee0691f6228dfbe48917df1e85e\r\n```\n\n### Information on your system\n\n% spack debug report\r\n* **Spack:** 0.20.0.dev0\r\n* **Python:** 3.9.15\r\n* **Platform:** linux-rhel7-broadwell\r\n* **Concretizer:** clingo\n\n### General information\n\n- [X] I have run `spack debug report` and reported the version of Spack/Python/Platform\n- [X] I have searched the issues of this repo and believe this is not a duplicate\n- [X] I have run the failing commands in debug mode and reported the output\n", "before_files": [{"content": "# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport os\n\nfrom spack.package import *\n\n\nclass CrtmFix(Package):\n \"\"\"CRTM coefficient files\"\"\"\n\n homepage = \"https://github.com/NOAA-EMC/crtm\"\n url = \"ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-2.3.0_emc.tgz\"\n\n maintainers(\n \"BenjaminTJohnson\", \"edwardhartnett\", \"AlexanderRichert-NOAA\", \"Hang-Lei-NOAA\", \"climbfuji\"\n )\n\n version(\"2.4.0_emc\", sha256=\"88d659ae5bc4434f7fafa232ff65b4c48442d2d1a25f8fc96078094fa572ac1a\")\n version(\"2.3.0_emc\", sha256=\"1452af2d1d11d57ef3c57b6b861646541e7042a9b0f3c230f9a82854d7e90924\")\n\n variant(\"big_endian\", default=True, description=\"Install big_endian fix files\")\n variant(\"little_endian\", default=False, description=\"Install little endian fix files\")\n variant(\"netcdf\", default=True, description=\"Install netcdf fix files\")\n\n conflicts(\"+big_endian\", when=\"+little_endian\", msg=\"big_endian and little_endian conflict\")\n\n def url_for_version(self, version):\n url = \"ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-{}.tgz\"\n return url.format(version)\n\n def install(self, spec, prefix):\n spec = self.spec\n mkdir(self.prefix.fix)\n\n endian_dirs = []\n if \"+big_endian\" in spec:\n endian_dirs.append(\"Big_Endian\")\n elif \"+little_endian\" in spec:\n endian_dirs.append(\"Little_Endian\")\n\n if \"+netcdf\" in spec:\n endian_dirs.extend([\"netcdf\", \"netCDF\"])\n\n fix_files = []\n for d in endian_dirs:\n fix_files = fix_files + find(\".\", \"*/{}/*\".format(d))\n\n # Big_Endian amsua_metop-c.SpcCoeff.bin is incorrect\n # Little_Endian amsua_metop-c_v2.SpcCoeff.bin is what it's supposed to be.\n # Remove the incorrect file, and install it as noACC,, then install\n # correct file under new name.\n if \"+big_endian\" in spec and spec.version == Version(\"2.4.0_emc\"):\n remove_path = join_path(\n os.getcwd(), \"fix\", \"SpcCoeff\", \"Big_Endian\", \"amsua_metop-c.SpcCoeff.bin\"\n )\n fix_files.remove(remove_path)\n\n # This file is incorrect, install it as a different name.\n install(\n join_path(\"fix\", \"SpcCoeff\", \"Big_Endian\", \"amsua_metop-c.SpcCoeff.bin\"),\n join_path(self.prefix.fix, \"amsua_metop-c.SpcCoeff.noACC.bin\"),\n )\n\n # This \"Little_Endian\" file is actually the correct one.\n install(\n join_path(\"fix\", \"SpcCoeff\", \"Little_Endian\", \"amsua_metop-c_v2.SpcCoeff.bin\"),\n join_path(self.prefix.fix, \"amsua_metop-c.SpcCoeff.bin\"),\n )\n\n for f in fix_files:\n install(f, self.prefix.fix)\n\n def setup_run_environment(self, env):\n env.set(\"CRTM_FIX\", self.prefix.fix)\n", "path": "var/spack/repos/builtin/packages/crtm-fix/package.py"}], "after_files": [{"content": "# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport os\n\nfrom spack.package import *\n\n\nclass CrtmFix(Package):\n \"\"\"CRTM coefficient files\"\"\"\n\n homepage = \"https://github.com/NOAA-EMC/crtm\"\n url = \"ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-2.3.0_emc.tgz\"\n\n maintainers(\n \"BenjaminTJohnson\", \"edwardhartnett\", \"AlexanderRichert-NOAA\", \"Hang-Lei-NOAA\", \"climbfuji\"\n )\n\n version(\"2.4.0_emc\", sha256=\"d0f1b2ae2905457f4c3731746892aaa8f6b84ee0691f6228dfbe48917df1e85e\")\n version(\"2.3.0_emc\", sha256=\"1452af2d1d11d57ef3c57b6b861646541e7042a9b0f3c230f9a82854d7e90924\")\n\n variant(\"big_endian\", default=True, description=\"Install big_endian fix files\")\n variant(\"little_endian\", default=False, description=\"Install little endian fix files\")\n variant(\"netcdf\", default=True, description=\"Install netcdf fix files\")\n\n conflicts(\"+big_endian\", when=\"+little_endian\", msg=\"big_endian and little_endian conflict\")\n\n def url_for_version(self, version):\n url = \"ftp://ftp.ssec.wisc.edu/pub/s4/CRTM/fix_REL-{}.tgz\"\n return url.format(version)\n\n def install(self, spec, prefix):\n spec = self.spec\n mkdir(self.prefix.fix)\n\n endian_dirs = []\n if \"+big_endian\" in spec:\n endian_dirs.append(\"Big_Endian\")\n elif \"+little_endian\" in spec:\n endian_dirs.append(\"Little_Endian\")\n\n if \"+netcdf\" in spec:\n endian_dirs.extend([\"netcdf\", \"netCDF\"])\n\n fix_files = []\n for d in endian_dirs:\n fix_files = fix_files + find(\".\", \"*/{}/*\".format(d))\n\n # Big_Endian amsua_metop-c.SpcCoeff.bin is incorrect\n # Little_Endian amsua_metop-c_v2.SpcCoeff.bin is what it's supposed to be.\n # Remove the incorrect file, and install it as noACC,, then install\n # correct file under new name.\n if \"+big_endian\" in spec and spec.version == Version(\"2.4.0_emc\"):\n remove_path = join_path(\n os.getcwd(), \"fix\", \"SpcCoeff\", \"Big_Endian\", \"amsua_metop-c.SpcCoeff.bin\"\n )\n fix_files.remove(remove_path)\n\n # This file is incorrect, install it as a different name.\n install(\n join_path(\"fix\", \"SpcCoeff\", \"Big_Endian\", \"amsua_metop-c.SpcCoeff.bin\"),\n join_path(self.prefix.fix, \"amsua_metop-c.SpcCoeff.noACC.bin\"),\n )\n\n # This \"Little_Endian\" file is actually the correct one.\n install(\n join_path(\"fix\", \"SpcCoeff\", \"Little_Endian\", \"amsua_metop-c_v2.SpcCoeff.bin\"),\n join_path(self.prefix.fix, \"amsua_metop-c.SpcCoeff.bin\"),\n )\n\n for f in fix_files:\n install(f, self.prefix.fix)\n\n def setup_run_environment(self, env):\n env.set(\"CRTM_FIX\", self.prefix.fix)\n", "path": "var/spack/repos/builtin/packages/crtm-fix/package.py"}]} | 2,203 | 361 |
gh_patches_debug_11914 | rasdani/github-patches | git_diff | pytorch__ignite-2984 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix warning in fast_neural_style example
Here is another good first issue to improve the ignite project. Currently, we have a warning on this line: https://github.com/pytorch/ignite/blob/master/examples/fast_neural_style/vgg.py#L10 (fast neural style example)
```
/opt/hostedtoolcache/Python/3.9.17/x64/lib/python3.9/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.
  warnings.warn(
/opt/hostedtoolcache/Python/3.9.17/x64/lib/python3.9/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=VGG16_Weights.IMAGENET1K_V1`. You can also use `weights=VGG16_Weights.DEFAULT` to get the most up-to-date weights.
```
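A minimal sketch of the non-deprecated call the warning asks for; the import path matches torchvision >= 0.13, and `IMAGENET1K_V1` reproduces the old `pretrained=True` behaviour:

```python
from torchvision import models
from torchvision.models.vgg import VGG16_Weights

# equivalent to the deprecated models.vgg16(pretrained=True)
vgg_pretrained_features = models.vgg16(weights=VGG16_Weights.IMAGENET1K_V1).features
```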
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/fast_neural_style/vgg.py`
Content:
```
1 from collections import namedtuple
2
3 import torch
4 from torchvision import models
5
6
7 class Vgg16(torch.nn.Module):
8 def __init__(self, requires_grad=False):
9 super(Vgg16, self).__init__()
10 vgg_pretrained_features = models.vgg16(pretrained=True).features
11 self.slice1 = torch.nn.Sequential()
12 self.slice2 = torch.nn.Sequential()
13 self.slice3 = torch.nn.Sequential()
14 self.slice4 = torch.nn.Sequential()
15 for x in range(4):
16 self.slice1.add_module(str(x), vgg_pretrained_features[x])
17 for x in range(4, 9):
18 self.slice2.add_module(str(x), vgg_pretrained_features[x])
19 for x in range(9, 16):
20 self.slice3.add_module(str(x), vgg_pretrained_features[x])
21 for x in range(16, 23):
22 self.slice4.add_module(str(x), vgg_pretrained_features[x])
23 if not requires_grad:
24 for param in self.parameters():
25 param.requires_grad = False
26
27 def forward(self, X):
28 h = self.slice1(X)
29 h_relu1_2 = h
30 h = self.slice2(h)
31 h_relu2_2 = h
32 h = self.slice3(h)
33 h_relu3_3 = h
34 h = self.slice4(h)
35 h_relu4_3 = h
36 vgg_outputs = namedtuple("VggOutputs", ["relu1_2", "relu2_2", "relu3_3", "relu4_3"])
37 out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)
38 return out
39
```
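For orientation, a small hedged usage sketch of the class above (the input size and the inspected slice are illustrative, not taken from the example's training script):

```python
import torch

vgg = Vgg16(requires_grad=False).eval()      # downloads VGG16 weights on first use
with torch.no_grad():
    feats = vgg(torch.rand(1, 3, 256, 256))  # any NCHW image batch
print(feats.relu2_2.shape)                   # each namedtuple field holds one intermediate activation
```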
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/fast_neural_style/vgg.py b/examples/fast_neural_style/vgg.py
--- a/examples/fast_neural_style/vgg.py
+++ b/examples/fast_neural_style/vgg.py
@@ -2,12 +2,13 @@
import torch
from torchvision import models
+from torchvision.models.vgg import VGG16_Weights
class Vgg16(torch.nn.Module):
def __init__(self, requires_grad=False):
super(Vgg16, self).__init__()
- vgg_pretrained_features = models.vgg16(pretrained=True).features
+ vgg_pretrained_features = models.vgg16(weights=VGG16_Weights.IMAGENET1K_V1).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
| {"golden_diff": "diff --git a/examples/fast_neural_style/vgg.py b/examples/fast_neural_style/vgg.py\n--- a/examples/fast_neural_style/vgg.py\n+++ b/examples/fast_neural_style/vgg.py\n@@ -2,12 +2,13 @@\n \n import torch\n from torchvision import models\n+from torchvision.models.vgg import VGG16_Weights\n \n \n class Vgg16(torch.nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg16, self).__init__()\n- vgg_pretrained_features = models.vgg16(pretrained=True).features\n+ vgg_pretrained_features = models.vgg16(weights=VGG16_Weights.IMAGENET1K_V1).features\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n", "issue": "Fix warning in fast_neural_style example\nHere is another good first issue to improve the ignite project. Currently, we have a warning on this line: https://github.com/pytorch/ignite/blob/master/examples/fast_neural_style/vgg.py#L10 (fast neural style example)\r\n /opt/hostedtoolcache/Python/3.9.17/x64/lib/python3.9/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.\r\n warnings.warn(\r\n/opt/hostedtoolcache/Python/3.9.17/x64/lib/python3.9/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=VGG16_Weights.IMAGENET1K_V1`. You can also use `weights=VGG16_Weights.DEFAULT` to get the most up-to-date weights.\n", "before_files": [{"content": "from collections import namedtuple\n\nimport torch\nfrom torchvision import models\n\n\nclass Vgg16(torch.nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg16, self).__init__()\n vgg_pretrained_features = models.vgg16(pretrained=True).features\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n for x in range(4):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(4, 9):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(9, 16):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(16, 23):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n h = self.slice1(X)\n h_relu1_2 = h\n h = self.slice2(h)\n h_relu2_2 = h\n h = self.slice3(h)\n h_relu3_3 = h\n h = self.slice4(h)\n h_relu4_3 = h\n vgg_outputs = namedtuple(\"VggOutputs\", [\"relu1_2\", \"relu2_2\", \"relu3_3\", \"relu4_3\"])\n out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)\n return out\n", "path": "examples/fast_neural_style/vgg.py"}], "after_files": [{"content": "from collections import namedtuple\n\nimport torch\nfrom torchvision import models\nfrom torchvision.models.vgg import VGG16_Weights\n\n\nclass Vgg16(torch.nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg16, self).__init__()\n vgg_pretrained_features = models.vgg16(weights=VGG16_Weights.IMAGENET1K_V1).features\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n for x in range(4):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(4, 9):\n self.slice2.add_module(str(x), 
vgg_pretrained_features[x])\n for x in range(9, 16):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(16, 23):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n h = self.slice1(X)\n h_relu1_2 = h\n h = self.slice2(h)\n h_relu2_2 = h\n h = self.slice3(h)\n h_relu3_3 = h\n h = self.slice4(h)\n h_relu4_3 = h\n vgg_outputs = namedtuple(\"VggOutputs\", [\"relu1_2\", \"relu2_2\", \"relu3_3\", \"relu4_3\"])\n out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)\n return out\n", "path": "examples/fast_neural_style/vgg.py"}]} | 955 | 189 |
gh_patches_debug_24196 | rasdani/github-patches | git_diff | NVIDIA__TransformerEngine-631 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Does `transformer_engine.pytorch.RMSNorm` support `zero_centered_gamma`?
The [doc](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/pytorch.html#transformer_engine.pytorch.RMSNorm) explains the option `zero_centered_gamma` clearly, but I found the implementation at [normalization.cu:L221](https://github.com/NVIDIA/TransformerEngine/blob/82555b3fd1ddece3a886fac3c7a4aa8d6f8c1955/transformer_engine/pytorch/csrc/extensions/normalization.cu#L221) asserts `zero_centered_gamma == false`.
Is there any limitation?
--- END ISSUE ---
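As a rough orientation before the file listing, this is how the documented option would typically be exercised from user code. It is only a sketch: the constructor arguments mirror the docstring of the module shown below, while the hidden size, the input shape, and the availability of a CUDA device are assumptions made for illustration.
```python
# Minimal usage sketch; not a verified reproduction of the reported assertion.
import torch
import transformer_engine.pytorch as te

# Per the docs, gamma starts at zero and the layer applies (1 + gamma) when
# zero_centered_gamma=True; the issue asks whether this path is actually supported.
norm = te.RMSNorm(hidden_size=1024, eps=1e-5, zero_centered_gamma=True)  # device defaults to "cuda"
x = torch.randn(8, 1024, device="cuda")
y = norm(x)
```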
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `transformer_engine/pytorch/module/rmsnorm.py`
Content:
```
1 # Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 #
3 # See LICENSE for license information.
4
5 """RMSNorm API"""
6 import os
7 import warnings
8 from typing import Union, Tuple, Optional
9
10 import torch
11 from torch.nn.parameter import Parameter
12 from torch.nn import init
13
14 from .base import TransformerEngineBaseModule
15 from .. import cpp_extensions as tex
16 from ..jit import no_torch_dynamo
17 from ..utils import cast_if_needed
18
19
20 __all__ = ["RMSNorm"]
21
22
23 class _RMSNorm(torch.autograd.Function):
24 """functional RMSNorm"""
25
26 @staticmethod
27 def forward(
28 ctx,
29 inp: torch.Tensor,
30 rmsnorm_weight: torch.Tensor,
31 eps: float,
32 fwd_rmsnorm_sm_margin: int,
33 bwd_rmsnorm_sm_margin: int,
34 zero_centered_gamma: bool,
35 is_grad_enabled: bool,
36 activation_dtype: torch.dtype,
37 ) -> torch.Tensor:
38 # Make sure input dimensions are compatible
39 in_features = rmsnorm_weight.numel()
40 assert inp.is_cuda, "TransformerEngine needs CUDA."
41 assert inp.shape[-1] == in_features, "RMSNorm not possible"
42 inputmat = inp.view((-1, in_features))
43
44 # Cast for native AMP
45 inputmat = cast_if_needed(inputmat, activation_dtype)
46 rmsnorm_weight = cast_if_needed(rmsnorm_weight, activation_dtype)
47
48 if is_grad_enabled:
49 rmsnorm_out, rsigma = tex.rmsnorm_fwd(inputmat, rmsnorm_weight,
50 eps, fwd_rmsnorm_sm_margin,
51 zero_centered_gamma)
52 ctx.save_for_backward(inputmat, rmsnorm_weight, rsigma)
53 ctx.inp_shape = inp.shape
54 ctx.bwd_rmsnorm_sm_margin = bwd_rmsnorm_sm_margin
55 ctx.zero_centered_gamma = zero_centered_gamma
56 else:
57 rmsnorm_out = tex.rmsnorm_fwd_inf(inputmat, rmsnorm_weight,
58 eps,
59 zero_centered_gamma)
60 return rmsnorm_out.view_as(inp)
61
62 @staticmethod
63 def backward(
64 ctx, grad_output: torch.Tensor
65 ) -> Tuple[Union[torch.Tensor, None], ...]:
66 inputmat, rmsnorm_weight, rsigma = ctx.saved_tensors
67 grad_output = grad_output.contiguous()
68 d_rmsnorm_out = grad_output.view(inputmat.shape)
69 dxmat, dgamma = tex.rmsnorm_bwd(
70 d_rmsnorm_out, inputmat, rsigma, rmsnorm_weight,
71 ctx.bwd_rmsnorm_sm_margin, ctx.zero_centered_gamma
72 )
73 return (
74 dxmat.view(ctx.inp_shape),
75 dgamma,
76 None,
77 None,
78 None,
79 None,
80 None,
81 None,
82 )
83
84
85 class RMSNorm(torch.nn.Module):
86 r"""
87 Applies Root Mean Square Layer Normalization over a mini-batch of inputs as described in
88 the paper `Root Mean Square Layer Normalization <https://arxiv.org/abs/1910.07467>`__
89
90 .. math::
91 y = \frac{x}{RMS_\varepsilon(x)} * \gamma
92
93 where
94
95 .. math::
96 RMS_\varepsilon(x) = \sqrt{\frac{1}{n}\sum_{i=0}^nx_i^2 + \varepsilon}
97
98 :math:`\gamma` is a learnable affine transform parameter of size :attr:`hidden_size`
99
100 Parameters
101 ----------
102 hidden_size : int
103 size of each input sample.
104 eps : float, default = 1e-5
105 a value added to the denominator of layer normalization for numerical stability.
106 sequence_parallel : bool, default = `False`
107 if set to `True`, uses sequence parallelism.
108 params_dtype : torch.dtype, default = `torch.get_default_dtype()`
109 it controls the type used to allocate the initial parameters. Useful when
110 the model is trained with lower precision and the original FP32 parameters
111 would not fit in GPU memory.
112 zero_centered_gamma : bool, default = 'False'
113 if set to 'True', gamma parameter in RMSNorm is initialized to 0 and
114 the RMSNorm formula changes to
115
116 .. math::
117 y = \frac{x}{RMS(x) + \varepsilon} * (1 + \gamma)
118 device : Union[torch.device, str], default = "cuda"
119 The device on which the parameters of the model will allocated. It is the user's
120 responsibility to ensure all parameters are moved to the GPU before running the
121 forward pass.
122 """
123
124 def __init__(
125 self,
126 hidden_size: int,
127 eps: float = 1e-5,
128 sequence_parallel: bool = False,
129 params_dtype: Optional[torch.dtype] = None,
130 zero_centered_gamma: bool = False,
131 device: Union[torch.device, str] = "cuda",
132 ) -> None:
133 super().__init__()
134 params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
135 self.eps = eps
136 self.zero_centered_gamma = zero_centered_gamma
137 self.weight = Parameter(
138 torch.empty(
139 hidden_size,
140 device=device,
141 dtype=params_dtype,
142 )
143 )
144 self.sequence_parallel = sequence_parallel
145
146 self.reset_parameters(defer_init=(device == 'meta'))
147
148 # These many SMs are subtracted from the total SM count when calling forward
149 # and backward RMSNorm C APIs. These envvars can be used to prevent the LN
150 # kernels from using all SMs in the device. This is useful for cases such as
151 # communication overlap with RMSNorm.
152 self.fwd_rmsnorm_sm_margin = int(os.getenv("NVTE_FWD_LAYERNORM_SM_MARGIN", "0"))
153 self.bwd_rmsnorm_sm_margin = int(os.getenv("NVTE_BWD_LAYERNORM_SM_MARGIN", "0"))
154
155 def reset_rms_norm_parameters(self) -> None:
156 """Init RMSNorm params"""
157 warnings.warn(
158 ("This method will be deprecated in an upcoming release. "
159 "Update your code to use RMSNorm.reset_parameters() instead."),
160 DeprecationWarning,
161 stacklevel=2
162 )
163 if not self.zero_centered_gamma:
164 init.ones_(self.weight)
165 else:
166 init.zeros_(self.weight)
167
168 def reset_parameters(self, defer_init=False) -> None:
169 """Reset RMSNorm parameters"""
170 if defer_init:
171 return
172
173 if self.weight.device == torch.device('meta'):
174 self.weight = torch.nn.Parameter(torch.empty_like(self.weight, device='cuda'))
175 init.constant_(self.weight, float(not self.zero_centered_gamma))
176 setattr(self.weight, "sequence_parallel", self.sequence_parallel)
177
178 @no_torch_dynamo()
179 def forward(self, inp: torch.Tensor) -> torch.Tensor:
180 """RMSNorm FWD"""
181
182 # Set the activation type for AMP.
183 TransformerEngineBaseModule.set_activation_dtype(self, inp)
184
185 if torch.is_grad_enabled():
186 fwd_fn = _RMSNorm.apply
187 args = []
188 else:
189 fwd_fn = _RMSNorm.forward
190 args = [None]
191
192 args += (
193 inp,
194 self.weight,
195 self.eps,
196 self.fwd_rmsnorm_sm_margin,
197 self.bwd_rmsnorm_sm_margin,
198 self.zero_centered_gamma,
199 torch.is_grad_enabled(),
200 self.activation_dtype,
201 )
202
203 return fwd_fn(*args)
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/transformer_engine/pytorch/module/rmsnorm.py b/transformer_engine/pytorch/module/rmsnorm.py
--- a/transformer_engine/pytorch/module/rmsnorm.py
+++ b/transformer_engine/pytorch/module/rmsnorm.py
@@ -114,7 +114,7 @@
the RMSNorm formula changes to
.. math::
- y = \frac{x}{RMS(x) + \varepsilon} * (1 + \gamma)
+ y = \frac{x}{RMS_\varepsilon(x)} * (1 + \gamma)
device : Union[torch.device, str], default = "cuda"
The device on which the parameters of the model will allocated. It is the user's
responsibility to ensure all parameters are moved to the GPU before running the
@@ -155,7 +155,7 @@
def reset_rms_norm_parameters(self) -> None:
"""Init RMSNorm params"""
warnings.warn(
- ("This method will be deprecated in an upcoming release. "
+ ("This method is deprecated and will be removed in an upcoming release. "
"Update your code to use RMSNorm.reset_parameters() instead."),
DeprecationWarning,
stacklevel=2
| {"golden_diff": "diff --git a/transformer_engine/pytorch/module/rmsnorm.py b/transformer_engine/pytorch/module/rmsnorm.py\n--- a/transformer_engine/pytorch/module/rmsnorm.py\n+++ b/transformer_engine/pytorch/module/rmsnorm.py\n@@ -114,7 +114,7 @@\n the RMSNorm formula changes to\n \n .. math::\n- y = \\frac{x}{RMS(x) + \\varepsilon} * (1 + \\gamma)\n+ y = \\frac{x}{RMS_\\varepsilon(x)} * (1 + \\gamma)\n device : Union[torch.device, str], default = \"cuda\"\n The device on which the parameters of the model will allocated. It is the user's\n responsibility to ensure all parameters are moved to the GPU before running the\n@@ -155,7 +155,7 @@\n def reset_rms_norm_parameters(self) -> None:\n \"\"\"Init RMSNorm params\"\"\"\n warnings.warn(\n- (\"This method will be deprecated in an upcoming release. \"\n+ (\"This method is deprecated and will be removed in an upcoming release. \"\n \"Update your code to use RMSNorm.reset_parameters() instead.\"),\n DeprecationWarning,\n stacklevel=2\n", "issue": "Does `transformer_engine.pytorch.RMSNorm` support `zero_centered_gamma`?\nThe [doc](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/pytorch.html#transformer_engine.pytorch.RMSNorm) explains the option `zero_centered_gamma` clearly, but I found the implementation at [normalization.cu:L221](https://github.com/NVIDIA/TransformerEngine/blob/82555b3fd1ddece3a886fac3c7a4aa8d6f8c1955/transformer_engine/pytorch/csrc/extensions/normalization.cu#L221) assert `zero_centered_gamma == false`.\r\n\r\nIs there any limitation?\n", "before_files": [{"content": "# Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# See LICENSE for license information.\n\n\"\"\"RMSNorm API\"\"\"\nimport os\nimport warnings\nfrom typing import Union, Tuple, Optional\n\nimport torch\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import init\n\nfrom .base import TransformerEngineBaseModule\nfrom .. 
import cpp_extensions as tex\nfrom ..jit import no_torch_dynamo\nfrom ..utils import cast_if_needed\n\n\n__all__ = [\"RMSNorm\"]\n\n\nclass _RMSNorm(torch.autograd.Function):\n \"\"\"functional RMSNorm\"\"\"\n\n @staticmethod\n def forward(\n ctx,\n inp: torch.Tensor,\n rmsnorm_weight: torch.Tensor,\n eps: float,\n fwd_rmsnorm_sm_margin: int,\n bwd_rmsnorm_sm_margin: int,\n zero_centered_gamma: bool,\n is_grad_enabled: bool,\n activation_dtype: torch.dtype,\n ) -> torch.Tensor:\n # Make sure input dimensions are compatible\n in_features = rmsnorm_weight.numel()\n assert inp.is_cuda, \"TransformerEngine needs CUDA.\"\n assert inp.shape[-1] == in_features, \"RMSNorm not possible\"\n inputmat = inp.view((-1, in_features))\n\n # Cast for native AMP\n inputmat = cast_if_needed(inputmat, activation_dtype)\n rmsnorm_weight = cast_if_needed(rmsnorm_weight, activation_dtype)\n\n if is_grad_enabled:\n rmsnorm_out, rsigma = tex.rmsnorm_fwd(inputmat, rmsnorm_weight,\n eps, fwd_rmsnorm_sm_margin,\n zero_centered_gamma)\n ctx.save_for_backward(inputmat, rmsnorm_weight, rsigma)\n ctx.inp_shape = inp.shape\n ctx.bwd_rmsnorm_sm_margin = bwd_rmsnorm_sm_margin\n ctx.zero_centered_gamma = zero_centered_gamma\n else:\n rmsnorm_out = tex.rmsnorm_fwd_inf(inputmat, rmsnorm_weight,\n eps,\n zero_centered_gamma)\n return rmsnorm_out.view_as(inp)\n\n @staticmethod\n def backward(\n ctx, grad_output: torch.Tensor\n ) -> Tuple[Union[torch.Tensor, None], ...]:\n inputmat, rmsnorm_weight, rsigma = ctx.saved_tensors\n grad_output = grad_output.contiguous()\n d_rmsnorm_out = grad_output.view(inputmat.shape)\n dxmat, dgamma = tex.rmsnorm_bwd(\n d_rmsnorm_out, inputmat, rsigma, rmsnorm_weight,\n ctx.bwd_rmsnorm_sm_margin, ctx.zero_centered_gamma\n )\n return (\n dxmat.view(ctx.inp_shape),\n dgamma,\n None,\n None,\n None,\n None,\n None,\n None,\n )\n\n\nclass RMSNorm(torch.nn.Module):\n r\"\"\"\n Applies Root Mean Square Layer Normalization over a mini-batch of inputs as described in\n the paper `Root Mean Square Layer Normalization <https://arxiv.org/abs/1910.07467>`__\n\n .. math::\n y = \\frac{x}{RMS_\\varepsilon(x)} * \\gamma\n\n where\n\n .. math::\n RMS_\\varepsilon(x) = \\sqrt{\\frac{1}{n}\\sum_{i=0}^nx_i^2 + \\varepsilon}\n\n :math:`\\gamma` is a learnable affine transform parameter of size :attr:`hidden_size`\n\n Parameters\n ----------\n hidden_size : int\n size of each input sample.\n eps : float, default = 1e-5\n a value added to the denominator of layer normalization for numerical stability.\n sequence_parallel : bool, default = `False`\n if set to `True`, uses sequence parallelism.\n params_dtype : torch.dtype, default = `torch.get_default_dtype()`\n it controls the type used to allocate the initial parameters. Useful when\n the model is trained with lower precision and the original FP32 parameters\n would not fit in GPU memory.\n zero_centered_gamma : bool, default = 'False'\n if set to 'True', gamma parameter in RMSNorm is initialized to 0 and\n the RMSNorm formula changes to\n\n .. math::\n y = \\frac{x}{RMS(x) + \\varepsilon} * (1 + \\gamma)\n device : Union[torch.device, str], default = \"cuda\"\n The device on which the parameters of the model will allocated. 
It is the user's\n responsibility to ensure all parameters are moved to the GPU before running the\n forward pass.\n \"\"\"\n\n def __init__(\n self,\n hidden_size: int,\n eps: float = 1e-5,\n sequence_parallel: bool = False,\n params_dtype: Optional[torch.dtype] = None,\n zero_centered_gamma: bool = False,\n device: Union[torch.device, str] = \"cuda\",\n ) -> None:\n super().__init__()\n params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype\n self.eps = eps\n self.zero_centered_gamma = zero_centered_gamma\n self.weight = Parameter(\n torch.empty(\n hidden_size,\n device=device,\n dtype=params_dtype,\n )\n )\n self.sequence_parallel = sequence_parallel\n\n self.reset_parameters(defer_init=(device == 'meta'))\n\n # These many SMs are subtracted from the total SM count when calling forward\n # and backward RMSNorm C APIs. These envvars can be used to prevent the LN\n # kernels from using all SMs in the device. This is useful for cases such as\n # communication overlap with RMSNorm.\n self.fwd_rmsnorm_sm_margin = int(os.getenv(\"NVTE_FWD_LAYERNORM_SM_MARGIN\", \"0\"))\n self.bwd_rmsnorm_sm_margin = int(os.getenv(\"NVTE_BWD_LAYERNORM_SM_MARGIN\", \"0\"))\n\n def reset_rms_norm_parameters(self) -> None:\n \"\"\"Init RMSNorm params\"\"\"\n warnings.warn(\n (\"This method will be deprecated in an upcoming release. \"\n \"Update your code to use RMSNorm.reset_parameters() instead.\"),\n DeprecationWarning,\n stacklevel=2\n )\n if not self.zero_centered_gamma:\n init.ones_(self.weight)\n else:\n init.zeros_(self.weight)\n\n def reset_parameters(self, defer_init=False) -> None:\n \"\"\"Reset RMSNorm parameters\"\"\"\n if defer_init:\n return\n\n if self.weight.device == torch.device('meta'):\n self.weight = torch.nn.Parameter(torch.empty_like(self.weight, device='cuda'))\n init.constant_(self.weight, float(not self.zero_centered_gamma))\n setattr(self.weight, \"sequence_parallel\", self.sequence_parallel)\n\n @no_torch_dynamo()\n def forward(self, inp: torch.Tensor) -> torch.Tensor:\n \"\"\"RMSNorm FWD\"\"\"\n\n # Set the activation type for AMP.\n TransformerEngineBaseModule.set_activation_dtype(self, inp)\n\n if torch.is_grad_enabled():\n fwd_fn = _RMSNorm.apply\n args = []\n else:\n fwd_fn = _RMSNorm.forward\n args = [None]\n\n args += (\n inp,\n self.weight,\n self.eps,\n self.fwd_rmsnorm_sm_margin,\n self.bwd_rmsnorm_sm_margin,\n self.zero_centered_gamma,\n torch.is_grad_enabled(),\n self.activation_dtype,\n )\n\n return fwd_fn(*args)\n", "path": "transformer_engine/pytorch/module/rmsnorm.py"}], "after_files": [{"content": "# Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# See LICENSE for license information.\n\n\"\"\"RMSNorm API\"\"\"\nimport os\nimport warnings\nfrom typing import Union, Tuple, Optional\n\nimport torch\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import init\n\nfrom .base import TransformerEngineBaseModule\nfrom .. 
import cpp_extensions as tex\nfrom ..jit import no_torch_dynamo\nfrom ..utils import cast_if_needed\n\n\n__all__ = [\"RMSNorm\"]\n\n\nclass _RMSNorm(torch.autograd.Function):\n \"\"\"functional RMSNorm\"\"\"\n\n @staticmethod\n def forward(\n ctx,\n inp: torch.Tensor,\n rmsnorm_weight: torch.Tensor,\n eps: float,\n fwd_rmsnorm_sm_margin: int,\n bwd_rmsnorm_sm_margin: int,\n zero_centered_gamma: bool,\n is_grad_enabled: bool,\n activation_dtype: torch.dtype,\n ) -> torch.Tensor:\n # Make sure input dimensions are compatible\n in_features = rmsnorm_weight.numel()\n assert inp.is_cuda, \"TransformerEngine needs CUDA.\"\n assert inp.shape[-1] == in_features, \"RMSNorm not possible\"\n inputmat = inp.view((-1, in_features))\n\n # Cast for native AMP\n inputmat = cast_if_needed(inputmat, activation_dtype)\n rmsnorm_weight = cast_if_needed(rmsnorm_weight, activation_dtype)\n\n if is_grad_enabled:\n rmsnorm_out, rsigma = tex.rmsnorm_fwd(inputmat, rmsnorm_weight,\n eps, fwd_rmsnorm_sm_margin,\n zero_centered_gamma)\n ctx.save_for_backward(inputmat, rmsnorm_weight, rsigma)\n ctx.inp_shape = inp.shape\n ctx.bwd_rmsnorm_sm_margin = bwd_rmsnorm_sm_margin\n ctx.zero_centered_gamma = zero_centered_gamma\n else:\n rmsnorm_out = tex.rmsnorm_fwd_inf(inputmat, rmsnorm_weight,\n eps,\n zero_centered_gamma)\n return rmsnorm_out.view_as(inp)\n\n @staticmethod\n def backward(\n ctx, grad_output: torch.Tensor\n ) -> Tuple[Union[torch.Tensor, None], ...]:\n inputmat, rmsnorm_weight, rsigma = ctx.saved_tensors\n grad_output = grad_output.contiguous()\n d_rmsnorm_out = grad_output.view(inputmat.shape)\n dxmat, dgamma = tex.rmsnorm_bwd(\n d_rmsnorm_out, inputmat, rsigma, rmsnorm_weight,\n ctx.bwd_rmsnorm_sm_margin, ctx.zero_centered_gamma\n )\n return (\n dxmat.view(ctx.inp_shape),\n dgamma,\n None,\n None,\n None,\n None,\n None,\n None,\n )\n\n\nclass RMSNorm(torch.nn.Module):\n r\"\"\"\n Applies Root Mean Square Layer Normalization over a mini-batch of inputs as described in\n the paper `Root Mean Square Layer Normalization <https://arxiv.org/abs/1910.07467>`__\n\n .. math::\n y = \\frac{x}{RMS_\\varepsilon(x)} * \\gamma\n\n where\n\n .. math::\n RMS_\\varepsilon(x) = \\sqrt{\\frac{1}{n}\\sum_{i=0}^nx_i^2 + \\varepsilon}\n\n :math:`\\gamma` is a learnable affine transform parameter of size :attr:`hidden_size`\n\n Parameters\n ----------\n hidden_size : int\n size of each input sample.\n eps : float, default = 1e-5\n a value added to the denominator of layer normalization for numerical stability.\n sequence_parallel : bool, default = `False`\n if set to `True`, uses sequence parallelism.\n params_dtype : torch.dtype, default = `torch.get_default_dtype()`\n it controls the type used to allocate the initial parameters. Useful when\n the model is trained with lower precision and the original FP32 parameters\n would not fit in GPU memory.\n zero_centered_gamma : bool, default = 'False'\n if set to 'True', gamma parameter in RMSNorm is initialized to 0 and\n the RMSNorm formula changes to\n\n .. math::\n y = \\frac{x}{RMS_\\varepsilon(x)} * (1 + \\gamma)\n device : Union[torch.device, str], default = \"cuda\"\n The device on which the parameters of the model will allocated. 
It is the user's\n responsibility to ensure all parameters are moved to the GPU before running the\n forward pass.\n \"\"\"\n\n def __init__(\n self,\n hidden_size: int,\n eps: float = 1e-5,\n sequence_parallel: bool = False,\n params_dtype: Optional[torch.dtype] = None,\n zero_centered_gamma: bool = False,\n device: Union[torch.device, str] = \"cuda\",\n ) -> None:\n super().__init__()\n params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype\n self.eps = eps\n self.zero_centered_gamma = zero_centered_gamma\n self.weight = Parameter(\n torch.empty(\n hidden_size,\n device=device,\n dtype=params_dtype,\n )\n )\n self.sequence_parallel = sequence_parallel\n\n self.reset_parameters(defer_init=(device == 'meta'))\n\n # These many SMs are subtracted from the total SM count when calling forward\n # and backward RMSNorm C APIs. These envvars can be used to prevent the LN\n # kernels from using all SMs in the device. This is useful for cases such as\n # communication overlap with RMSNorm.\n self.fwd_rmsnorm_sm_margin = int(os.getenv(\"NVTE_FWD_LAYERNORM_SM_MARGIN\", \"0\"))\n self.bwd_rmsnorm_sm_margin = int(os.getenv(\"NVTE_BWD_LAYERNORM_SM_MARGIN\", \"0\"))\n\n def reset_rms_norm_parameters(self) -> None:\n \"\"\"Init RMSNorm params\"\"\"\n warnings.warn(\n (\"This method is deprecated and will be removed in an upcoming release. \"\n \"Update your code to use RMSNorm.reset_parameters() instead.\"),\n DeprecationWarning,\n stacklevel=2\n )\n if not self.zero_centered_gamma:\n init.ones_(self.weight)\n else:\n init.zeros_(self.weight)\n\n def reset_parameters(self, defer_init=False) -> None:\n \"\"\"Reset RMSNorm parameters\"\"\"\n if defer_init:\n return\n\n if self.weight.device == torch.device('meta'):\n self.weight = torch.nn.Parameter(torch.empty_like(self.weight, device='cuda'))\n init.constant_(self.weight, float(not self.zero_centered_gamma))\n setattr(self.weight, \"sequence_parallel\", self.sequence_parallel)\n\n @no_torch_dynamo()\n def forward(self, inp: torch.Tensor) -> torch.Tensor:\n \"\"\"RMSNorm FWD\"\"\"\n\n # Set the activation type for AMP.\n TransformerEngineBaseModule.set_activation_dtype(self, inp)\n\n if torch.is_grad_enabled():\n fwd_fn = _RMSNorm.apply\n args = []\n else:\n fwd_fn = _RMSNorm.forward\n args = [None]\n\n args += (\n inp,\n self.weight,\n self.eps,\n self.fwd_rmsnorm_sm_margin,\n self.bwd_rmsnorm_sm_margin,\n self.zero_centered_gamma,\n torch.is_grad_enabled(),\n self.activation_dtype,\n )\n\n return fwd_fn(*args)\n", "path": "transformer_engine/pytorch/module/rmsnorm.py"}]} | 2,560 | 273 |
gh_patches_debug_31562 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-520 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MyPy - Support for dynamic enums is broken at the moment
Annoyingly this bit of code doesn't work:
```python
from enum import Enum

import strawberry
Animal = Enum('Animal', 'ANT BEE CAT DOG')
AnimalEnum = strawberry.enum(Animal, name="Animal")
a: AnimalEnum
reveal_type(AnimalEnum)
reveal_type(a)
```
As it raises the following error:
```
➜ poetry run mypy demo_types.py --show-traceback
demo_types.py:6: error: INTERNAL ERROR -- Please try using mypy master on Github:
https://mypy.rtfd.io/en/latest/common_issues.html#using-a-development-mypy-build
Please report a bug at https://github.com/python/mypy/issues
version: 0.790
Traceback (most recent call last):
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/bin/mypy", line 8, in <module>
sys.exit(console_entry())
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/__main__.py", line 8, in console_entry
main(None, sys.stdout, sys.stderr)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/main.py", line 90, in main
res = build.build(sources, options, None, flush_errors, fscache, stdout, stderr)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/build.py", line 180, in build
result = _build(
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/build.py", line 254, in _build
graph = dispatch(sources, manager, stdout)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/build.py", line 2630, in dispatch
process_graph(graph, manager)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/build.py", line 2953, in process_graph
process_stale_scc(graph, scc, manager)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/build.py", line 3045, in process_stale_scc
mypy.semanal_main.semantic_analysis_for_scc(graph, scc, manager.errors)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal_main.py", line 78, in semantic_analysis_for_scc
process_top_levels(graph, scc, patches)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal_main.py", line 199, in process_top_levels
deferred, incomplete, progress = semantic_analyze_target(next_id, state,
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal_main.py", line 326, in semantic_analyze_target
analyzer.refresh_partial(refresh_node,
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal.py", line 357, in refresh_partial
self.refresh_top_level(node)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal.py", line 368, in refresh_top_level
self.accept(d)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal.py", line 4700, in accept
node.accept(self)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/nodes.py", line 1063, in accept
return visitor.visit_assignment_stmt(self)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal.py", line 1943, in visit_assignment_stmt
self.apply_dynamic_class_hook(s)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal.py", line 2194, in apply_dynamic_class_hook
hook(DynamicClassDefContext(call, lval.name, self))
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/strawberry/ext/mypy_plugin.py", line 69, in enum_hook
enum_type = _get_type_for_expr(ctx.call.args[0], ctx.api)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/strawberry/ext/mypy_plugin.py", line 39, in _get_type_for_expr
return api.named_type(expr.name)
File "/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal.py", line 4155, in named_type
assert isinstance(node, TypeInfo)
AssertionError:
demo_types.py:6: : note: use --pdb to drop into pdb
```
I'm not sure how to fix this, since the error is deep inside mypy, and it seems to fail because the node we send to it is not a `TypeInfo` (it is a `Var` node, if I understood correctly). I wonder if we can check the type of node we have and then do something based on that 🤔
--- END ISSUE ---
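For contrast with the snippet in the issue, the decorator form of `strawberry.enum` appears to analyse cleanly (the report is specific to dynamically created enums); only the functional `Enum(...)` form crashes the plugin, because the assigned name binds to a `Var` rather than a `TypeInfo` during semantic analysis, as the traceback suggests. The example below is a hypothetical side-by-side illustration; `Flavour` and its member are invented for this sketch and do not come from the repository or its tests.
```python
from enum import Enum

import strawberry


@strawberry.enum  # decorating a class definition: mypy resolves the name to a TypeInfo
class Flavour(Enum):
    VANILLA = "vanilla"


Animal = Enum("Animal", "ANT BEE CAT DOG")            # functional API: the name binds to a Var
AnimalEnum = strawberry.enum(Animal, name="Animal")   # the call that triggers the crash above
```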
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/ext/mypy_plugin.py`
Content:
```
1 from typing import Callable, Optional
2
3 from mypy.nodes import (
4 GDEF,
5 Expression,
6 IndexExpr,
7 NameExpr,
8 SymbolTableNode,
9 TupleExpr,
10 TypeAlias,
11 )
12 from mypy.plugin import (
13 AnalyzeTypeContext,
14 ClassDefContext,
15 DynamicClassDefContext,
16 Plugin,
17 SemanticAnalyzerPluginInterface,
18 )
19 from mypy.plugins import dataclasses
20 from mypy.types import Type, UnionType
21
22
23 def lazy_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:
24 type_name = ctx.type.args[0]
25 type_ = ctx.api.analyze_type(type_name)
26
27 return type_
28
29
30 def private_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:
31 type_name = ctx.type.args[0]
32 type_ = ctx.api.analyze_type(type_name)
33
34 return type_
35
36
37 def _get_type_for_expr(expr: Expression, api: SemanticAnalyzerPluginInterface):
38 if isinstance(expr, NameExpr):
39 return api.named_type(expr.name)
40
41 if isinstance(expr, IndexExpr):
42 type_ = _get_type_for_expr(expr.base, api)
43 type_.args = (_get_type_for_expr(expr.index, api),)
44
45 return type_
46
47 raise ValueError(f"Unsupported expression f{type(expr)}")
48
49
50 def union_hook(ctx: DynamicClassDefContext) -> None:
51 types = ctx.call.args[1]
52
53 if isinstance(types, TupleExpr):
54 type_ = UnionType(tuple(_get_type_for_expr(x, ctx.api) for x in types.items))
55
56 type_alias = TypeAlias(
57 type_,
58 fullname=ctx.api.qualified_name(ctx.name),
59 line=ctx.call.line,
60 column=ctx.call.column,
61 )
62
63 ctx.api.add_symbol_table_node(
64 ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)
65 )
66
67
68 def enum_hook(ctx: DynamicClassDefContext) -> None:
69 enum_type = _get_type_for_expr(ctx.call.args[0], ctx.api)
70
71 type_alias = TypeAlias(
72 enum_type,
73 fullname=ctx.api.qualified_name(ctx.name),
74 line=ctx.call.line,
75 column=ctx.call.column,
76 )
77
78 ctx.api.add_symbol_table_node(
79 ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)
80 )
81
82
83 class StrawberryPlugin(Plugin):
84 def get_dynamic_class_hook(
85 self, fullname: str
86 ) -> Optional[Callable[[DynamicClassDefContext], None]]:
87 # TODO: investigate why we need this instead of `strawberry.union.union` on CI
88 # we have the same issue in the other hooks
89 if "strawberry.union" in fullname:
90 return union_hook
91
92 if "strawberry.enum" in fullname:
93 return enum_hook
94
95 return None
96
97 def get_type_analyze_hook(self, fullname: str):
98 if fullname == "strawberry.lazy_type.LazyType":
99 return lazy_type_analyze_callback
100
101 if any(
102 name in fullname
103 for name in {"strawberry.private.Private", "strawberry.Private"}
104 ):
105 return private_type_analyze_callback
106
107 return None
108
109 def get_class_decorator_hook(
110 self, fullname: str
111 ) -> Optional[Callable[[ClassDefContext], None]]:
112 if any(
113 strawberry_decorator in fullname
114 for strawberry_decorator in {
115 "strawberry.type",
116 "strawberry.federation.type",
117 "strawberry.input",
118 "strawberry.interface",
119 }
120 ):
121 return dataclasses.dataclass_class_maker_callback
122 return None
123
124
125 def plugin(version: str):
126 return StrawberryPlugin
127
```
Path: `.github/release-check-action/config.py`
Content:
```
1 import os
2
3
4 RELEASE_FILE_PATH = "RELEASE.md"
5 GITHUB_SHA = os.environ["GITHUB_SHA"]
6 GITHUB_EVENT_PATH = os.environ["GITHUB_EVENT_PATH"]
7 GITHUB_WORKSPACE = os.environ["GITHUB_WORKSPACE"]
8 API_URL = "https://gentle-beach-72400.herokuapp.com/graphql"
9
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/.github/release-check-action/config.py b/.github/release-check-action/config.py
--- a/.github/release-check-action/config.py
+++ b/.github/release-check-action/config.py
@@ -5,4 +5,4 @@
GITHUB_SHA = os.environ["GITHUB_SHA"]
GITHUB_EVENT_PATH = os.environ["GITHUB_EVENT_PATH"]
GITHUB_WORKSPACE = os.environ["GITHUB_WORKSPACE"]
-API_URL = "https://gentle-beach-72400.herokuapp.com/graphql"
+API_URL = "https://strawberry-github-bot.herokuapp.com/graphql"
diff --git a/strawberry/ext/mypy_plugin.py b/strawberry/ext/mypy_plugin.py
--- a/strawberry/ext/mypy_plugin.py
+++ b/strawberry/ext/mypy_plugin.py
@@ -8,6 +8,7 @@
SymbolTableNode,
TupleExpr,
TypeAlias,
+ Var,
)
from mypy.plugin import (
AnalyzeTypeContext,
@@ -17,7 +18,7 @@
SemanticAnalyzerPluginInterface,
)
from mypy.plugins import dataclasses
-from mypy.types import Type, UnionType
+from mypy.types import AnyType, Type, TypeOfAny, UnionType
def lazy_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:
@@ -66,7 +67,32 @@
def enum_hook(ctx: DynamicClassDefContext) -> None:
- enum_type = _get_type_for_expr(ctx.call.args[0], ctx.api)
+ first_argument = ctx.call.args[0]
+
+ if isinstance(first_argument, NameExpr):
+ if not first_argument.node:
+ ctx.api.defer()
+
+ return
+
+ if isinstance(first_argument.node, Var):
+ var_type = first_argument.node.type or AnyType(
+ TypeOfAny.implementation_artifact
+ )
+
+ type_alias = TypeAlias(
+ var_type,
+ fullname=ctx.api.qualified_name(ctx.name),
+ line=ctx.call.line,
+ column=ctx.call.column,
+ )
+
+ ctx.api.add_symbol_table_node(
+ ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)
+ )
+ return
+
+ enum_type = _get_type_for_expr(first_argument, ctx.api)
type_alias = TypeAlias(
enum_type,
| {"golden_diff": "diff --git a/.github/release-check-action/config.py b/.github/release-check-action/config.py\n--- a/.github/release-check-action/config.py\n+++ b/.github/release-check-action/config.py\n@@ -5,4 +5,4 @@\n GITHUB_SHA = os.environ[\"GITHUB_SHA\"]\n GITHUB_EVENT_PATH = os.environ[\"GITHUB_EVENT_PATH\"]\n GITHUB_WORKSPACE = os.environ[\"GITHUB_WORKSPACE\"]\n-API_URL = \"https://gentle-beach-72400.herokuapp.com/graphql\"\n+API_URL = \"https://strawberry-github-bot.herokuapp.com/graphql\"\ndiff --git a/strawberry/ext/mypy_plugin.py b/strawberry/ext/mypy_plugin.py\n--- a/strawberry/ext/mypy_plugin.py\n+++ b/strawberry/ext/mypy_plugin.py\n@@ -8,6 +8,7 @@\n SymbolTableNode,\n TupleExpr,\n TypeAlias,\n+ Var,\n )\n from mypy.plugin import (\n AnalyzeTypeContext,\n@@ -17,7 +18,7 @@\n SemanticAnalyzerPluginInterface,\n )\n from mypy.plugins import dataclasses\n-from mypy.types import Type, UnionType\n+from mypy.types import AnyType, Type, TypeOfAny, UnionType\n \n \n def lazy_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:\n@@ -66,7 +67,32 @@\n \n \n def enum_hook(ctx: DynamicClassDefContext) -> None:\n- enum_type = _get_type_for_expr(ctx.call.args[0], ctx.api)\n+ first_argument = ctx.call.args[0]\n+\n+ if isinstance(first_argument, NameExpr):\n+ if not first_argument.node:\n+ ctx.api.defer()\n+\n+ return\n+\n+ if isinstance(first_argument.node, Var):\n+ var_type = first_argument.node.type or AnyType(\n+ TypeOfAny.implementation_artifact\n+ )\n+\n+ type_alias = TypeAlias(\n+ var_type,\n+ fullname=ctx.api.qualified_name(ctx.name),\n+ line=ctx.call.line,\n+ column=ctx.call.column,\n+ )\n+\n+ ctx.api.add_symbol_table_node(\n+ ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)\n+ )\n+ return\n+\n+ enum_type = _get_type_for_expr(first_argument, ctx.api)\n \n type_alias = TypeAlias(\n enum_type,\n", "issue": "MyPy - Support for dynamic enums is broken at the moment\nAnnoyingly this bit of code doesn't work:\r\n\r\n```python\r\nimport strawberry\r\n\r\n\r\nAnimal = Enum('Animal', 'ANT BEE CAT DOG')\r\n\r\nAnimalEnum = strawberry.enum(Animal, name=\"Animal\")\r\n\r\n\r\na: AnimalEnum\r\nreveal_type(AnimalEnum)\r\nreveal_type(a)\r\n```\r\n\r\nAs it raises the following error:\r\n\r\n```\r\n\u279c poetry run mypy demo_types.py --show-traceback\r\ndemo_types.py:6: error: INTERNAL ERROR -- Please try using mypy master on Github:\r\nhttps://mypy.rtfd.io/en/latest/common_issues.html#using-a-development-mypy-build\r\nPlease report a bug at https://github.com/python/mypy/issues\r\nversion: 0.790\r\nTraceback (most recent call last):\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/bin/mypy\", line 8, in <module>\r\n sys.exit(console_entry())\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/__main__.py\", line 8, in console_entry\r\n main(None, sys.stdout, sys.stderr)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/main.py\", line 90, in main\r\n res = build.build(sources, options, None, flush_errors, fscache, stdout, stderr)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/build.py\", line 180, in build\r\n result = _build(\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/build.py\", line 254, in _build\r\n graph = dispatch(sources, manager, stdout)\r\n File 
\"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/build.py\", line 2630, in dispatch\r\n process_graph(graph, manager)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/build.py\", line 2953, in process_graph\r\n process_stale_scc(graph, scc, manager)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/build.py\", line 3045, in process_stale_scc\r\n mypy.semanal_main.semantic_analysis_for_scc(graph, scc, manager.errors)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal_main.py\", line 78, in semantic_analysis_for_scc\r\n process_top_levels(graph, scc, patches)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal_main.py\", line 199, in process_top_levels\r\n deferred, incomplete, progress = semantic_analyze_target(next_id, state,\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal_main.py\", line 326, in semantic_analyze_target\r\n analyzer.refresh_partial(refresh_node,\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal.py\", line 357, in refresh_partial\r\n self.refresh_top_level(node)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal.py\", line 368, in refresh_top_level\r\n self.accept(d)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal.py\", line 4700, in accept\r\n node.accept(self)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/nodes.py\", line 1063, in accept\r\n return visitor.visit_assignment_stmt(self)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal.py\", line 1943, in visit_assignment_stmt\r\n self.apply_dynamic_class_hook(s)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal.py\", line 2194, in apply_dynamic_class_hook\r\n hook(DynamicClassDefContext(call, lval.name, self))\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/strawberry/ext/mypy_plugin.py\", line 69, in enum_hook\r\n enum_type = _get_type_for_expr(ctx.call.args[0], ctx.api)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/strawberry/ext/mypy_plugin.py\", line 39, in _get_type_for_expr\r\n return api.named_type(expr.name)\r\n File \"/Users/patrick/Documents/github/strawberry-graphql/strawberry/.venv/lib/python3.9/site-packages/mypy/semanal.py\", line 4155, in named_type\r\n assert isinstance(node, TypeInfo)\r\nAssertionError:\r\ndemo_types.py:6: : note: use --pdb to drop into pdb\r\n```\r\n\r\nI'm not sure how to fix, since this error is deep inside mypy, and it seems to fail because the node we send to it is not a TypeInfo (it is a var node if I understood correctly). 
I wonder if we can check the type of node we have and then do something based on that \ud83e\udd14 \n", "before_files": [{"content": "from typing import Callable, Optional\n\nfrom mypy.nodes import (\n GDEF,\n Expression,\n IndexExpr,\n NameExpr,\n SymbolTableNode,\n TupleExpr,\n TypeAlias,\n)\nfrom mypy.plugin import (\n AnalyzeTypeContext,\n ClassDefContext,\n DynamicClassDefContext,\n Plugin,\n SemanticAnalyzerPluginInterface,\n)\nfrom mypy.plugins import dataclasses\nfrom mypy.types import Type, UnionType\n\n\ndef lazy_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:\n type_name = ctx.type.args[0]\n type_ = ctx.api.analyze_type(type_name)\n\n return type_\n\n\ndef private_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:\n type_name = ctx.type.args[0]\n type_ = ctx.api.analyze_type(type_name)\n\n return type_\n\n\ndef _get_type_for_expr(expr: Expression, api: SemanticAnalyzerPluginInterface):\n if isinstance(expr, NameExpr):\n return api.named_type(expr.name)\n\n if isinstance(expr, IndexExpr):\n type_ = _get_type_for_expr(expr.base, api)\n type_.args = (_get_type_for_expr(expr.index, api),)\n\n return type_\n\n raise ValueError(f\"Unsupported expression f{type(expr)}\")\n\n\ndef union_hook(ctx: DynamicClassDefContext) -> None:\n types = ctx.call.args[1]\n\n if isinstance(types, TupleExpr):\n type_ = UnionType(tuple(_get_type_for_expr(x, ctx.api) for x in types.items))\n\n type_alias = TypeAlias(\n type_,\n fullname=ctx.api.qualified_name(ctx.name),\n line=ctx.call.line,\n column=ctx.call.column,\n )\n\n ctx.api.add_symbol_table_node(\n ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)\n )\n\n\ndef enum_hook(ctx: DynamicClassDefContext) -> None:\n enum_type = _get_type_for_expr(ctx.call.args[0], ctx.api)\n\n type_alias = TypeAlias(\n enum_type,\n fullname=ctx.api.qualified_name(ctx.name),\n line=ctx.call.line,\n column=ctx.call.column,\n )\n\n ctx.api.add_symbol_table_node(\n ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)\n )\n\n\nclass StrawberryPlugin(Plugin):\n def get_dynamic_class_hook(\n self, fullname: str\n ) -> Optional[Callable[[DynamicClassDefContext], None]]:\n # TODO: investigate why we need this instead of `strawberry.union.union` on CI\n # we have the same issue in the other hooks\n if \"strawberry.union\" in fullname:\n return union_hook\n\n if \"strawberry.enum\" in fullname:\n return enum_hook\n\n return None\n\n def get_type_analyze_hook(self, fullname: str):\n if fullname == \"strawberry.lazy_type.LazyType\":\n return lazy_type_analyze_callback\n\n if any(\n name in fullname\n for name in {\"strawberry.private.Private\", \"strawberry.Private\"}\n ):\n return private_type_analyze_callback\n\n return None\n\n def get_class_decorator_hook(\n self, fullname: str\n ) -> Optional[Callable[[ClassDefContext], None]]:\n if any(\n strawberry_decorator in fullname\n for strawberry_decorator in {\n \"strawberry.type\",\n \"strawberry.federation.type\",\n \"strawberry.input\",\n \"strawberry.interface\",\n }\n ):\n return dataclasses.dataclass_class_maker_callback\n return None\n\n\ndef plugin(version: str):\n return StrawberryPlugin\n", "path": "strawberry/ext/mypy_plugin.py"}, {"content": "import os\n\n\nRELEASE_FILE_PATH = \"RELEASE.md\"\nGITHUB_SHA = os.environ[\"GITHUB_SHA\"]\nGITHUB_EVENT_PATH = os.environ[\"GITHUB_EVENT_PATH\"]\nGITHUB_WORKSPACE = os.environ[\"GITHUB_WORKSPACE\"]\nAPI_URL = \"https://gentle-beach-72400.herokuapp.com/graphql\"\n", "path": ".github/release-check-action/config.py"}], "after_files": 
[{"content": "from typing import Callable, Optional\n\nfrom mypy.nodes import (\n GDEF,\n Expression,\n IndexExpr,\n NameExpr,\n SymbolTableNode,\n TupleExpr,\n TypeAlias,\n Var,\n)\nfrom mypy.plugin import (\n AnalyzeTypeContext,\n ClassDefContext,\n DynamicClassDefContext,\n Plugin,\n SemanticAnalyzerPluginInterface,\n)\nfrom mypy.plugins import dataclasses\nfrom mypy.types import AnyType, Type, TypeOfAny, UnionType\n\n\ndef lazy_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:\n type_name = ctx.type.args[0]\n type_ = ctx.api.analyze_type(type_name)\n\n return type_\n\n\ndef private_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:\n type_name = ctx.type.args[0]\n type_ = ctx.api.analyze_type(type_name)\n\n return type_\n\n\ndef _get_type_for_expr(expr: Expression, api: SemanticAnalyzerPluginInterface):\n if isinstance(expr, NameExpr):\n return api.named_type(expr.name)\n\n if isinstance(expr, IndexExpr):\n type_ = _get_type_for_expr(expr.base, api)\n type_.args = (_get_type_for_expr(expr.index, api),)\n\n return type_\n\n raise ValueError(f\"Unsupported expression f{type(expr)}\")\n\n\ndef union_hook(ctx: DynamicClassDefContext) -> None:\n types = ctx.call.args[1]\n\n if isinstance(types, TupleExpr):\n type_ = UnionType(tuple(_get_type_for_expr(x, ctx.api) for x in types.items))\n\n type_alias = TypeAlias(\n type_,\n fullname=ctx.api.qualified_name(ctx.name),\n line=ctx.call.line,\n column=ctx.call.column,\n )\n\n ctx.api.add_symbol_table_node(\n ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)\n )\n\n\ndef enum_hook(ctx: DynamicClassDefContext) -> None:\n first_argument = ctx.call.args[0]\n\n if isinstance(first_argument, NameExpr):\n if not first_argument.node:\n ctx.api.defer()\n\n return\n\n if isinstance(first_argument.node, Var):\n var_type = first_argument.node.type or AnyType(\n TypeOfAny.implementation_artifact\n )\n\n type_alias = TypeAlias(\n var_type,\n fullname=ctx.api.qualified_name(ctx.name),\n line=ctx.call.line,\n column=ctx.call.column,\n )\n\n ctx.api.add_symbol_table_node(\n ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)\n )\n return\n\n enum_type = _get_type_for_expr(first_argument, ctx.api)\n\n type_alias = TypeAlias(\n enum_type,\n fullname=ctx.api.qualified_name(ctx.name),\n line=ctx.call.line,\n column=ctx.call.column,\n )\n\n ctx.api.add_symbol_table_node(\n ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)\n )\n\n\nclass StrawberryPlugin(Plugin):\n def get_dynamic_class_hook(\n self, fullname: str\n ) -> Optional[Callable[[DynamicClassDefContext], None]]:\n # TODO: investigate why we need this instead of `strawberry.union.union` on CI\n # we have the same issue in the other hooks\n if \"strawberry.union\" in fullname:\n return union_hook\n\n if \"strawberry.enum\" in fullname:\n return enum_hook\n\n return None\n\n def get_type_analyze_hook(self, fullname: str):\n if fullname == \"strawberry.lazy_type.LazyType\":\n return lazy_type_analyze_callback\n\n if any(\n name in fullname\n for name in {\"strawberry.private.Private\", \"strawberry.Private\"}\n ):\n return private_type_analyze_callback\n\n return None\n\n def get_class_decorator_hook(\n self, fullname: str\n ) -> Optional[Callable[[ClassDefContext], None]]:\n if any(\n strawberry_decorator in fullname\n for strawberry_decorator in {\n \"strawberry.type\",\n \"strawberry.federation.type\",\n \"strawberry.input\",\n \"strawberry.interface\",\n }\n ):\n return dataclasses.dataclass_class_maker_callback\n return None\n\n\ndef 
plugin(version: str):\n return StrawberryPlugin\n", "path": "strawberry/ext/mypy_plugin.py"}, {"content": "import os\n\n\nRELEASE_FILE_PATH = \"RELEASE.md\"\nGITHUB_SHA = os.environ[\"GITHUB_SHA\"]\nGITHUB_EVENT_PATH = os.environ[\"GITHUB_EVENT_PATH\"]\nGITHUB_WORKSPACE = os.environ[\"GITHUB_WORKSPACE\"]\nAPI_URL = \"https://strawberry-github-bot.herokuapp.com/graphql\"\n", "path": ".github/release-check-action/config.py"}]} | 2,787 | 517 |
gh_patches_debug_8119 | rasdani/github-patches | git_diff | google__osv.dev-704 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Packagist version enumeration is incorrect
Since #675 was landed we're now seeing some false positives in `osv-detector` due to the `versions` field seemingly holding wrong versions.
Some examples that I've looked into specifically:
 * https://api.osv.dev/v1/vulns/GHSA-vxrc-68xx-x48g has all of the v2.x versions listed in `affected[0].versions`, even though that `affected` says it's `fixed` in version `1.38.0`. (It's also missing `affected[1].versions`, which doesn't seem right?)
* https://api.osv.dev/v1/vulns/GHSA-77mr-wc79-m8j3 has v6.5+ versions listed in `affected[0].versions`, even though that `affected` says `fixed` in version `6.5.0`.
* https://api.osv.dev/v1/vulns/GHSA-q348-f93x-9gx4 has v2.2.11+ versions listed in `affected[0].versions`, even though that `affected` says `fixed` in version `2.2.11`.
And here are some outputs from `osv-detector` which have some more examples:
```
phpmailer/[email protected]:
GHSA-77mr-wc79-m8j3: A validation function with the same name as a built-in validator can be called (https://github.com/advisories/GHSA-77mr-wc79-m8j3)
GHSA-7q44-r25x-wm4q: Remote Code Execution vulnerability in PHPMailer 6.4.1 running on Windows (https://github.com/advisories/GHSA-7q44-r25x-wm4q)
GHSA-f7hx-fqxw-rvvj: Insufficient output escaping of attachment names in PHPMailer (https://github.com/advisories/GHSA-f7hx-fqxw-rvvj)
GHSA-v5c9-mmw9-829q: PHPMailer susceptible to arbitrary code execution (https://github.com/advisories/GHSA-v5c9-mmw9-829q)
twig/[email protected]:
GHSA-vxrc-68xx-x48g: Sandbox Information Disclosure (https://github.com/advisories/GHSA-vxrc-68xx-x48g)
zendesk/[email protected]:
GHSA-q348-f93x-9gx4: Lack of Input Validation in zendesk_api_client_php for Zendesk Subdomain (https://github.com/advisories/GHSA-q348-f93x-9gx4)
laravel/[email protected]:
GHSA-3p32-j457-pg5x: Query Binding Exploitation (https://github.com/advisories/GHSA-3p32-j457-pg5x)
GHSA-4mg9-vhxq-vm7j: SQL Server LIMIT / OFFSET SQL Injection in laravel/framework and illuminate/database (https://github.com/advisories/GHSA-4mg9-vhxq-vm7j)
GHSA-66hf-2p6w-jqfw: Laravel Framework XSS in Blade templating engine (https://github.com/advisories/GHSA-66hf-2p6w-jqfw)
GHSA-w2pm-r78h-4m7v: OS Command Injection in Laravel Framework (https://github.com/advisories/GHSA-w2pm-r78h-4m7v)
GHSA-w68r-5p45-5rqp: Improper Input Validation in Laravel (https://github.com/advisories/GHSA-w68r-5p45-5rqp)
GHSA-x7p5-p2c9-phvg: Unexpected database bindings (https://github.com/advisories/GHSA-x7p5-p2c9-phvg)
```
--- END ISSUE ---
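Since the version enumeration ultimately rests on how the class below orders version strings, a small usage sketch may help frame the reports above. The call site that actually enumerates Packagist releases is not part of this prompt, so the way the class is driven here is an assumption; `1.38.0` is the fixed version from the twig advisory, while `1.37.1` and `2.4.0` are representative neighbouring releases invented for the comparison.
```python
# Usage sketch for the class defined in osv/packagist_version.py below.
from osv.packagist_version import PackagistVersion

fixed_at = PackagistVersion("1.38.0")
for release in ["1.37.1", "1.38.0", "2.4.0"]:
    print(release, PackagistVersion(release) < fixed_at)
```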
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `osv/packagist_version.py`
Content:
```
1 # Copyright 2022 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Ordered packagist version"""
15 import re
16
17
18 class PackagistVersion:
19 """
20 Follows the packagist version ordering, which is recommended to be semver, but
21 not enforced to be semver. The php standard version comparison code written in
22 C is located here: https://github.com/php/php-src/blob/master/ext/standard/versioning.c
23
24 The function first replaces _, - and + with a dot . in the version strings and
25 also inserts dots . before and after any non number so that for example
26 '4.3.2RC1' becomes '4.3.2.RC.1'. Then it compares the parts starting from
27 left to right.
28
29 If a part contains special version strings these are handled in the following order:
30 any string not found in this list < dev < alpha = a < beta = b < RC = rc < # < pl = p.
31 This way not only versions with different levels like '4.1' and '4.1.2' can be
32 compared but also any PHP specific version containing development state.
33
34 ---
35
36 ## Known differences:
37 The following are some examples of known differences between this python
38 implementation and the C implementation of PHP
39
40 - In this version, special version strings need to exactly match to not be
41 considered "any other string", while in the original implementation the
42 string only need to start with one of the listed strings.
43 """
44
45 version_str: str
46 canonicalized_version: str
47
48 def __init__(self, version: str):
49 self.version_str = version
50 self.canonicalized_version = self.php_canonicalize_version(version)
51
52 def __str__(self) -> str:
53 return self.version_str
54
55 def __hash__(self):
56 return self.canonicalized_version
57
58 def __eq__(self, other):
59 if not isinstance(other, self.__class__):
60 return NotImplemented
61 return self.canonicalized_version == other.canonicalized_version
62
63 def __lt__(self, other):
64 return self.__cmp__(other) < 0
65
66 def __le__(self, other):
67 return self.__cmp__(other) <= 0
68
69 def __gt__(self, other):
70 return self.__cmp__(other) > 0
71
72 def __ge__(self, other):
73 return self.__cmp__(other) >= 0
74
75 def __cmp__(self, other):
76 return self.php_version_compare(self.version_str, other.version_str)
77
78 @staticmethod
79 def php_slices_compare(a_split: [str], b_split: [str]):
80 """
81 Compare php versions after being split by '.'
82 """
83 for a, b in zip(a_split, b_split):
84 if a.isdigit() and b.isdigit():
85 compare = int(a) - int(b)
86 elif not a.isdigit() and not b.isdigit():
87 compare = PackagistVersion.compare_special_versions(a, b)
88 elif a.isdigit():
89 compare = PackagistVersion.compare_special_versions('#', b)
90 else:
91 compare = PackagistVersion.compare_special_versions(a, '#')
92
93 if compare != 0:
94 if compare > 0:
95 return 1
96 else:
97 return -1
98
99 if len(a_split) > len(b_split):
100 next_char = a_split[len(b_split)]
101 if next_char.isdigit():
102 return 1
103 return PackagistVersion.php_slices_compare(a_split[len(b_split):], ['#'])
104
105 if len(a_split) < len(b_split):
106 next_char = b_split[len(a_split)]
107 if next_char.isdigit():
108 return -1
109 return PackagistVersion.php_slices_compare(['#'], b_split[len(a_split):])
110
111 return 0
112
113 @staticmethod
114 def php_version_compare(version_a: str, version_b: str) -> int:
115 """
116 Given two php versions, compare which is newer
117
118 :return: 1 if a > b, -1 if b > a, 0 if a == b
119 """
120 version_a = PackagistVersion.php_canonicalize_version(version_a)
121 version_b = PackagistVersion.php_canonicalize_version(version_b)
122
123 a_split = version_a.split('.')
124 b_split = version_b.split('.')
125 return PackagistVersion.php_slices_compare(a_split, b_split)
126
127 @staticmethod
128 def php_canonicalize_version(version: str) -> str:
129 """
130 Replaces special separators (`-`,`_`,`+`) with `.`, and inserts `.`
131 between any digit and non-digit.
132 """
133 replaced = re.sub('[-_+]', '.', version)
134 replaced = re.sub(r'([^\d.])(\d)', r'\1.\2', replaced)
135 replaced = re.sub(r'(\d)([^\d.])', r'\1.\2', replaced)
136 return replaced
137
138 SPECIAL_CHARACTER_ORDER = {
139 "dev": 0,
140 "alpha": 1,
141 "a": 1,
142 "beta": 2,
143 "b": 2,
144 "RC": 3,
145 "rc": 3,
146 "#": 4,
147 "pl": 5,
148 "p": 5,
149 None: 0,
150 }
151
152 @staticmethod
153 def compare_special_versions(version_part_a: str, version_part_b: str) -> int:
154 """
155 Compares the order of special characters against the order specified in php
156 docs.
157
158 any string not found in this list < dev < alpha = a < beta = b < RC = rc < # < pl = p.
159
160 :return: 1 if a > b, -1 if b > a, 0 if a == b
161 """
162 # This isn't quite the behaviour of the c implementation of php version_compare
163 # In php if the part starts with special_chars its enough.
164 # ### For example:
165 # *PHP implementation:*
166 # `1.0.0beta1 = 1.0.0betawithsomeothertext1`
167 #
168 # *This python implementation*
169 # `1.0.0beta1 > 1.0.0betawithsomeothertext1`
170 # Because "any string not found in this list" should apply to
171 # `betawithsomeothertext`
172 found_a = PackagistVersion.SPECIAL_CHARACTER_ORDER.get(version_part_a, -1)
173 found_b = PackagistVersion.SPECIAL_CHARACTER_ORDER.get(version_part_b, -1)
174
175 if found_a > found_b:
176 return 1
177 if found_a < found_b:
178 return -1
179 return 0
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/osv/packagist_version.py b/osv/packagist_version.py
--- a/osv/packagist_version.py
+++ b/osv/packagist_version.py
@@ -130,6 +130,8 @@
Replaces special separators (`-`,`_`,`+`) with `.`, and inserts `.`
between any digit and non-digit.
"""
+ if version.startswith('v'):
+ version = version[1:]
replaced = re.sub('[-_+]', '.', version)
replaced = re.sub(r'([^\d.])(\d)', r'\1.\2', replaced)
replaced = re.sub(r'(\d)([^\d.])', r'\1.\2', replaced)
| {"golden_diff": "diff --git a/osv/packagist_version.py b/osv/packagist_version.py\n--- a/osv/packagist_version.py\n+++ b/osv/packagist_version.py\n@@ -130,6 +130,8 @@\n Replaces special separators (`-`,`_`,`+`) with `.`, and inserts `.`\n between any digit and non-digit.\n \"\"\"\n+ if version.startswith('v'):\n+ version = version[1:]\n replaced = re.sub('[-_+]', '.', version)\n replaced = re.sub(r'([^\\d.])(\\d)', r'\\1.\\2', replaced)\n replaced = re.sub(r'(\\d)([^\\d.])', r'\\1.\\2', replaced)\n", "issue": "Packagist version enumeration is incorrect\nSince #675 was landed we're now seeing some false positives in `osv-detector` due to the `versions` field seemingly holding wrong versions.\r\n\r\nSome examples that I've looked into specifically:\r\n * https://api.osv.dev/v1/vulns/GHSA-vxrc-68xx-x48g has all of the v2.x versions listed in `affected[0].versions`, even though that `affected` says its `fixed` in version `1.38.0`. (it's also missing `affected[1].versions` which doesn't seem right?\r\n * https://api.osv.dev/v1/vulns/GHSA-77mr-wc79-m8j3 has v6.5+ versions listed in `affected[0].versions`, even though that `affected` says `fixed` in version `6.5.0`.\r\n * https://api.osv.dev/v1/vulns/GHSA-q348-f93x-9gx4 has v2.2.11+ versions listed in `affected[0].versions`, even though that `affected` says `fixed` in version `2.2.11`.\r\n\r\nAnd here are some outputs from `osv-detector` which have some more examples:\r\n```\r\nphpmailer/[email protected]:\r\n GHSA-77mr-wc79-m8j3: A validation function with the same name as a built-in validator can be called (https://github.com/advisories/GHSA-77mr-wc79-m8j3)\r\n GHSA-7q44-r25x-wm4q: Remote Code Execution vulnerability in PHPMailer 6.4.1 running on Windows (https://github.com/advisories/GHSA-7q44-r25x-wm4q)\r\n GHSA-f7hx-fqxw-rvvj: Insufficient output escaping of attachment names in PHPMailer (https://github.com/advisories/GHSA-f7hx-fqxw-rvvj)\r\n GHSA-v5c9-mmw9-829q: PHPMailer susceptible to arbitrary code execution (https://github.com/advisories/GHSA-v5c9-mmw9-829q)\r\n\r\ntwig/[email protected]:\r\n GHSA-vxrc-68xx-x48g: Sandbox Information Disclosure (https://github.com/advisories/GHSA-vxrc-68xx-x48g)\r\n\r\nzendesk/[email protected]:\r\n GHSA-q348-f93x-9gx4: Lack of Input Validation in zendesk_api_client_php for Zendesk Subdomain (https://github.com/advisories/GHSA-q348-f93x-9gx4)\r\n\r\nlaravel/[email protected]:\r\n GHSA-3p32-j457-pg5x: Query Binding Exploitation (https://github.com/advisories/GHSA-3p32-j457-pg5x)\r\n GHSA-4mg9-vhxq-vm7j: SQL Server LIMIT / OFFSET SQL Injection in laravel/framework and illuminate/database (https://github.com/advisories/GHSA-4mg9-vhxq-vm7j)\r\n GHSA-66hf-2p6w-jqfw: Laravel Framework XSS in Blade templating engine (https://github.com/advisories/GHSA-66hf-2p6w-jqfw)\r\n GHSA-w2pm-r78h-4m7v: OS Command Injection in Laravel Framework (https://github.com/advisories/GHSA-w2pm-r78h-4m7v)\r\n GHSA-w68r-5p45-5rqp: Improper Input Validation in Laravel (https://github.com/advisories/GHSA-w68r-5p45-5rqp)\r\n GHSA-x7p5-p2c9-phvg: Unexpected database bindings (https://github.com/advisories/GHSA-x7p5-p2c9-phvg)\r\n```\n", "before_files": [{"content": "# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is 
distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Ordered packagist version\"\"\"\nimport re\n\n\nclass PackagistVersion:\n \"\"\"\n Follows the packagist version ordering, which is recommended to be semver, but\n not enforced to be semver. The php standard version comparison code written in\n C is located here: https://github.com/php/php-src/blob/master/ext/standard/versioning.c\n\n The function first replaces _, - and + with a dot . in the version strings and\n also inserts dots . before and after any non number so that for example\n '4.3.2RC1' becomes '4.3.2.RC.1'. Then it compares the parts starting from\n left to right.\n\n If a part contains special version strings these are handled in the following order:\n any string not found in this list < dev < alpha = a < beta = b < RC = rc < # < pl = p.\n This way not only versions with different levels like '4.1' and '4.1.2' can be\n compared but also any PHP specific version containing development state.\n\n ---\n\n ## Known differences:\n The following are some examples of known differences between this python\n implementation and the C implementation of PHP\n\n - In this version, special version strings need to exactly match to not be\n considered \"any other string\", while in the original implementation the\n string only need to start with one of the listed strings.\n \"\"\"\n\n version_str: str\n canonicalized_version: str\n\n def __init__(self, version: str):\n self.version_str = version\n self.canonicalized_version = self.php_canonicalize_version(version)\n\n def __str__(self) -> str:\n return self.version_str\n\n def __hash__(self):\n return self.canonicalized_version\n\n def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return NotImplemented\n return self.canonicalized_version == other.canonicalized_version\n\n def __lt__(self, other):\n return self.__cmp__(other) < 0\n\n def __le__(self, other):\n return self.__cmp__(other) <= 0\n\n def __gt__(self, other):\n return self.__cmp__(other) > 0\n\n def __ge__(self, other):\n return self.__cmp__(other) >= 0\n\n def __cmp__(self, other):\n return self.php_version_compare(self.version_str, other.version_str)\n\n @staticmethod\n def php_slices_compare(a_split: [str], b_split: [str]):\n \"\"\"\n Compare php versions after being split by '.'\n \"\"\"\n for a, b in zip(a_split, b_split):\n if a.isdigit() and b.isdigit():\n compare = int(a) - int(b)\n elif not a.isdigit() and not b.isdigit():\n compare = PackagistVersion.compare_special_versions(a, b)\n elif a.isdigit():\n compare = PackagistVersion.compare_special_versions('#', b)\n else:\n compare = PackagistVersion.compare_special_versions(a, '#')\n\n if compare != 0:\n if compare > 0:\n return 1\n else:\n return -1\n\n if len(a_split) > len(b_split):\n next_char = a_split[len(b_split)]\n if next_char.isdigit():\n return 1\n return PackagistVersion.php_slices_compare(a_split[len(b_split):], ['#'])\n\n if len(a_split) < len(b_split):\n next_char = b_split[len(a_split)]\n if next_char.isdigit():\n return -1\n return PackagistVersion.php_slices_compare(['#'], b_split[len(a_split):])\n\n return 0\n\n @staticmethod\n def php_version_compare(version_a: str, version_b: str) -> int:\n \"\"\"\n Given two php versions, compare which is newer\n\n :return: 1 if a > b, -1 if b > a, 0 if a == b\n \"\"\"\n version_a = 
PackagistVersion.php_canonicalize_version(version_a)\n version_b = PackagistVersion.php_canonicalize_version(version_b)\n\n a_split = version_a.split('.')\n b_split = version_b.split('.')\n return PackagistVersion.php_slices_compare(a_split, b_split)\n\n @staticmethod\n def php_canonicalize_version(version: str) -> str:\n \"\"\"\n Replaces special separators (`-`,`_`,`+`) with `.`, and inserts `.`\n between any digit and non-digit.\n \"\"\"\n replaced = re.sub('[-_+]', '.', version)\n replaced = re.sub(r'([^\\d.])(\\d)', r'\\1.\\2', replaced)\n replaced = re.sub(r'(\\d)([^\\d.])', r'\\1.\\2', replaced)\n return replaced\n\n SPECIAL_CHARACTER_ORDER = {\n \"dev\": 0,\n \"alpha\": 1,\n \"a\": 1,\n \"beta\": 2,\n \"b\": 2,\n \"RC\": 3,\n \"rc\": 3,\n \"#\": 4,\n \"pl\": 5,\n \"p\": 5,\n None: 0,\n }\n\n @staticmethod\n def compare_special_versions(version_part_a: str, version_part_b: str) -> int:\n \"\"\"\n Compares the order of special characters against the order specified in php\n docs.\n\n any string not found in this list < dev < alpha = a < beta = b < RC = rc < # < pl = p.\n\n :return: 1 if a > b, -1 if b > a, 0 if a == b\n \"\"\"\n # This isn't quite the behaviour of the c implementation of php version_compare\n # In php if the part starts with special_chars its enough.\n # ### For example:\n # *PHP implementation:*\n # `1.0.0beta1 = 1.0.0betawithsomeothertext1`\n #\n # *This python implementation*\n # `1.0.0beta1 > 1.0.0betawithsomeothertext1`\n # Because \"any string not found in this list\" should apply to\n # `betawithsomeothertext`\n found_a = PackagistVersion.SPECIAL_CHARACTER_ORDER.get(version_part_a, -1)\n found_b = PackagistVersion.SPECIAL_CHARACTER_ORDER.get(version_part_b, -1)\n\n if found_a > found_b:\n return 1\n if found_a < found_b:\n return -1\n return 0\n", "path": "osv/packagist_version.py"}], "after_files": [{"content": "# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Ordered packagist version\"\"\"\nimport re\n\n\nclass PackagistVersion:\n \"\"\"\n Follows the packagist version ordering, which is recommended to be semver, but\n not enforced to be semver. The php standard version comparison code written in\n C is located here: https://github.com/php/php-src/blob/master/ext/standard/versioning.c\n\n The function first replaces _, - and + with a dot . in the version strings and\n also inserts dots . before and after any non number so that for example\n '4.3.2RC1' becomes '4.3.2.RC.1'. 
Then it compares the parts starting from\n left to right.\n\n If a part contains special version strings these are handled in the following order:\n any string not found in this list < dev < alpha = a < beta = b < RC = rc < # < pl = p.\n This way not only versions with different levels like '4.1' and '4.1.2' can be\n compared but also any PHP specific version containing development state.\n\n ---\n\n ## Known differences:\n The following are some examples of known differences between this python\n implementation and the C implementation of PHP\n\n - In this version, special version strings need to exactly match to not be\n considered \"any other string\", while in the original implementation the\n string only need to start with one of the listed strings.\n \"\"\"\n\n version_str: str\n canonicalized_version: str\n\n def __init__(self, version: str):\n self.version_str = version\n self.canonicalized_version = self.php_canonicalize_version(version)\n\n def __str__(self) -> str:\n return self.version_str\n\n def __hash__(self):\n return self.canonicalized_version\n\n def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return NotImplemented\n return self.canonicalized_version == other.canonicalized_version\n\n def __lt__(self, other):\n return self.__cmp__(other) < 0\n\n def __le__(self, other):\n return self.__cmp__(other) <= 0\n\n def __gt__(self, other):\n return self.__cmp__(other) > 0\n\n def __ge__(self, other):\n return self.__cmp__(other) >= 0\n\n def __cmp__(self, other):\n return self.php_version_compare(self.version_str, other.version_str)\n\n @staticmethod\n def php_slices_compare(a_split: [str], b_split: [str]):\n \"\"\"\n Compare php versions after being split by '.'\n \"\"\"\n for a, b in zip(a_split, b_split):\n if a.isdigit() and b.isdigit():\n compare = int(a) - int(b)\n elif not a.isdigit() and not b.isdigit():\n compare = PackagistVersion.compare_special_versions(a, b)\n elif a.isdigit():\n compare = PackagistVersion.compare_special_versions('#', b)\n else:\n compare = PackagistVersion.compare_special_versions(a, '#')\n\n if compare != 0:\n if compare > 0:\n return 1\n else:\n return -1\n\n if len(a_split) > len(b_split):\n next_char = a_split[len(b_split)]\n if next_char.isdigit():\n return 1\n return PackagistVersion.php_slices_compare(a_split[len(b_split):], ['#'])\n\n if len(a_split) < len(b_split):\n next_char = b_split[len(a_split)]\n if next_char.isdigit():\n return -1\n return PackagistVersion.php_slices_compare(['#'], b_split[len(a_split):])\n\n return 0\n\n @staticmethod\n def php_version_compare(version_a: str, version_b: str) -> int:\n \"\"\"\n Given two php versions, compare which is newer\n\n :return: 1 if a > b, -1 if b > a, 0 if a == b\n \"\"\"\n version_a = PackagistVersion.php_canonicalize_version(version_a)\n version_b = PackagistVersion.php_canonicalize_version(version_b)\n\n a_split = version_a.split('.')\n b_split = version_b.split('.')\n return PackagistVersion.php_slices_compare(a_split, b_split)\n\n @staticmethod\n def php_canonicalize_version(version: str) -> str:\n \"\"\"\n Replaces special separators (`-`,`_`,`+`) with `.`, and inserts `.`\n between any digit and non-digit.\n \"\"\"\n if version.startswith('v'):\n version = version[1:]\n replaced = re.sub('[-_+]', '.', version)\n replaced = re.sub(r'([^\\d.])(\\d)', r'\\1.\\2', replaced)\n replaced = re.sub(r'(\\d)([^\\d.])', r'\\1.\\2', replaced)\n return replaced\n\n SPECIAL_CHARACTER_ORDER = {\n \"dev\": 0,\n \"alpha\": 1,\n \"a\": 1,\n \"beta\": 2,\n \"b\": 2,\n \"RC\": 
3,\n \"rc\": 3,\n \"#\": 4,\n \"pl\": 5,\n \"p\": 5,\n None: 0,\n }\n\n @staticmethod\n def compare_special_versions(version_part_a: str, version_part_b: str) -> int:\n \"\"\"\n Compares the order of special characters against the order specified in php\n docs.\n\n any string not found in this list < dev < alpha = a < beta = b < RC = rc < # < pl = p.\n\n :return: 1 if a > b, -1 if b > a, 0 if a == b\n \"\"\"\n # This isn't quite the behaviour of the c implementation of php version_compare\n # In php if the part starts with special_chars its enough.\n # ### For example:\n # *PHP implementation:*\n # `1.0.0beta1 = 1.0.0betawithsomeothertext1`\n #\n # *This python implementation*\n # `1.0.0beta1 > 1.0.0betawithsomeothertext1`\n # Because \"any string not found in this list\" should apply to\n # `betawithsomeothertext`\n found_a = PackagistVersion.SPECIAL_CHARACTER_ORDER.get(version_part_a, -1)\n found_b = PackagistVersion.SPECIAL_CHARACTER_ORDER.get(version_part_b, -1)\n\n if found_a > found_b:\n return 1\n if found_a < found_b:\n return -1\n return 0\n", "path": "osv/packagist_version.py"}]} | 3,185 | 163 |
gh_patches_debug_35040 | rasdani/github-patches | git_diff | MongoEngine__mongoengine-1541 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs: use vanilla sphinx RTD theme rather than duplicating it in the repo
Currently, [sphinx RTD theme](https://github.com/snide/sphinx_rtd_theme) is copied in the docs directory. Rather than duplicating it, it would make sense to include it as a submodule, using the latest release ([0.1.9](https://github.com/snide/sphinx_rtd_theme/releases/tag/0.1.9)).
And of course, in the future, try to follow new releases.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/_themes/sphinx_rtd_theme/__init__.py`
Content:
```
1 """Sphinx ReadTheDocs theme.
2
3 From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
4
5 """
6 import os
7
8 VERSION = (0, 1, 5)
9
10 __version__ = ".".join(str(v) for v in VERSION)
11 __version_full__ = __version__
12
13
14 def get_html_theme_path():
15 """Return list of HTML theme paths."""
16 cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
17 return cur_dir
18
```
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # MongoEngine documentation build configuration file, created by
4 # sphinx-quickstart on Sun Nov 22 18:14:13 2009.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # Note that not all possible configuration values are present in this
9 # autogenerated file.
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 import sys, os
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 sys.path.insert(0, os.path.abspath('..'))
20
21 # -- General configuration -----------------------------------------------------
22
23 # Add any Sphinx extension module names here, as strings. They can be extensions
24 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
25 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
26
27 # Add any paths that contain templates here, relative to this directory.
28 templates_path = ['_templates']
29
30 # The suffix of source filenames.
31 source_suffix = '.rst'
32
33 # The encoding of source files.
34 #source_encoding = 'utf-8'
35
36 # The master toctree document.
37 master_doc = 'index'
38
39 # General information about the project.
40 project = u'MongoEngine'
41 copyright = u'2009, MongoEngine Authors'
42
43 # The version info for the project you're documenting, acts as replacement for
44 # |version| and |release|, also used in various other places throughout the
45 # built documents.
46 #
47 import mongoengine
48 # The short X.Y version.
49 version = mongoengine.get_version()
50 # The full version, including alpha/beta/rc tags.
51 release = mongoengine.get_version()
52
53 # The language for content autogenerated by Sphinx. Refer to documentation
54 # for a list of supported languages.
55 #language = None
56
57 # There are two options for replacing |today|: either, you set today to some
58 # non-false value, then it is used:
59 #today = ''
60 # Else, today_fmt is used as the format for a strftime call.
61 #today_fmt = '%B %d, %Y'
62
63 # List of documents that shouldn't be included in the build.
64 #unused_docs = []
65
66 # List of directories, relative to source directory, that shouldn't be searched
67 # for source files.
68 exclude_trees = ['_build']
69
70 # The reST default role (used for this markup: `text`) to use for all documents.
71 #default_role = None
72
73 # If true, '()' will be appended to :func: etc. cross-reference text.
74 #add_function_parentheses = True
75
76 # If true, the current module name will be prepended to all description
77 # unit titles (such as .. function::).
78 #add_module_names = True
79
80 # If true, sectionauthor and moduleauthor directives will be shown in the
81 # output. They are ignored by default.
82 #show_authors = False
83
84 # The name of the Pygments (syntax highlighting) style to use.
85 pygments_style = 'sphinx'
86
87 # A list of ignored prefixes for module index sorting.
88 #modindex_common_prefix = []
89
90
91 # -- Options for HTML output ---------------------------------------------------
92
93 # The theme to use for HTML and HTML Help pages. Major themes that come with
94 # Sphinx are currently 'default' and 'sphinxdoc'.
95 html_theme = 'sphinx_rtd_theme'
96
97 # Theme options are theme-specific and customize the look and feel of a theme
98 # further. For a list of options available for each theme, see the
99 # documentation.
100 #html_theme_options = {}
101
102 # Add any paths that contain custom themes here, relative to this directory.
103 html_theme_path = ['_themes']
104
105 # The name for this set of Sphinx documents. If None, it defaults to
106 # "<project> v<release> documentation".
107 #html_title = None
108
109 # A shorter title for the navigation bar. Default is the same as html_title.
110 #html_short_title = None
111
112 # The name of an image file (relative to this directory) to place at the top
113 # of the sidebar.
114 #html_logo = None
115
116 # The name of an image file (within the static path) to use as favicon of the
117 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
118 # pixels large.
119 html_favicon = "favicon.ico"
120
121 # Add any paths that contain custom static files (such as style sheets) here,
122 # relative to this directory. They are copied after the builtin static files,
123 # so a file named "default.css" will overwrite the builtin "default.css".
124 #html_static_path = ['_static']
125
126 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
127 # using the given strftime format.
128 #html_last_updated_fmt = '%b %d, %Y'
129
130 # If true, SmartyPants will be used to convert quotes and dashes to
131 # typographically correct entities.
132 html_use_smartypants = True
133
134 # Custom sidebar templates, maps document names to template names.
135 html_sidebars = {
136 'index': ['globaltoc.html', 'searchbox.html'],
137 '**': ['localtoc.html', 'relations.html', 'searchbox.html']
138 }
139
140
141 # Additional templates that should be rendered to pages, maps page names to
142 # template names.
143 #html_additional_pages = {}
144
145 # If false, no module index is generated.
146 #html_use_modindex = True
147
148 # If false, no index is generated.
149 #html_use_index = True
150
151 # If true, the index is split into individual pages for each letter.
152 #html_split_index = False
153
154 # If true, links to the reST sources are added to the pages.
155 #html_show_sourcelink = True
156
157 # If true, an OpenSearch description file will be output, and all pages will
158 # contain a <link> tag referring to it. The value of this option must be the
159 # base URL from which the finished HTML is served.
160 #html_use_opensearch = ''
161
162 # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
163 #html_file_suffix = ''
164
165 # Output file base name for HTML help builder.
166 htmlhelp_basename = 'MongoEnginedoc'
167
168
169 # -- Options for LaTeX output --------------------------------------------------
170
171 # The paper size ('letter' or 'a4').
172 latex_paper_size = 'a4'
173
174 # The font size ('10pt', '11pt' or '12pt').
175 #latex_font_size = '10pt'
176
177 # Grouping the document tree into LaTeX files. List of tuples
178 # (source start file, target name, title, author, documentclass [howto/manual]).
179 latex_documents = [
180 ('index', 'MongoEngine.tex', 'MongoEngine Documentation',
181 'Ross Lawley', 'manual'),
182 ]
183
184 # The name of an image file (relative to this directory) to place at the top of
185 # the title page.
186 #latex_logo = None
187
188 # For "manual" documents, if this is true, then toplevel headings are parts,
189 # not chapters.
190 #latex_use_parts = False
191
192 # Additional stuff for the LaTeX preamble.
193 #latex_preamble = ''
194
195 # Documents to append as an appendix to all manuals.
196 #latex_appendices = []
197
198 # If false, no module index is generated.
199 #latex_use_modindex = True
200
201 autoclass_content = 'both'
202
203 html_theme_options = dict(
204 canonical_url='http://docs.mongoengine.org/en/latest/'
205 )
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/_themes/sphinx_rtd_theme/__init__.py b/docs/_themes/sphinx_rtd_theme/__init__.py
deleted file mode 100755
--- a/docs/_themes/sphinx_rtd_theme/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""Sphinx ReadTheDocs theme.
-
-From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
-
-"""
-import os
-
-VERSION = (0, 1, 5)
-
-__version__ = ".".join(str(v) for v in VERSION)
-__version_full__ = __version__
-
-
-def get_html_theme_path():
- """Return list of HTML theme paths."""
- cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
- return cur_dir
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -13,6 +13,10 @@
import sys, os
+import sphinx_rtd_theme
+
+import mongoengine
+
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
@@ -44,7 +48,6 @@
# |version| and |release|, also used in various other places throughout the
# built documents.
#
-import mongoengine
# The short X.Y version.
version = mongoengine.get_version()
# The full version, including alpha/beta/rc tags.
@@ -97,10 +100,12 @@
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
-#html_theme_options = {}
+html_theme_options = {
+ 'canonical_url': 'http://docs.mongoengine.org/en/latest/'
+}
# Add any paths that contain custom themes here, relative to this directory.
-html_theme_path = ['_themes']
+html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
@@ -199,7 +204,3 @@
#latex_use_modindex = True
autoclass_content = 'both'
-
-html_theme_options = dict(
- canonical_url='http://docs.mongoengine.org/en/latest/'
-)
| {"golden_diff": "diff --git a/docs/_themes/sphinx_rtd_theme/__init__.py b/docs/_themes/sphinx_rtd_theme/__init__.py\ndeleted file mode 100755\n--- a/docs/_themes/sphinx_rtd_theme/__init__.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-\"\"\"Sphinx ReadTheDocs theme.\n-\n-From https://github.com/ryan-roemer/sphinx-bootstrap-theme.\n-\n-\"\"\"\n-import os\n-\n-VERSION = (0, 1, 5)\n-\n-__version__ = \".\".join(str(v) for v in VERSION)\n-__version_full__ = __version__\n-\n-\n-def get_html_theme_path():\n- \"\"\"Return list of HTML theme paths.\"\"\"\n- cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n- return cur_dir\ndiff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -13,6 +13,10 @@\n \n import sys, os\n \n+import sphinx_rtd_theme\n+\n+import mongoengine\n+\n # If extensions (or modules to document with autodoc) are in another directory,\n # add these directories to sys.path here. If the directory is relative to the\n # documentation root, use os.path.abspath to make it absolute, like shown here.\n@@ -44,7 +48,6 @@\n # |version| and |release|, also used in various other places throughout the\n # built documents.\n #\n-import mongoengine\n # The short X.Y version.\n version = mongoengine.get_version()\n # The full version, including alpha/beta/rc tags.\n@@ -97,10 +100,12 @@\n # Theme options are theme-specific and customize the look and feel of a theme\n # further. For a list of options available for each theme, see the\n # documentation.\n-#html_theme_options = {}\n+html_theme_options = {\n+ 'canonical_url': 'http://docs.mongoengine.org/en/latest/'\n+}\n \n # Add any paths that contain custom themes here, relative to this directory.\n-html_theme_path = ['_themes']\n+html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n \n # The name for this set of Sphinx documents. If None, it defaults to\n # \"<project> v<release> documentation\".\n@@ -199,7 +204,3 @@\n #latex_use_modindex = True\n \n autoclass_content = 'both'\n-\n-html_theme_options = dict(\n- canonical_url='http://docs.mongoengine.org/en/latest/'\n-)\n", "issue": "Docs: use vanilla sphinx RTD theme rather than duplicating it in the repo\nCurrently, [sphinx RTD theme](https://github.com/snide/sphinx_rtd_theme) is copied in the docs directory. 
Rather than duplicating, it would make sense to include it as a submodule, using latest release ([0.1.9](https://github.com/snide/sphinx_rtd_theme/releases/tag/0.1.9)).\n\nAnd of course, in the future, try to follow new releases.\n\n", "before_files": [{"content": "\"\"\"Sphinx ReadTheDocs theme.\n\nFrom https://github.com/ryan-roemer/sphinx-bootstrap-theme.\n\n\"\"\"\nimport os\n\nVERSION = (0, 1, 5)\n\n__version__ = \".\".join(str(v) for v in VERSION)\n__version_full__ = __version__\n\n\ndef get_html_theme_path():\n \"\"\"Return list of HTML theme paths.\"\"\"\n cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n return cur_dir\n", "path": "docs/_themes/sphinx_rtd_theme/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# MongoEngine documentation build configuration file, created by\n# sphinx-quickstart on Sun Nov 22 18:14:13 2009.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys, os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- General configuration -----------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'MongoEngine'\ncopyright = u'2009, MongoEngine Authors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nimport mongoengine\n# The short X.Y version.\nversion = mongoengine.get_version()\n# The full version, including alpha/beta/rc tags.\nrelease = mongoengine.get_version()\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['_themes']\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = \"favicon.ico\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n#html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\nhtml_sidebars = {\n 'index': ['globaltoc.html', 'searchbox.html'],\n '**': ['localtoc.html', 'relations.html', 'searchbox.html']\n}\n\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'MongoEnginedoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\nlatex_paper_size = 'a4'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'MongoEngine.tex', 'MongoEngine Documentation',\n 'Ross Lawley', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\nautoclass_content = 'both'\n\nhtml_theme_options = dict(\n canonical_url='http://docs.mongoengine.org/en/latest/'\n)\n", "path": "docs/conf.py"}], "after_files": [{"content": null, "path": "docs/_themes/sphinx_rtd_theme/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# MongoEngine documentation build configuration file, created by\n# sphinx-quickstart on Sun Nov 22 18:14:13 2009.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys, os\n\nimport sphinx_rtd_theme\n\nimport mongoengine\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- General configuration -----------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'MongoEngine'\ncopyright = u'2009, MongoEngine Authors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mongoengine.get_version()\n# The full version, including alpha/beta/rc tags.\nrelease = mongoengine.get_version()\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'canonical_url': 'http://docs.mongoengine.org/en/latest/'\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = \"favicon.ico\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n#html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\nhtml_sidebars = {\n 'index': ['globaltoc.html', 'searchbox.html'],\n '**': ['localtoc.html', 'relations.html', 'searchbox.html']\n}\n\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'MongoEnginedoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\nlatex_paper_size = 'a4'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'MongoEngine.tex', 'MongoEngine Documentation',\n 'Ross Lawley', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\nautoclass_content = 'both'\n", "path": "docs/conf.py"}]} | 2,686 | 562 |
gh_patches_debug_4532 | rasdani/github-patches | git_diff | huggingface__dataset-viewer-2789 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Truncate all the logs
We sometimes have very big logs (one row > 5MB). They're not useful at all and trigger warnings from infra. When we set up the logging configuration, we could try to set a maximum length
https://github.com/huggingface/dataset-viewer/blob/95527c2f1f0b8f077ed9ec74d3c75e45dbc1d00a/libs/libcommon/src/libcommon/log.py#L7-L9
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libs/libcommon/src/libcommon/log.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 # Copyright 2022 The HuggingFace Authors.
3
4 import logging
5
6
7 def init_logging(level: int = logging.INFO) -> None:
8 logging.basicConfig(level=level, format="%(levelname)s: %(asctime)s - %(name)s - %(message)s")
9 logging.debug(f"Log level set to: {logging.getLevelName(logging.getLogger().getEffectiveLevel())}")
10
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libs/libcommon/src/libcommon/log.py b/libs/libcommon/src/libcommon/log.py
--- a/libs/libcommon/src/libcommon/log.py
+++ b/libs/libcommon/src/libcommon/log.py
@@ -5,5 +5,5 @@
def init_logging(level: int = logging.INFO) -> None:
- logging.basicConfig(level=level, format="%(levelname)s: %(asctime)s - %(name)s - %(message)s")
+ logging.basicConfig(level=level, format="%(levelname)s: %(asctime)s - %(name)s - %(message).5000s")
logging.debug(f"Log level set to: {logging.getLevelName(logging.getLogger().getEffectiveLevel())}")
| {"golden_diff": "diff --git a/libs/libcommon/src/libcommon/log.py b/libs/libcommon/src/libcommon/log.py\n--- a/libs/libcommon/src/libcommon/log.py\n+++ b/libs/libcommon/src/libcommon/log.py\n@@ -5,5 +5,5 @@\n \n \n def init_logging(level: int = logging.INFO) -> None:\n- logging.basicConfig(level=level, format=\"%(levelname)s: %(asctime)s - %(name)s - %(message)s\")\n+ logging.basicConfig(level=level, format=\"%(levelname)s: %(asctime)s - %(name)s - %(message).5000s\")\n logging.debug(f\"Log level set to: {logging.getLevelName(logging.getLogger().getEffectiveLevel())}\")\n", "issue": "Truncate all the logs\nWe sometimes have very big logs (one row > 5MB). It's not useful at all and triggers warnings from infra. When we setup the logs configuration, we could try to set a maximum length\r\n\r\nhttps://github.com/huggingface/dataset-viewer/blob/95527c2f1f0b8f077ed9ec74d3c75e45dbc1d00a/libs/libcommon/src/libcommon/log.py#L7-L9\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nimport logging\n\n\ndef init_logging(level: int = logging.INFO) -> None:\n logging.basicConfig(level=level, format=\"%(levelname)s: %(asctime)s - %(name)s - %(message)s\")\n logging.debug(f\"Log level set to: {logging.getLevelName(logging.getLogger().getEffectiveLevel())}\")\n", "path": "libs/libcommon/src/libcommon/log.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nimport logging\n\n\ndef init_logging(level: int = logging.INFO) -> None:\n logging.basicConfig(level=level, format=\"%(levelname)s: %(asctime)s - %(name)s - %(message).5000s\")\n logging.debug(f\"Log level set to: {logging.getLevelName(logging.getLogger().getEffectiveLevel())}\")\n", "path": "libs/libcommon/src/libcommon/log.py"}]} | 477 | 148 |
gh_patches_debug_3428 | rasdani/github-patches | git_diff | iterative__dvc-6074 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
stage add: quotes are missing in dvc.yaml
# Bug Report
## Description
Quotes in `dvc stage add` aren't added to dvc.yaml as expected. The following `dvc repro` fails because the Python script expects an empty string to be provided.
### Reproduce
1. mkdir dvc-bug-empty-string-arg
2. cd dvc-bug-empty-string-arg
3. dvc init --no-scm
4. dvc stage add -n train python train.py --weights ""
5. dvc stage add -n train-second python train.py --weights '""'
6. cat dvc.yaml
output:
```
stages:
  train:
    cmd: 'python train.py --weights '
  train-second:
    cmd: python train.py --weights ""
```
### Expected
Expected to have quotes in the first stage, and two quotes in the second one.
### Environment information
**Output of `dvc doctor`:**
```console
$ dvc doctor
DVC version: 2.1.0 (brew)
---------------------------------
Platform: Python 3.9.5 on macOS-10.16-x86_64-i386-64bit
Supports: azure, gdrive, gs, http, https, s3, ssh, oss, webdav, webdavs
Cache types: <https://error.dvc.org/no-dvc-cache>
Caches: local
Remotes: None
Workspace directory: apfs on /dev/disk1s5s1
Repo: dvc (no_scm)
```
**Additional Information (if any):**
Rechecked this in bash and zsh.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/command/stage.py`
Content:
```
1 import argparse
2 import logging
3 from itertools import chain, filterfalse
4 from typing import TYPE_CHECKING, Dict, Iterable, List
5
6 from dvc.command import completion
7 from dvc.command.base import CmdBase, append_doc_link, fix_subparsers
8 from dvc.utils.cli_parse import parse_params
9 from dvc.utils.humanize import truncate_text
10
11 if TYPE_CHECKING:
12 from dvc.output import Output
13 from dvc.stage import Stage
14
15 logger = logging.getLogger(__name__)
16
17 MAX_TEXT_LENGTH = 80
18
19
20 def generate_description(stage: "Stage") -> str:
21 def part_desc(outs: Iterable["Output"]) -> str:
22 return ", ".join(out.def_path for out in outs)
23
24 if not stage.deps and not stage.outs:
25 return "No outputs or dependencies"
26
27 if not stage.outs and stage.deps:
28 return "Depends on " + part_desc(stage.deps)
29
30 def is_plot_or_metric(out: "Output"):
31 return bool(out.plot) or bool(out.metric)
32
33 desc: List[str] = []
34
35 outs = list(filterfalse(is_plot_or_metric, stage.outs))
36 if outs:
37 desc.append("Outputs " + part_desc(outs))
38
39 plots_and_metrics = list(filter(is_plot_or_metric, stage.outs))
40 if plots_and_metrics:
41 desc.append("Reports " + part_desc(plots_and_metrics))
42
43 return "; ".join(desc)
44
45
46 def prepare_description(
47 stage: "Stage", max_length: int = MAX_TEXT_LENGTH
48 ) -> str:
49 desc = stage.short_description() or generate_description(stage)
50 return truncate_text(desc, max_length)
51
52
53 def prepare_stages_data(
54 stages: Iterable["Stage"],
55 description: bool = True,
56 max_length: int = MAX_TEXT_LENGTH,
57 ) -> Dict[str, str]:
58 return {
59 stage.addressing: prepare_description(stage, max_length=max_length)
60 if description
61 else ""
62 for stage in stages
63 }
64
65
66 class CmdStageList(CmdBase):
67 def _get_stages(self) -> Iterable["Stage"]:
68 if self.args.all:
69 stages: List["Stage"] = self.repo.stages # type: ignore
70 logger.trace( # type: ignore[attr-defined]
71 "%d no. of stages found", len(stages)
72 )
73 return stages
74
75 # removing duplicates while maintaining order
76 collected = chain.from_iterable(
77 self.repo.stage.collect(
78 target=target, recursive=self.args.recursive, accept_group=True
79 )
80 for target in self.args.targets
81 )
82 return dict.fromkeys(collected).keys()
83
84 def run(self):
85 from dvc.ui import ui
86
87 def log_error(relpath: str, exc: Exception):
88 if self.args.fail:
89 raise exc
90 logger.debug("Stages from %s failed to load", relpath)
91
92 # silence stage collection error by default
93 self.repo.stage_collection_error_handler = log_error
94
95 stages = self._get_stages()
96 names_only = self.args.names_only
97
98 data = prepare_stages_data(stages, description=not names_only)
99 ui.table(data.items())
100
101 return 0
102
103
104 def parse_cmd(commands: List[str]) -> str:
105 """
106 We need to take into account two cases:
107
108 - ['python code.py foo bar']: Used mainly with dvc as a library
109 - ['echo', 'foo bar']: List of arguments received from the CLI
110
111 The second case would need quoting, as it was passed through:
112 dvc run echo "foo bar"
113 """
114
115 def quote_argument(arg: str):
116 should_quote = " " in arg and '"' not in arg
117 return f'"{arg}"' if should_quote else arg
118
119 if len(commands) < 2:
120 return " ".join(commands)
121 return " ".join(map(quote_argument, commands))
122
123
124 class CmdStageAdd(CmdBase):
125 def run(self):
126 kwargs = vars(self.args)
127 kwargs.update(
128 {
129 "cmd": parse_cmd(kwargs.pop("cmd")),
130 "params": parse_params(self.args.params),
131 }
132 )
133 self.repo.stage.add(**kwargs)
134 return 0
135
136
137 def _add_common_args(parser):
138 parser.add_argument(
139 "-d",
140 "--deps",
141 action="append",
142 default=[],
143 help="Declare dependencies for reproducible cmd.",
144 metavar="<path>",
145 ).complete = completion.FILE
146 parser.add_argument(
147 "-o",
148 "--outs",
149 action="append",
150 default=[],
151 help="Declare output file or directory.",
152 metavar="<filename>",
153 ).complete = completion.FILE
154 parser.add_argument(
155 "-O",
156 "--outs-no-cache",
157 action="append",
158 default=[],
159 help="Declare output file or directory "
160 "(do not put into DVC cache).",
161 metavar="<filename>",
162 ).complete = completion.FILE
163 parser.add_argument(
164 "-p",
165 "--params",
166 action="append",
167 default=[],
168 help="Declare parameter to use as additional dependency.",
169 metavar="[<filename>:]<params_list>",
170 ).complete = completion.FILE
171 parser.add_argument(
172 "-m",
173 "--metrics",
174 action="append",
175 default=[],
176 help="Declare output metrics file.",
177 metavar="<path>",
178 )
179 parser.add_argument(
180 "-M",
181 "--metrics-no-cache",
182 action="append",
183 default=[],
184 help="Declare output metrics file (do not put into DVC cache).",
185 metavar="<path>",
186 )
187 parser.add_argument(
188 "--plots",
189 action="append",
190 default=[],
191 help="Declare output plot file.",
192 metavar="<path>",
193 )
194 parser.add_argument(
195 "--plots-no-cache",
196 action="append",
197 default=[],
198 help="Declare output plot file (do not put into DVC cache).",
199 metavar="<path>",
200 )
201 parser.add_argument(
202 "--live", help="Declare output as dvclive.", metavar="<path>"
203 )
204 parser.add_argument(
205 "--live-no-cache",
206 help="Declare output as dvclive (do not put into DVC cache).",
207 metavar="<path>",
208 )
209 parser.add_argument(
210 "--live-no-summary",
211 action="store_true",
212 default=False,
213 help="Signal dvclive logger to not dump latest metrics file.",
214 )
215 parser.add_argument(
216 "--live-no-html",
217 action="store_true",
218 default=False,
219 help="Signal dvclive logger to not produce training report.",
220 )
221 parser.add_argument(
222 "-w",
223 "--wdir",
224 help="Directory within your repo to run your command in.",
225 metavar="<path>",
226 )
227 parser.add_argument(
228 "-f",
229 "--force",
230 action="store_true",
231 default=False,
232 help="Overwrite existing stage",
233 )
234 parser.add_argument(
235 "--outs-persist",
236 action="append",
237 default=[],
238 help="Declare output file or directory that will not be "
239 "removed upon repro.",
240 metavar="<filename>",
241 )
242 parser.add_argument(
243 "--outs-persist-no-cache",
244 action="append",
245 default=[],
246 help="Declare output file or directory that will not be "
247 "removed upon repro (do not put into DVC cache).",
248 metavar="<filename>",
249 )
250 parser.add_argument(
251 "-c",
252 "--checkpoints",
253 action="append",
254 default=[],
255 help="Declare checkpoint output file or directory for 'dvc exp run'. "
256 "Not compatible with 'dvc repro'.",
257 metavar="<filename>",
258 ).complete = completion.FILE
259 parser.add_argument(
260 "--always-changed",
261 action="store_true",
262 default=False,
263 help="Always consider this DVC-file as changed.",
264 )
265 parser.add_argument(
266 "--external",
267 action="store_true",
268 default=False,
269 help="Allow outputs that are outside of the DVC repository.",
270 )
271 parser.add_argument(
272 "--desc",
273 type=str,
274 metavar="<text>",
275 help=(
276 "User description of the stage (optional). "
277 "This doesn't affect any DVC operations."
278 ),
279 )
280 parser.add_argument(
281 "cmd",
282 nargs=argparse.REMAINDER,
283 help="Command to execute.",
284 metavar="command",
285 )
286
287
288 def add_parser(subparsers, parent_parser):
289 STAGES_HELP = "Commands to list and create stages."
290
291 stage_parser = subparsers.add_parser(
292 "stage",
293 parents=[parent_parser],
294 description=append_doc_link(STAGES_HELP, "stage"),
295 help=STAGES_HELP,
296 formatter_class=argparse.RawDescriptionHelpFormatter,
297 )
298
299 stage_subparsers = stage_parser.add_subparsers(
300 dest="cmd",
301 help="Use `dvc stage CMD --help` to display command-specific help.",
302 )
303
304 fix_subparsers(stage_subparsers)
305
306 STAGE_ADD_HELP = "Create stage"
307 stage_add_parser = stage_subparsers.add_parser(
308 "add",
309 parents=[parent_parser],
310 description=append_doc_link(STAGE_ADD_HELP, "stage/add"),
311 help=STAGE_ADD_HELP,
312 formatter_class=argparse.RawDescriptionHelpFormatter,
313 )
314 stage_add_parser.add_argument(
315 "-n", "--name", help="Name of the stage to add", required=True
316 )
317 _add_common_args(stage_add_parser)
318 stage_add_parser.set_defaults(func=CmdStageAdd)
319
320 STAGE_LIST_HELP = "List stages."
321 stage_list_parser = stage_subparsers.add_parser(
322 "list",
323 parents=[parent_parser],
324 description=append_doc_link(STAGE_LIST_HELP, "stage/list"),
325 help=STAGE_LIST_HELP,
326 formatter_class=argparse.RawDescriptionHelpFormatter,
327 )
328 stage_list_parser.add_argument(
329 "targets",
330 nargs="*",
331 default=["dvc.yaml"],
332 help=(
333 "Show stages from a dvc.yaml/.dvc file or a directory. "
334 "'dvc.yaml' by default"
335 ),
336 )
337 stage_list_parser.add_argument(
338 "--all",
339 action="store_true",
340 default=False,
341 help="List all of the stages in the repo.",
342 )
343 stage_list_parser.add_argument(
344 "--fail",
345 action="store_true",
346 default=False,
347 help="Fail immediately, do not suppress any syntax errors.",
348 )
349 stage_list_parser.add_argument(
350 "-R",
351 "--recursive",
352 action="store_true",
353 default=False,
354 help="List all stages inside the specified directory.",
355 )
356 stage_list_parser.add_argument(
357 "--names-only",
358 action="store_true",
359 default=False,
360 help="List only stage names.",
361 )
362 stage_list_parser.set_defaults(func=CmdStageList)
363
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/command/stage.py b/dvc/command/stage.py
--- a/dvc/command/stage.py
+++ b/dvc/command/stage.py
@@ -113,8 +113,11 @@
"""
def quote_argument(arg: str):
- should_quote = " " in arg and '"' not in arg
- return f'"{arg}"' if should_quote else arg
+ if not arg:
+ return '""'
+ if " " in arg and '"' not in arg:
+ return f'"{arg}"'
+ return arg
if len(commands) < 2:
return " ".join(commands)
| {"golden_diff": "diff --git a/dvc/command/stage.py b/dvc/command/stage.py\n--- a/dvc/command/stage.py\n+++ b/dvc/command/stage.py\n@@ -113,8 +113,11 @@\n \"\"\"\n \n def quote_argument(arg: str):\n- should_quote = \" \" in arg and '\"' not in arg\n- return f'\"{arg}\"' if should_quote else arg\n+ if not arg:\n+ return '\"\"'\n+ if \" \" in arg and '\"' not in arg:\n+ return f'\"{arg}\"'\n+ return arg\n \n if len(commands) < 2:\n return \" \".join(commands)\n", "issue": "stage add: quotes are missing in dvc.yaml\n# Bug Report\r\n\r\n## Description\r\n\r\nQuotes in `dvc stage add` aren't added to dvc.yaml as expected. The following `dvc repro` fails as python script expects to have an empty string provided.\r\n\r\n### Reproduce\r\n\r\n1. mkdir dvc-bug-empty-string-arg\r\n2. cd dvc-bug-empty-string-arg\r\n3. dvc init --no-scm\r\n4. dvc stage add -n train python train.py --weights \"\"\r\n5. dvc stage add -n train-second python train.py --weights '\"\"'\r\n6. cat dvc.yaml\r\n\r\noutput:\r\n```\r\nstages:\r\n train:\r\n cmd: 'python train.py --weights '\r\n train-second:\r\n cmd: python train.py --weights \"\"\r\n```\r\n\r\n### Expected\r\n\r\nExpected to have quotes in the first stage, have two quotes in the second one.\r\n\r\n### Environment information\r\n\r\n**Output of `dvc doctor`:**\r\n\r\n```console\r\n$ dvc doctor\r\nDVC version: 2.1.0 (brew)\r\n---------------------------------\r\nPlatform: Python 3.9.5 on macOS-10.16-x86_64-i386-64bit\r\nSupports: azure, gdrive, gs, http, https, s3, ssh, oss, webdav, webdavs\r\nCache types: <https://error.dvc.org/no-dvc-cache>\r\nCaches: local\r\nRemotes: None\r\nWorkspace directory: apfs on /dev/disk1s5s1\r\nRepo: dvc (no_scm)\r\n```\r\n\r\n**Additional Information (if any):**\r\n\r\nRechecked this in bash and zsh.\n", "before_files": [{"content": "import argparse\nimport logging\nfrom itertools import chain, filterfalse\nfrom typing import TYPE_CHECKING, Dict, Iterable, List\n\nfrom dvc.command import completion\nfrom dvc.command.base import CmdBase, append_doc_link, fix_subparsers\nfrom dvc.utils.cli_parse import parse_params\nfrom dvc.utils.humanize import truncate_text\n\nif TYPE_CHECKING:\n from dvc.output import Output\n from dvc.stage import Stage\n\nlogger = logging.getLogger(__name__)\n\nMAX_TEXT_LENGTH = 80\n\n\ndef generate_description(stage: \"Stage\") -> str:\n def part_desc(outs: Iterable[\"Output\"]) -> str:\n return \", \".join(out.def_path for out in outs)\n\n if not stage.deps and not stage.outs:\n return \"No outputs or dependencies\"\n\n if not stage.outs and stage.deps:\n return \"Depends on \" + part_desc(stage.deps)\n\n def is_plot_or_metric(out: \"Output\"):\n return bool(out.plot) or bool(out.metric)\n\n desc: List[str] = []\n\n outs = list(filterfalse(is_plot_or_metric, stage.outs))\n if outs:\n desc.append(\"Outputs \" + part_desc(outs))\n\n plots_and_metrics = list(filter(is_plot_or_metric, stage.outs))\n if plots_and_metrics:\n desc.append(\"Reports \" + part_desc(plots_and_metrics))\n\n return \"; \".join(desc)\n\n\ndef prepare_description(\n stage: \"Stage\", max_length: int = MAX_TEXT_LENGTH\n) -> str:\n desc = stage.short_description() or generate_description(stage)\n return truncate_text(desc, max_length)\n\n\ndef prepare_stages_data(\n stages: Iterable[\"Stage\"],\n description: bool = True,\n max_length: int = MAX_TEXT_LENGTH,\n) -> Dict[str, str]:\n return {\n stage.addressing: prepare_description(stage, max_length=max_length)\n if description\n else \"\"\n for stage in stages\n }\n\n\nclass 
CmdStageList(CmdBase):\n def _get_stages(self) -> Iterable[\"Stage\"]:\n if self.args.all:\n stages: List[\"Stage\"] = self.repo.stages # type: ignore\n logger.trace( # type: ignore[attr-defined]\n \"%d no. of stages found\", len(stages)\n )\n return stages\n\n # removing duplicates while maintaining order\n collected = chain.from_iterable(\n self.repo.stage.collect(\n target=target, recursive=self.args.recursive, accept_group=True\n )\n for target in self.args.targets\n )\n return dict.fromkeys(collected).keys()\n\n def run(self):\n from dvc.ui import ui\n\n def log_error(relpath: str, exc: Exception):\n if self.args.fail:\n raise exc\n logger.debug(\"Stages from %s failed to load\", relpath)\n\n # silence stage collection error by default\n self.repo.stage_collection_error_handler = log_error\n\n stages = self._get_stages()\n names_only = self.args.names_only\n\n data = prepare_stages_data(stages, description=not names_only)\n ui.table(data.items())\n\n return 0\n\n\ndef parse_cmd(commands: List[str]) -> str:\n \"\"\"\n We need to take into account two cases:\n\n - ['python code.py foo bar']: Used mainly with dvc as a library\n - ['echo', 'foo bar']: List of arguments received from the CLI\n\n The second case would need quoting, as it was passed through:\n dvc run echo \"foo bar\"\n \"\"\"\n\n def quote_argument(arg: str):\n should_quote = \" \" in arg and '\"' not in arg\n return f'\"{arg}\"' if should_quote else arg\n\n if len(commands) < 2:\n return \" \".join(commands)\n return \" \".join(map(quote_argument, commands))\n\n\nclass CmdStageAdd(CmdBase):\n def run(self):\n kwargs = vars(self.args)\n kwargs.update(\n {\n \"cmd\": parse_cmd(kwargs.pop(\"cmd\")),\n \"params\": parse_params(self.args.params),\n }\n )\n self.repo.stage.add(**kwargs)\n return 0\n\n\ndef _add_common_args(parser):\n parser.add_argument(\n \"-d\",\n \"--deps\",\n action=\"append\",\n default=[],\n help=\"Declare dependencies for reproducible cmd.\",\n metavar=\"<path>\",\n ).complete = completion.FILE\n parser.add_argument(\n \"-o\",\n \"--outs\",\n action=\"append\",\n default=[],\n help=\"Declare output file or directory.\",\n metavar=\"<filename>\",\n ).complete = completion.FILE\n parser.add_argument(\n \"-O\",\n \"--outs-no-cache\",\n action=\"append\",\n default=[],\n help=\"Declare output file or directory \"\n \"(do not put into DVC cache).\",\n metavar=\"<filename>\",\n ).complete = completion.FILE\n parser.add_argument(\n \"-p\",\n \"--params\",\n action=\"append\",\n default=[],\n help=\"Declare parameter to use as additional dependency.\",\n metavar=\"[<filename>:]<params_list>\",\n ).complete = completion.FILE\n parser.add_argument(\n \"-m\",\n \"--metrics\",\n action=\"append\",\n default=[],\n help=\"Declare output metrics file.\",\n metavar=\"<path>\",\n )\n parser.add_argument(\n \"-M\",\n \"--metrics-no-cache\",\n action=\"append\",\n default=[],\n help=\"Declare output metrics file (do not put into DVC cache).\",\n metavar=\"<path>\",\n )\n parser.add_argument(\n \"--plots\",\n action=\"append\",\n default=[],\n help=\"Declare output plot file.\",\n metavar=\"<path>\",\n )\n parser.add_argument(\n \"--plots-no-cache\",\n action=\"append\",\n default=[],\n help=\"Declare output plot file (do not put into DVC cache).\",\n metavar=\"<path>\",\n )\n parser.add_argument(\n \"--live\", help=\"Declare output as dvclive.\", metavar=\"<path>\"\n )\n parser.add_argument(\n \"--live-no-cache\",\n help=\"Declare output as dvclive (do not put into DVC cache).\",\n metavar=\"<path>\",\n )\n 
parser.add_argument(\n \"--live-no-summary\",\n action=\"store_true\",\n default=False,\n help=\"Signal dvclive logger to not dump latest metrics file.\",\n )\n parser.add_argument(\n \"--live-no-html\",\n action=\"store_true\",\n default=False,\n help=\"Signal dvclive logger to not produce training report.\",\n )\n parser.add_argument(\n \"-w\",\n \"--wdir\",\n help=\"Directory within your repo to run your command in.\",\n metavar=\"<path>\",\n )\n parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Overwrite existing stage\",\n )\n parser.add_argument(\n \"--outs-persist\",\n action=\"append\",\n default=[],\n help=\"Declare output file or directory that will not be \"\n \"removed upon repro.\",\n metavar=\"<filename>\",\n )\n parser.add_argument(\n \"--outs-persist-no-cache\",\n action=\"append\",\n default=[],\n help=\"Declare output file or directory that will not be \"\n \"removed upon repro (do not put into DVC cache).\",\n metavar=\"<filename>\",\n )\n parser.add_argument(\n \"-c\",\n \"--checkpoints\",\n action=\"append\",\n default=[],\n help=\"Declare checkpoint output file or directory for 'dvc exp run'. \"\n \"Not compatible with 'dvc repro'.\",\n metavar=\"<filename>\",\n ).complete = completion.FILE\n parser.add_argument(\n \"--always-changed\",\n action=\"store_true\",\n default=False,\n help=\"Always consider this DVC-file as changed.\",\n )\n parser.add_argument(\n \"--external\",\n action=\"store_true\",\n default=False,\n help=\"Allow outputs that are outside of the DVC repository.\",\n )\n parser.add_argument(\n \"--desc\",\n type=str,\n metavar=\"<text>\",\n help=(\n \"User description of the stage (optional). \"\n \"This doesn't affect any DVC operations.\"\n ),\n )\n parser.add_argument(\n \"cmd\",\n nargs=argparse.REMAINDER,\n help=\"Command to execute.\",\n metavar=\"command\",\n )\n\n\ndef add_parser(subparsers, parent_parser):\n STAGES_HELP = \"Commands to list and create stages.\"\n\n stage_parser = subparsers.add_parser(\n \"stage\",\n parents=[parent_parser],\n description=append_doc_link(STAGES_HELP, \"stage\"),\n help=STAGES_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n stage_subparsers = stage_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use `dvc stage CMD --help` to display command-specific help.\",\n )\n\n fix_subparsers(stage_subparsers)\n\n STAGE_ADD_HELP = \"Create stage\"\n stage_add_parser = stage_subparsers.add_parser(\n \"add\",\n parents=[parent_parser],\n description=append_doc_link(STAGE_ADD_HELP, \"stage/add\"),\n help=STAGE_ADD_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n stage_add_parser.add_argument(\n \"-n\", \"--name\", help=\"Name of the stage to add\", required=True\n )\n _add_common_args(stage_add_parser)\n stage_add_parser.set_defaults(func=CmdStageAdd)\n\n STAGE_LIST_HELP = \"List stages.\"\n stage_list_parser = stage_subparsers.add_parser(\n \"list\",\n parents=[parent_parser],\n description=append_doc_link(STAGE_LIST_HELP, \"stage/list\"),\n help=STAGE_LIST_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n stage_list_parser.add_argument(\n \"targets\",\n nargs=\"*\",\n default=[\"dvc.yaml\"],\n help=(\n \"Show stages from a dvc.yaml/.dvc file or a directory. 
\"\n \"'dvc.yaml' by default\"\n ),\n )\n stage_list_parser.add_argument(\n \"--all\",\n action=\"store_true\",\n default=False,\n help=\"List all of the stages in the repo.\",\n )\n stage_list_parser.add_argument(\n \"--fail\",\n action=\"store_true\",\n default=False,\n help=\"Fail immediately, do not suppress any syntax errors.\",\n )\n stage_list_parser.add_argument(\n \"-R\",\n \"--recursive\",\n action=\"store_true\",\n default=False,\n help=\"List all stages inside the specified directory.\",\n )\n stage_list_parser.add_argument(\n \"--names-only\",\n action=\"store_true\",\n default=False,\n help=\"List only stage names.\",\n )\n stage_list_parser.set_defaults(func=CmdStageList)\n", "path": "dvc/command/stage.py"}], "after_files": [{"content": "import argparse\nimport logging\nfrom itertools import chain, filterfalse\nfrom typing import TYPE_CHECKING, Dict, Iterable, List\n\nfrom dvc.command import completion\nfrom dvc.command.base import CmdBase, append_doc_link, fix_subparsers\nfrom dvc.utils.cli_parse import parse_params\nfrom dvc.utils.humanize import truncate_text\n\nif TYPE_CHECKING:\n from dvc.output import Output\n from dvc.stage import Stage\n\nlogger = logging.getLogger(__name__)\n\nMAX_TEXT_LENGTH = 80\n\n\ndef generate_description(stage: \"Stage\") -> str:\n def part_desc(outs: Iterable[\"Output\"]) -> str:\n return \", \".join(out.def_path for out in outs)\n\n if not stage.deps and not stage.outs:\n return \"No outputs or dependencies\"\n\n if not stage.outs and stage.deps:\n return \"Depends on \" + part_desc(stage.deps)\n\n def is_plot_or_metric(out: \"Output\"):\n return bool(out.plot) or bool(out.metric)\n\n desc: List[str] = []\n\n outs = list(filterfalse(is_plot_or_metric, stage.outs))\n if outs:\n desc.append(\"Outputs \" + part_desc(outs))\n\n plots_and_metrics = list(filter(is_plot_or_metric, stage.outs))\n if plots_and_metrics:\n desc.append(\"Reports \" + part_desc(plots_and_metrics))\n\n return \"; \".join(desc)\n\n\ndef prepare_description(\n stage: \"Stage\", max_length: int = MAX_TEXT_LENGTH\n) -> str:\n desc = stage.short_description() or generate_description(stage)\n return truncate_text(desc, max_length)\n\n\ndef prepare_stages_data(\n stages: Iterable[\"Stage\"],\n description: bool = True,\n max_length: int = MAX_TEXT_LENGTH,\n) -> Dict[str, str]:\n return {\n stage.addressing: prepare_description(stage, max_length=max_length)\n if description\n else \"\"\n for stage in stages\n }\n\n\nclass CmdStageList(CmdBase):\n def _get_stages(self) -> Iterable[\"Stage\"]:\n if self.args.all:\n stages: List[\"Stage\"] = self.repo.stages # type: ignore\n logger.trace( # type: ignore[attr-defined]\n \"%d no. 
of stages found\", len(stages)\n )\n return stages\n\n # removing duplicates while maintaining order\n collected = chain.from_iterable(\n self.repo.stage.collect(\n target=target, recursive=self.args.recursive, accept_group=True\n )\n for target in self.args.targets\n )\n return dict.fromkeys(collected).keys()\n\n def run(self):\n from dvc.ui import ui\n\n def log_error(relpath: str, exc: Exception):\n if self.args.fail:\n raise exc\n logger.debug(\"Stages from %s failed to load\", relpath)\n\n # silence stage collection error by default\n self.repo.stage_collection_error_handler = log_error\n\n stages = self._get_stages()\n names_only = self.args.names_only\n\n data = prepare_stages_data(stages, description=not names_only)\n ui.table(data.items())\n\n return 0\n\n\ndef parse_cmd(commands: List[str]) -> str:\n \"\"\"\n We need to take into account two cases:\n\n - ['python code.py foo bar']: Used mainly with dvc as a library\n - ['echo', 'foo bar']: List of arguments received from the CLI\n\n The second case would need quoting, as it was passed through:\n dvc run echo \"foo bar\"\n \"\"\"\n\n def quote_argument(arg: str):\n if not arg:\n return '\"\"'\n if \" \" in arg and '\"' not in arg:\n return f'\"{arg}\"'\n return arg\n\n if len(commands) < 2:\n return \" \".join(commands)\n return \" \".join(map(quote_argument, commands))\n\n\nclass CmdStageAdd(CmdBase):\n def run(self):\n kwargs = vars(self.args)\n kwargs.update(\n {\n \"cmd\": parse_cmd(kwargs.pop(\"cmd\")),\n \"params\": parse_params(self.args.params),\n }\n )\n self.repo.stage.add(**kwargs)\n return 0\n\n\ndef _add_common_args(parser):\n parser.add_argument(\n \"-d\",\n \"--deps\",\n action=\"append\",\n default=[],\n help=\"Declare dependencies for reproducible cmd.\",\n metavar=\"<path>\",\n ).complete = completion.FILE\n parser.add_argument(\n \"-o\",\n \"--outs\",\n action=\"append\",\n default=[],\n help=\"Declare output file or directory.\",\n metavar=\"<filename>\",\n ).complete = completion.FILE\n parser.add_argument(\n \"-O\",\n \"--outs-no-cache\",\n action=\"append\",\n default=[],\n help=\"Declare output file or directory \"\n \"(do not put into DVC cache).\",\n metavar=\"<filename>\",\n ).complete = completion.FILE\n parser.add_argument(\n \"-p\",\n \"--params\",\n action=\"append\",\n default=[],\n help=\"Declare parameter to use as additional dependency.\",\n metavar=\"[<filename>:]<params_list>\",\n ).complete = completion.FILE\n parser.add_argument(\n \"-m\",\n \"--metrics\",\n action=\"append\",\n default=[],\n help=\"Declare output metrics file.\",\n metavar=\"<path>\",\n )\n parser.add_argument(\n \"-M\",\n \"--metrics-no-cache\",\n action=\"append\",\n default=[],\n help=\"Declare output metrics file (do not put into DVC cache).\",\n metavar=\"<path>\",\n )\n parser.add_argument(\n \"--plots\",\n action=\"append\",\n default=[],\n help=\"Declare output plot file.\",\n metavar=\"<path>\",\n )\n parser.add_argument(\n \"--plots-no-cache\",\n action=\"append\",\n default=[],\n help=\"Declare output plot file (do not put into DVC cache).\",\n metavar=\"<path>\",\n )\n parser.add_argument(\n \"--live\", help=\"Declare output as dvclive.\", metavar=\"<path>\"\n )\n parser.add_argument(\n \"--live-no-cache\",\n help=\"Declare output as dvclive (do not put into DVC cache).\",\n metavar=\"<path>\",\n )\n parser.add_argument(\n \"--live-no-summary\",\n action=\"store_true\",\n default=False,\n help=\"Signal dvclive logger to not dump latest metrics file.\",\n )\n parser.add_argument(\n \"--live-no-html\",\n 
action=\"store_true\",\n default=False,\n help=\"Signal dvclive logger to not produce training report.\",\n )\n parser.add_argument(\n \"-w\",\n \"--wdir\",\n help=\"Directory within your repo to run your command in.\",\n metavar=\"<path>\",\n )\n parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Overwrite existing stage\",\n )\n parser.add_argument(\n \"--outs-persist\",\n action=\"append\",\n default=[],\n help=\"Declare output file or directory that will not be \"\n \"removed upon repro.\",\n metavar=\"<filename>\",\n )\n parser.add_argument(\n \"--outs-persist-no-cache\",\n action=\"append\",\n default=[],\n help=\"Declare output file or directory that will not be \"\n \"removed upon repro (do not put into DVC cache).\",\n metavar=\"<filename>\",\n )\n parser.add_argument(\n \"-c\",\n \"--checkpoints\",\n action=\"append\",\n default=[],\n help=\"Declare checkpoint output file or directory for 'dvc exp run'. \"\n \"Not compatible with 'dvc repro'.\",\n metavar=\"<filename>\",\n ).complete = completion.FILE\n parser.add_argument(\n \"--always-changed\",\n action=\"store_true\",\n default=False,\n help=\"Always consider this DVC-file as changed.\",\n )\n parser.add_argument(\n \"--external\",\n action=\"store_true\",\n default=False,\n help=\"Allow outputs that are outside of the DVC repository.\",\n )\n parser.add_argument(\n \"--desc\",\n type=str,\n metavar=\"<text>\",\n help=(\n \"User description of the stage (optional). \"\n \"This doesn't affect any DVC operations.\"\n ),\n )\n parser.add_argument(\n \"cmd\",\n nargs=argparse.REMAINDER,\n help=\"Command to execute.\",\n metavar=\"command\",\n )\n\n\ndef add_parser(subparsers, parent_parser):\n STAGES_HELP = \"Commands to list and create stages.\"\n\n stage_parser = subparsers.add_parser(\n \"stage\",\n parents=[parent_parser],\n description=append_doc_link(STAGES_HELP, \"stage\"),\n help=STAGES_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n stage_subparsers = stage_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use `dvc stage CMD --help` to display command-specific help.\",\n )\n\n fix_subparsers(stage_subparsers)\n\n STAGE_ADD_HELP = \"Create stage\"\n stage_add_parser = stage_subparsers.add_parser(\n \"add\",\n parents=[parent_parser],\n description=append_doc_link(STAGE_ADD_HELP, \"stage/add\"),\n help=STAGE_ADD_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n stage_add_parser.add_argument(\n \"-n\", \"--name\", help=\"Name of the stage to add\", required=True\n )\n _add_common_args(stage_add_parser)\n stage_add_parser.set_defaults(func=CmdStageAdd)\n\n STAGE_LIST_HELP = \"List stages.\"\n stage_list_parser = stage_subparsers.add_parser(\n \"list\",\n parents=[parent_parser],\n description=append_doc_link(STAGE_LIST_HELP, \"stage/list\"),\n help=STAGE_LIST_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n stage_list_parser.add_argument(\n \"targets\",\n nargs=\"*\",\n default=[\"dvc.yaml\"],\n help=(\n \"Show stages from a dvc.yaml/.dvc file or a directory. 
\"\n \"'dvc.yaml' by default\"\n ),\n )\n stage_list_parser.add_argument(\n \"--all\",\n action=\"store_true\",\n default=False,\n help=\"List all of the stages in the repo.\",\n )\n stage_list_parser.add_argument(\n \"--fail\",\n action=\"store_true\",\n default=False,\n help=\"Fail immediately, do not suppress any syntax errors.\",\n )\n stage_list_parser.add_argument(\n \"-R\",\n \"--recursive\",\n action=\"store_true\",\n default=False,\n help=\"List all stages inside the specified directory.\",\n )\n stage_list_parser.add_argument(\n \"--names-only\",\n action=\"store_true\",\n default=False,\n help=\"List only stage names.\",\n )\n stage_list_parser.set_defaults(func=CmdStageList)\n", "path": "dvc/command/stage.py"}]} | 3,901 | 147 |
gh_patches_debug_23578 | rasdani/github-patches | git_diff | Flexget__Flexget-2271 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kodi API has been changed in v18 (Leia) such that HTTP POST is required
<!---
Before opening an issue, verify:
- Is this a feature request? Post it on https://feathub.com/Flexget/Flexget
- Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions
- Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon
- Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues
- Did you fill out the issue template as completely as possible?
The issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this!
--->
### Expected behaviour:
<!---
Please don't just say "it doesn't crash" or "it works". Explain what the expected result is.
--->
Updates should work
### Actual behaviour:
Error message: `JSONRPC failed. Error -32099: Bad client permission`
### Steps to reproduce:
- Step 1: Call a Kodi library scan from a task
#### Config:
```
kodi_library:
action: scan
category: video
url: http://192.168.1.214
port: 80
```
### Details
The Kodi API changed in v18 (Leia) and later. In the old API, all requests were HTTP GET (even API calls that update/mutate state). They've finally updated the API to require HTTP POST for updates, but they never bumped the API version or provided sensible error messages.
https://forum.kodi.tv/showthread.php?tid=324598
https://discuss.flexget.com/t/kodi-plugin-not-working-on-kodi-18/4196
**NOTE**: I no longer use Kodi, so I'm simply creating an issue based on a forum post to keep track of the issue in case other users begin to experience it.
--- END ISSUE ---
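Before looking at the plugin code below, the v18 requirement is easy to confirm in isolation. The following sketch is illustrative only: it assumes the `requests` library, the `/jsonrpc` endpoint, no authentication, and the host/port from the config in the issue; the method name follows the `{category}Library.{action}` pattern the plugin builds (`video` + `scan` → `VideoLibrary.Scan`):

```python
# Illustrative, standalone (non-FlexGet) check of the Kodi 18+ behaviour.
# Assumptions: python-requests installed, Kodi web server on 192.168.1.214:80,
# no username/password configured.
import requests

payload = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "VideoLibrary.Scan",  # '{category}Library.{action}' for video/scan
}

# Kodi 18 (Leia) expects mutating JSON-RPC methods as an HTTP POST with a JSON
# body; the old GET-with-'?request=<json>' style is what triggers the reported
# "-32099: Bad client permission" error.
response = requests.post("http://192.168.1.214:80/jsonrpc", json=payload, timeout=10)
print(response.json())  # a successful call returns {'id': 1, 'jsonrpc': '2.0', 'result': 'OK'}
```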
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/services/kodi_library.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5 import json
6
7 from flexget import plugin
8 from flexget.event import event
9 from flexget.utils.requests import RequestException
10
11 log = logging.getLogger('kodi_library')
12
13 JSON_URI = '/jsonrpc'
14
15
16 class KodiLibrary(object):
17 schema = {
18 'type': 'object',
19 'properties': {
20 'action': {'type': 'string', 'enum': ['clean', 'scan']},
21 'category': {'type': 'string', 'enum': ['audio', 'video']},
22 'url': {'type': 'string', 'format': 'url'},
23 'port': {'type': 'integer', 'default': 8080},
24 'username': {'type': 'string'},
25 'password': {'type': 'string'},
26 'only_on_accepted': {'type': 'boolean', 'default': True}
27 },
28 'required': ['url', 'action', 'category'],
29 'additionalProperties': False,
30 }
31
32 @plugin.priority(-255)
33 def on_task_exit(self, task, config):
34 if task.accepted or not config['only_on_accepted']:
35 # make the url without trailing slash
36 base_url = config['url'][:-1] if config['url'].endswith('/') else config['url']
37 base_url += ':{0}'.format(config['port'])
38
39 url = base_url + JSON_URI
40 # create the params
41 json_params = {"id": 1, "jsonrpc": "2.0",
42 'method': '{category}Library.{action}'.format(category=config['category'].title(),
43 action=config['action'].title())}
44 params = {'request': json.dumps(json_params)}
45 log.debug('Sending request params %s', params)
46
47 try:
48 r = task.requests.get(url, params=params, auth=(config.get('username'), config.get('password'))).json()
49 if r.get('result') == 'OK':
50 log.info('Successfully sent a %s request for the %s library', config['action'], config['category'])
51 else:
52 if r.get('error'):
53 log.error('Kodi JSONRPC failed. Error %s: %s', r['error']['code'], r['error']['message'])
54 else:
55 # this should never happen as Kodi say they follow the JSON-RPC 2.0 spec
56 log.debug('Received error response %s', json.dumps(r))
57 log.error('Kodi JSONRPC failed with unrecognized message: %s', json.dumps(r))
58 except RequestException as e:
59 raise plugin.PluginError('Failed to send request to Kodi: %s' % e.args[0])
60 else:
61 log.info('No entries were accepted. No request is sent.')
62
63
64 @event('plugin.register')
65 def register_plugin():
66 plugin.register(KodiLibrary, 'kodi_library', api_ver=2)
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/plugins/services/kodi_library.py b/flexget/plugins/services/kodi_library.py
--- a/flexget/plugins/services/kodi_library.py
+++ b/flexget/plugins/services/kodi_library.py
@@ -38,14 +38,13 @@
url = base_url + JSON_URI
# create the params
- json_params = {"id": 1, "jsonrpc": "2.0",
- 'method': '{category}Library.{action}'.format(category=config['category'].title(),
- action=config['action'].title())}
- params = {'request': json.dumps(json_params)}
+ params = {"id": 1, "jsonrpc": "2.0",
+ 'method': '{category}Library.{action}'.format(category=config['category'].title(),
+ action=config['action'].title())}
log.debug('Sending request params %s', params)
try:
- r = task.requests.get(url, params=params, auth=(config.get('username'), config.get('password'))).json()
+ r = task.requests.post(url, json=params, auth=(config.get('username'), config.get('password'))).json()
if r.get('result') == 'OK':
log.info('Successfully sent a %s request for the %s library', config['action'], config['category'])
else:
| {"golden_diff": "diff --git a/flexget/plugins/services/kodi_library.py b/flexget/plugins/services/kodi_library.py\n--- a/flexget/plugins/services/kodi_library.py\n+++ b/flexget/plugins/services/kodi_library.py\n@@ -38,14 +38,13 @@\n \n url = base_url + JSON_URI\n # create the params\n- json_params = {\"id\": 1, \"jsonrpc\": \"2.0\",\n- 'method': '{category}Library.{action}'.format(category=config['category'].title(),\n- action=config['action'].title())}\n- params = {'request': json.dumps(json_params)}\n+ params = {\"id\": 1, \"jsonrpc\": \"2.0\",\n+ 'method': '{category}Library.{action}'.format(category=config['category'].title(),\n+ action=config['action'].title())}\n log.debug('Sending request params %s', params)\n \n try:\n- r = task.requests.get(url, params=params, auth=(config.get('username'), config.get('password'))).json()\n+ r = task.requests.post(url, json=params, auth=(config.get('username'), config.get('password'))).json()\n if r.get('result') == 'OK':\n log.info('Successfully sent a %s request for the %s library', config['action'], config['category'])\n else:\n", "issue": "Kodi API has been changed in v18 (Leia) such that HTTP POST is required\n<!---\r\nBefore opening an issue, verify:\r\n\r\n- Is this a feature request? Post it on https://feathub.com/Flexget/Flexget\r\n- Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions\r\n- Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon\r\n- Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues\r\n- Did you fill out the issue template as completely as possible?\r\n\r\nThe issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this!\r\n--->\r\n### Expected behaviour:\r\n<!---\r\nPlease don't just say \"it doesn't crash\" or \"it works\". Explain what the expected result is.\r\n--->\r\nUpdates should work\r\n### Actual behaviour:\r\nError message: `JSONRPC failed. Error -32099: Bad client permission`\r\n### Steps to reproduce:\r\n- Step 1: Call a kodi library scan from a task\r\n\r\n#### Config:\r\n```\r\nkodi_library:\r\n action: scan\r\n category: video\r\n url: http://192.168.1.214\r\n port: 80\r\n```\r\n\r\n### Details\r\nThe kodi API has been changed in v18 Leia and up. In the old API, all requests were HTTP GET (even API calls that update/mutate state). 
They've finally updated the API to require HTTP POST for updates, but they've completely failed to update the API version or even provide sensible error messages.\r\n\r\nhttps://forum.kodi.tv/showthread.php?tid=324598\r\nhttps://discuss.flexget.com/t/kodi-plugin-not-working-on-kodi-18/4196\r\n\r\n**NOTE**: I no longer use Kodi, so I'm simply creating an issue based on a forum post to keep track of the issue in case other users begin to experience it.\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport json\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.utils.requests import RequestException\n\nlog = logging.getLogger('kodi_library')\n\nJSON_URI = '/jsonrpc'\n\n\nclass KodiLibrary(object):\n schema = {\n 'type': 'object',\n 'properties': {\n 'action': {'type': 'string', 'enum': ['clean', 'scan']},\n 'category': {'type': 'string', 'enum': ['audio', 'video']},\n 'url': {'type': 'string', 'format': 'url'},\n 'port': {'type': 'integer', 'default': 8080},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'only_on_accepted': {'type': 'boolean', 'default': True}\n },\n 'required': ['url', 'action', 'category'],\n 'additionalProperties': False,\n }\n\n @plugin.priority(-255)\n def on_task_exit(self, task, config):\n if task.accepted or not config['only_on_accepted']:\n # make the url without trailing slash\n base_url = config['url'][:-1] if config['url'].endswith('/') else config['url']\n base_url += ':{0}'.format(config['port'])\n\n url = base_url + JSON_URI\n # create the params\n json_params = {\"id\": 1, \"jsonrpc\": \"2.0\",\n 'method': '{category}Library.{action}'.format(category=config['category'].title(),\n action=config['action'].title())}\n params = {'request': json.dumps(json_params)}\n log.debug('Sending request params %s', params)\n\n try:\n r = task.requests.get(url, params=params, auth=(config.get('username'), config.get('password'))).json()\n if r.get('result') == 'OK':\n log.info('Successfully sent a %s request for the %s library', config['action'], config['category'])\n else:\n if r.get('error'):\n log.error('Kodi JSONRPC failed. Error %s: %s', r['error']['code'], r['error']['message'])\n else:\n # this should never happen as Kodi say they follow the JSON-RPC 2.0 spec\n log.debug('Received error response %s', json.dumps(r))\n log.error('Kodi JSONRPC failed with unrecognized message: %s', json.dumps(r))\n except RequestException as e:\n raise plugin.PluginError('Failed to send request to Kodi: %s' % e.args[0])\n else:\n log.info('No entries were accepted. 
No request is sent.')\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(KodiLibrary, 'kodi_library', api_ver=2)\n", "path": "flexget/plugins/services/kodi_library.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport json\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.utils.requests import RequestException\n\nlog = logging.getLogger('kodi_library')\n\nJSON_URI = '/jsonrpc'\n\n\nclass KodiLibrary(object):\n schema = {\n 'type': 'object',\n 'properties': {\n 'action': {'type': 'string', 'enum': ['clean', 'scan']},\n 'category': {'type': 'string', 'enum': ['audio', 'video']},\n 'url': {'type': 'string', 'format': 'url'},\n 'port': {'type': 'integer', 'default': 8080},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'only_on_accepted': {'type': 'boolean', 'default': True}\n },\n 'required': ['url', 'action', 'category'],\n 'additionalProperties': False,\n }\n\n @plugin.priority(-255)\n def on_task_exit(self, task, config):\n if task.accepted or not config['only_on_accepted']:\n # make the url without trailing slash\n base_url = config['url'][:-1] if config['url'].endswith('/') else config['url']\n base_url += ':{0}'.format(config['port'])\n\n url = base_url + JSON_URI\n # create the params\n params = {\"id\": 1, \"jsonrpc\": \"2.0\",\n 'method': '{category}Library.{action}'.format(category=config['category'].title(),\n action=config['action'].title())}\n log.debug('Sending request params %s', params)\n\n try:\n r = task.requests.post(url, json=params, auth=(config.get('username'), config.get('password'))).json()\n if r.get('result') == 'OK':\n log.info('Successfully sent a %s request for the %s library', config['action'], config['category'])\n else:\n if r.get('error'):\n log.error('Kodi JSONRPC failed. Error %s: %s', r['error']['code'], r['error']['message'])\n else:\n # this should never happen as Kodi say they follow the JSON-RPC 2.0 spec\n log.debug('Received error response %s', json.dumps(r))\n log.error('Kodi JSONRPC failed with unrecognized message: %s', json.dumps(r))\n except RequestException as e:\n raise plugin.PluginError('Failed to send request to Kodi: %s' % e.args[0])\n else:\n log.info('No entries were accepted. No request is sent.')\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(KodiLibrary, 'kodi_library', api_ver=2)\n", "path": "flexget/plugins/services/kodi_library.py"}]} | 1,531 | 297 |
gh_patches_debug_20265 | rasdani/github-patches | git_diff | mdn__kuma-7050 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update Jenkins build pipeline to remove tests for PR
**Summary**
The default pipeline (triggered by any push to a branch) runs the Python unit tests and npm tests using `docker-compose`. We should stop doing this, since the same tests already run in Travis: by the time a PR is merged it has already passed the Travis checks, so the extra test job on the Jenkins side can simply be removed.
**Rationale**
It speeds up builds on PRs and also fixes an issue with Jenkins where network port collisions can happen when multiple builds run at the same time.
**Audience**
Only developers and SREs will see this change.
Our flake8 tests suddenly failed
**Summary**
Builds from `master` currently fail in Python linting.
It's because the version of [flake8 used in TravisCI](https://github.com/mdn/kuma/blob/59afefbe55ca8456203baa96ba421f1fa3b63ede/.travis.yml#L17-L18) doesn't match what we have in our pyproject.toml.
Today a new release of flake8 came out that broke our master. [Example](https://travis-ci.com/github/mdn/kuma/jobs/332498567)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/search/views.py`
Content:
```
1 from django.shortcuts import render
2 from django.urls import reverse_lazy
3 from django.views.decorators.cache import never_cache
4 from django.views.decorators.http import require_GET
5 from django.views.generic import RedirectView
6 from ratelimit.decorators import ratelimit
7
8 from kuma.api.v1.views import search as search_api
9 from kuma.core.decorators import shared_cache_control
10 from kuma.core.utils import is_wiki
11
12 from .search import SearchView
13
14 # Since the search endpoint accepts user input (via query parameters) and its
15 # response is compressed, use rate limiting to mitigate the BREACH attack
16 # (see http://breachattack.com/). It still needs to allow a user to click
17 # the filter switches (bug 1426968).
18 # Alternate: forbid gzip by setting Content-Encoding: identity
19 @never_cache
20 @require_GET
21 @ratelimit(key="user_or_ip", rate="25/m", block=True)
22 def search(request, *args, **kwargs):
23 """
24 The search view.
25 """
26 if is_wiki(request):
27 return wiki_search(request, *args, **kwargs)
28
29 # The underlying v1 API supports searching without a 'q' but the web
30 # UI doesn't. For example, the search input field requires a value.
31 # So we match that here too.
32 if not request.GET.get("q", "").strip():
33 status = 400
34 context = {"results": {}}
35 else:
36 results = search_api(request, *args, **kwargs).data
37
38 # Determine if there were validation errors
39 error = results.get("error") or results.get("q")
40 # If q is returned in the data, there was a validation error for that field,
41 # so return 400 status.
42 status = 200 if results.get("q") is None else 400
43 # If there was an error with the pagination you'll get...
44 if results.get("detail"):
45 error = str(results["detail"])
46 status = 400
47
48 context = {"results": {"results": None if error else results, "error": error}}
49 return render(request, "search/react.html", context, status=status)
50
51
52 wiki_search = SearchView.as_view()
53
54
55 class SearchRedirectView(RedirectView):
56 permanent = True
57
58 def get_redirect_url(self, *args, **kwargs):
59 query_string = self.request.META.get("QUERY_STRING")
60 url = reverse_lazy(
61 "api.v1.search", kwargs={"locale": self.request.LANGUAGE_CODE}
62 )
63 if query_string:
64 url += "?" + query_string
65 return url
66
67
68 @shared_cache_control(s_maxage=60 * 60 * 24 * 7)
69 def plugin(request):
70 """Render an OpenSearch Plugin."""
71 return render(
72 request,
73 "search/plugin.html",
74 {"locale": request.LANGUAGE_CODE},
75 content_type="application/opensearchdescription+xml",
76 )
77
```
Path: `kuma/users/models.py`
Content:
```
1 import datetime
2
3 from django.conf import settings
4 from django.contrib.auth.models import AbstractUser
5 from django.contrib.auth.tokens import default_token_generator
6 from django.core import validators
7 from django.core.cache import cache
8 from django.db import models, transaction
9 from django.db.models import Max
10 from django.dispatch import receiver
11 from django.utils import timezone
12 from django.utils.encoding import force_bytes
13 from django.utils.functional import cached_property
14 from django.utils.http import urlsafe_base64_encode
15 from django.utils.translation import gettext_lazy as _
16
17 from kuma.core.urlresolvers import reverse
18
19 from .constants import USERNAME_REGEX
20
21
22 class UserBan(models.Model):
23 user = models.ForeignKey(
24 settings.AUTH_USER_MODEL,
25 related_name="bans",
26 verbose_name="Banned user",
27 on_delete=models.CASCADE,
28 )
29 by = models.ForeignKey(
30 settings.AUTH_USER_MODEL,
31 related_name="bans_issued",
32 verbose_name="Banned by",
33 on_delete=models.PROTECT,
34 )
35 reason = models.TextField()
36 date = models.DateField(default=datetime.date.today)
37 is_active = models.BooleanField(default=True, help_text="(Is ban active)")
38
39 def __str__(self):
40 message = _("%(banned_user)s banned by %(banned_by)s") % {
41 "banned_user": self.user,
42 "banned_by": self.by,
43 }
44 if not self.is_active:
45 message = _("%s (no longer active)") % message
46 return message
47
48
49 class User(AbstractUser):
50 """
51 Our custom user class.
52 """
53
54 timezone = models.CharField(
55 verbose_name=_("Timezone"),
56 max_length=42,
57 blank=True,
58 default=settings.TIME_ZONE,
59 # Note the deliberate omission of the `choices=` here.
60 # That's because there's no good way to list all possible
61 # timezones as a 2-D tuple. The *name* of the timezone rarely
62 # changes but the human-friendly description of it easily does.
63 )
64 locale = models.CharField(
65 max_length=7,
66 default=settings.LANGUAGE_CODE,
67 choices=settings.SORTED_LANGUAGES,
68 verbose_name=_("Language"),
69 blank=True,
70 db_index=True,
71 )
72 homepage = models.URLField(
73 verbose_name=_("Homepage"),
74 max_length=255,
75 blank=True,
76 error_messages={
77 "invalid": _(
78 "This URL has an invalid format. "
79 "Valid URLs look like http://example.com/my_page."
80 )
81 },
82 )
83 title = models.CharField(verbose_name=_("Title"), max_length=255, blank=True,)
84 fullname = models.CharField(verbose_name=_("Name"), max_length=255, blank=True,)
85 organization = models.CharField(
86 verbose_name=_("Organization"), max_length=255, blank=True,
87 )
88 location = models.CharField(verbose_name=_("Location"), max_length=255, blank=True,)
89 bio = models.TextField(verbose_name=_("About Me"), blank=True,)
90 irc_nickname = models.CharField(
91 verbose_name=_("IRC nickname"), max_length=255, blank=True,
92 )
93
94 is_newsletter_subscribed = models.BooleanField(default=False)
95
96 WEBSITE_VALIDATORS = {
97 "website": validators.RegexValidator(
98 r"^https?://", _("Enter a valid website URL."), "invalid",
99 ),
100 "twitter": validators.RegexValidator(
101 r"^https?://twitter\.com/", _("Enter a valid Twitter URL."), "invalid",
102 ),
103 "github": validators.RegexValidator(
104 r"^https?://github\.com/", _("Enter a valid GitHub URL."), "invalid",
105 ),
106 "stackoverflow": validators.RegexValidator(
107 r"^https?://([a-z]{2}\.)?stackoverflow\.com/users/",
108 _("Enter a valid Stack Overflow URL."),
109 "invalid",
110 ),
111 "linkedin": validators.RegexValidator(
112 r"^https?://((www|\w\w)\.)?linkedin.com/((in/[^/]+/?)|(pub/[^/]+/((\w|\d)+/?){3}))$",
113 _("Enter a valid LinkedIn URL."),
114 "invalid",
115 ),
116 "mozillians": validators.RegexValidator(
117 r"^https?://mozillians\.org/u/",
118 _("Enter a valid Mozillians URL."),
119 "invalid",
120 ),
121 "facebook": validators.RegexValidator(
122 r"^https?://www\.facebook\.com/",
123 _("Enter a valid Facebook URL."),
124 "invalid",
125 ),
126 "discourse": validators.RegexValidator(
127 r"^https://discourse\.mozilla\.org/u/",
128 _("Enter a valid Discourse URL."),
129 "invalid",
130 ),
131 }
132
133 # a bunch of user URLs
134 website_url = models.TextField(
135 _("Website"), blank=True, validators=[WEBSITE_VALIDATORS["website"]],
136 )
137 mozillians_url = models.TextField(
138 _("Mozillians"), blank=True, validators=[WEBSITE_VALIDATORS["mozillians"]],
139 )
140 github_url = models.TextField(
141 _("GitHub"), blank=True, validators=[WEBSITE_VALIDATORS["github"]],
142 )
143 is_github_url_public = models.BooleanField(_("Public GitHub URL"), default=False,)
144 twitter_url = models.TextField(
145 _("Twitter"), blank=True, validators=[WEBSITE_VALIDATORS["twitter"]],
146 )
147 linkedin_url = models.TextField(
148 _("LinkedIn"), blank=True, validators=[WEBSITE_VALIDATORS["linkedin"]],
149 )
150 facebook_url = models.TextField(
151 _("Facebook"), blank=True, validators=[WEBSITE_VALIDATORS["facebook"]],
152 )
153 stackoverflow_url = models.TextField(
154 _("Stack Overflow"),
155 blank=True,
156 validators=[WEBSITE_VALIDATORS["stackoverflow"]],
157 )
158 discourse_url = models.TextField(
159 _("Discourse"), blank=True, validators=[WEBSITE_VALIDATORS["discourse"]],
160 )
161 stripe_customer_id = models.CharField(max_length=255, blank=True)
162
163 subscriber_number = models.PositiveIntegerField(blank=True, null=True)
164
165 class Meta:
166 db_table = "auth_user"
167
168 @property
169 def has_legacy_username(self):
170 return not USERNAME_REGEX.search(self.username)
171
172 @cached_property
173 def is_beta_tester(self):
174 return settings.BETA_GROUP_NAME in self.groups.values_list("name", flat=True)
175
176 @cached_property
177 def active_ban(self):
178 """
179 Returns the first active ban for the user or None.
180 """
181 return self.bans.filter(is_active=True).first()
182
183 def wiki_revisions(self, count=5):
184 return (
185 self.created_revisions.prefetch_related("document")
186 .defer("content", "summary")
187 .order_by("-id")[:count]
188 )
189
190 def allows_editing_by(self, user):
191 return user.is_staff or user.is_superuser or user.pk == self.pk
192
193 def get_recovery_url(self):
194 """Creates a recovery URL for the user."""
195 uidb64 = urlsafe_base64_encode(force_bytes(self.pk))
196 token = default_token_generator.make_token(self)
197 link = reverse("users.recover", kwargs={"token": token, "uidb64": uidb64})
198 return link
199
200 def set_next_subscriber_number_and_save(self):
201 assert not self.subscriber_number, "already set"
202 lock_key = f"set_next_subscriber_number_and_save"
203 # By locking "globally", we get to be certain that our query to get
204 # the current highest `subscriber_number`, gets done alone.
205 with cache.lock(lock_key):
206 highest_number = User.get_highest_subscriber_number()
207 User.objects.filter(id=self.id).update(subscriber_number=highest_number + 1)
208
209 @classmethod
210 def get_highest_subscriber_number(cls):
211 return (
212 cls.objects.filter(subscriber_number__isnull=False).aggregate(
213 number=Max("subscriber_number")
214 )["number"]
215 or 0
216 )
217
218
219 class UserSubscription(models.Model):
220 user = models.ForeignKey(User, on_delete=models.CASCADE)
221 stripe_subscription_id = models.CharField(max_length=255, blank=True)
222 canceled = models.DateTimeField(null=True)
223 created = models.DateTimeField(auto_now_add=True)
224 updated = models.DateTimeField(auto_now=True)
225
226 def __str__(self):
227 return (
228 f"{self.user.username} ({self.stripe_subscription_id})"
229 f"{' CANCELED' if self.canceled else ''}"
230 )
231
232 @classmethod
233 # The reason we make this (class) method transaction atomic is because this
234 # use of `update_or_create` will actually trigger a `post_save` signal
235 # on the `User` model which will set the `User.subscriber_number` if needed.
236 # So, this way we're pre-emptively making sure these two things are atomically
237 # connected.
238 @transaction.atomic()
239 def set_active(cls, user, stripe_subscription_id):
240 cls.objects.update_or_create(
241 stripe_subscription_id=stripe_subscription_id,
242 user=user,
243 defaults={"canceled": None, "updated": timezone.now()},
244 )
245
246 @classmethod
247 def set_canceled(cls, user, stripe_subscription_id):
248 cls.objects.update_or_create(
249 stripe_subscription_id=stripe_subscription_id,
250 user=user,
251 defaults={"canceled": timezone.now(), "updated": timezone.now()},
252 )
253
254
255 @receiver(models.signals.post_save, sender=UserSubscription)
256 def set_user_subscriber_number(sender, instance, **kwargs):
257 if not instance.canceled and not instance.user.subscriber_number:
258 instance.user.set_next_subscriber_number_and_save()
259
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/search/views.py b/kuma/search/views.py
--- a/kuma/search/views.py
+++ b/kuma/search/views.py
@@ -11,6 +11,7 @@
from .search import SearchView
+
# Since the search endpoint accepts user input (via query parameters) and its
# response is compressed, use rate limiting to mitigate the BREACH attack
# (see http://breachattack.com/). It still needs to allow a user to click
diff --git a/kuma/users/models.py b/kuma/users/models.py
--- a/kuma/users/models.py
+++ b/kuma/users/models.py
@@ -199,7 +199,7 @@
def set_next_subscriber_number_and_save(self):
assert not self.subscriber_number, "already set"
- lock_key = f"set_next_subscriber_number_and_save"
+ lock_key = "set_next_subscriber_number_and_save"
# By locking "globally", we get to be certain that our query to get
# the current highest `subscriber_number`, gets done alone.
with cache.lock(lock_key):
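For context on why these two small hunks unbreak the lint step: the newer flake8/pyflakes release flags both patterns. The rule codes below are an educated guess on my part — the issue only says a new flake8 release broke `master` — but the snippet shows the before/after shapes the diff applies:

```python
# Sketch of the two findings the patch addresses (rule codes are a guess).

# kuma/users/models.py: an f-string with no placeholders is flagged
# (most likely pyflakes' F541), so the fix is a plain string literal.
lock_key = f"set_next_subscriber_number_and_save"   # flagged: no placeholders
lock_key = "set_next_subscriber_number_and_save"    # fixed

# kuma/search/views.py: the other hunk only adds a second blank line after
# `from .search import SearchView`, giving the conventional two blank lines
# before the comment block and decorated view that follow (pycodestyle E30x).
```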
| {"golden_diff": "diff --git a/kuma/search/views.py b/kuma/search/views.py\n--- a/kuma/search/views.py\n+++ b/kuma/search/views.py\n@@ -11,6 +11,7 @@\n \n from .search import SearchView\n \n+\n # Since the search endpoint accepts user input (via query parameters) and its\n # response is compressed, use rate limiting to mitigate the BREACH attack\n # (see http://breachattack.com/). It still needs to allow a user to click\ndiff --git a/kuma/users/models.py b/kuma/users/models.py\n--- a/kuma/users/models.py\n+++ b/kuma/users/models.py\n@@ -199,7 +199,7 @@\n \n def set_next_subscriber_number_and_save(self):\n assert not self.subscriber_number, \"already set\"\n- lock_key = f\"set_next_subscriber_number_and_save\"\n+ lock_key = \"set_next_subscriber_number_and_save\"\n # By locking \"globally\", we get to be certain that our query to get\n # the current highest `subscriber_number`, gets done alone.\n with cache.lock(lock_key):\n", "issue": "Update Jenkins build pipeline to remove tests for PR\n**Summary**\r\nThe default pipeline (any pushes to branch) will trigger a python unit test and npm tests using `docker-compose`, we should stop doing this since we are already doing this in travis. So before a PR even gets merged it has already passed tests using travis so I feel we should just remove that extra job on the jenkins side of things\r\n\r\n**Rationale**\r\nIt speeds up builds on PRs and also fixes an issue with jenkins where network port collision can happen when multiple builds are running at the same time.\r\n\r\n**Audience**\r\nOnly Developers and SRE's will see this change\r\n\r\n\nOur flake8 tests suddenly failed\n**Summary**\r\nBuilds from `master` currently fail in Python linting. \r\nIt's because the version of [flake8 used in TravisCI](https://github.com/mdn/kuma/blob/59afefbe55ca8456203baa96ba421f1fa3b63ede/.travis.yml#L17-L18) don't match what we have in our pyproject.toml. \r\nToday a new release of flake8 came out that broke our master. [Example](https://travis-ci.com/github/mdn/kuma/jobs/332498567)\r\n\n", "before_files": [{"content": "from django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.views import search as search_api\nfrom kuma.core.decorators import shared_cache_control\nfrom kuma.core.utils import is_wiki\n\nfrom .search import SearchView\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n \"\"\"\n if is_wiki(request):\n return wiki_search(request, *args, **kwargs)\n\n # The underlying v1 API supports searching without a 'q' but the web\n # UI doesn't. 
For example, the search input field requires a value.\n # So we match that here too.\n if not request.GET.get(\"q\", \"\").strip():\n status = 400\n context = {\"results\": {}}\n else:\n results = search_api(request, *args, **kwargs).data\n\n # Determine if there were validation errors\n error = results.get(\"error\") or results.get(\"q\")\n # If q is returned in the data, there was a validation error for that field,\n # so return 400 status.\n status = 200 if results.get(\"q\") is None else 400\n # If there was an error with the pagination you'll get...\n if results.get(\"detail\"):\n error = str(results[\"detail\"])\n status = 400\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n return render(request, \"search/react.html\", context, status=status)\n\n\nwiki_search = SearchView.as_view()\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\n \"api.v1.search\", kwargs={\"locale\": self.request.LANGUAGE_CODE}\n )\n if query_string:\n url += \"?\" + query_string\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}, {"content": "import datetime\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.core import validators\nfrom django.core.cache import cache\nfrom django.db import models, transaction\nfrom django.db.models import Max\nfrom django.dispatch import receiver\nfrom django.utils import timezone\nfrom django.utils.encoding import force_bytes\nfrom django.utils.functional import cached_property\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.utils.translation import gettext_lazy as _\n\nfrom kuma.core.urlresolvers import reverse\n\nfrom .constants import USERNAME_REGEX\n\n\nclass UserBan(models.Model):\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n related_name=\"bans\",\n verbose_name=\"Banned user\",\n on_delete=models.CASCADE,\n )\n by = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n related_name=\"bans_issued\",\n verbose_name=\"Banned by\",\n on_delete=models.PROTECT,\n )\n reason = models.TextField()\n date = models.DateField(default=datetime.date.today)\n is_active = models.BooleanField(default=True, help_text=\"(Is ban active)\")\n\n def __str__(self):\n message = _(\"%(banned_user)s banned by %(banned_by)s\") % {\n \"banned_user\": self.user,\n \"banned_by\": self.by,\n }\n if not self.is_active:\n message = _(\"%s (no longer active)\") % message\n return message\n\n\nclass User(AbstractUser):\n \"\"\"\n Our custom user class.\n \"\"\"\n\n timezone = models.CharField(\n verbose_name=_(\"Timezone\"),\n max_length=42,\n blank=True,\n default=settings.TIME_ZONE,\n # Note the deliberate omission of the `choices=` here.\n # That's because there's no good way to list all possible\n # timezones as a 2-D tuple. 
The *name* of the timezone rarely\n # changes but the human-friendly description of it easily does.\n )\n locale = models.CharField(\n max_length=7,\n default=settings.LANGUAGE_CODE,\n choices=settings.SORTED_LANGUAGES,\n verbose_name=_(\"Language\"),\n blank=True,\n db_index=True,\n )\n homepage = models.URLField(\n verbose_name=_(\"Homepage\"),\n max_length=255,\n blank=True,\n error_messages={\n \"invalid\": _(\n \"This URL has an invalid format. \"\n \"Valid URLs look like http://example.com/my_page.\"\n )\n },\n )\n title = models.CharField(verbose_name=_(\"Title\"), max_length=255, blank=True,)\n fullname = models.CharField(verbose_name=_(\"Name\"), max_length=255, blank=True,)\n organization = models.CharField(\n verbose_name=_(\"Organization\"), max_length=255, blank=True,\n )\n location = models.CharField(verbose_name=_(\"Location\"), max_length=255, blank=True,)\n bio = models.TextField(verbose_name=_(\"About Me\"), blank=True,)\n irc_nickname = models.CharField(\n verbose_name=_(\"IRC nickname\"), max_length=255, blank=True,\n )\n\n is_newsletter_subscribed = models.BooleanField(default=False)\n\n WEBSITE_VALIDATORS = {\n \"website\": validators.RegexValidator(\n r\"^https?://\", _(\"Enter a valid website URL.\"), \"invalid\",\n ),\n \"twitter\": validators.RegexValidator(\n r\"^https?://twitter\\.com/\", _(\"Enter a valid Twitter URL.\"), \"invalid\",\n ),\n \"github\": validators.RegexValidator(\n r\"^https?://github\\.com/\", _(\"Enter a valid GitHub URL.\"), \"invalid\",\n ),\n \"stackoverflow\": validators.RegexValidator(\n r\"^https?://([a-z]{2}\\.)?stackoverflow\\.com/users/\",\n _(\"Enter a valid Stack Overflow URL.\"),\n \"invalid\",\n ),\n \"linkedin\": validators.RegexValidator(\n r\"^https?://((www|\\w\\w)\\.)?linkedin.com/((in/[^/]+/?)|(pub/[^/]+/((\\w|\\d)+/?){3}))$\",\n _(\"Enter a valid LinkedIn URL.\"),\n \"invalid\",\n ),\n \"mozillians\": validators.RegexValidator(\n r\"^https?://mozillians\\.org/u/\",\n _(\"Enter a valid Mozillians URL.\"),\n \"invalid\",\n ),\n \"facebook\": validators.RegexValidator(\n r\"^https?://www\\.facebook\\.com/\",\n _(\"Enter a valid Facebook URL.\"),\n \"invalid\",\n ),\n \"discourse\": validators.RegexValidator(\n r\"^https://discourse\\.mozilla\\.org/u/\",\n _(\"Enter a valid Discourse URL.\"),\n \"invalid\",\n ),\n }\n\n # a bunch of user URLs\n website_url = models.TextField(\n _(\"Website\"), blank=True, validators=[WEBSITE_VALIDATORS[\"website\"]],\n )\n mozillians_url = models.TextField(\n _(\"Mozillians\"), blank=True, validators=[WEBSITE_VALIDATORS[\"mozillians\"]],\n )\n github_url = models.TextField(\n _(\"GitHub\"), blank=True, validators=[WEBSITE_VALIDATORS[\"github\"]],\n )\n is_github_url_public = models.BooleanField(_(\"Public GitHub URL\"), default=False,)\n twitter_url = models.TextField(\n _(\"Twitter\"), blank=True, validators=[WEBSITE_VALIDATORS[\"twitter\"]],\n )\n linkedin_url = models.TextField(\n _(\"LinkedIn\"), blank=True, validators=[WEBSITE_VALIDATORS[\"linkedin\"]],\n )\n facebook_url = models.TextField(\n _(\"Facebook\"), blank=True, validators=[WEBSITE_VALIDATORS[\"facebook\"]],\n )\n stackoverflow_url = models.TextField(\n _(\"Stack Overflow\"),\n blank=True,\n validators=[WEBSITE_VALIDATORS[\"stackoverflow\"]],\n )\n discourse_url = models.TextField(\n _(\"Discourse\"), blank=True, validators=[WEBSITE_VALIDATORS[\"discourse\"]],\n )\n stripe_customer_id = models.CharField(max_length=255, blank=True)\n\n subscriber_number = models.PositiveIntegerField(blank=True, null=True)\n\n class Meta:\n 
db_table = \"auth_user\"\n\n @property\n def has_legacy_username(self):\n return not USERNAME_REGEX.search(self.username)\n\n @cached_property\n def is_beta_tester(self):\n return settings.BETA_GROUP_NAME in self.groups.values_list(\"name\", flat=True)\n\n @cached_property\n def active_ban(self):\n \"\"\"\n Returns the first active ban for the user or None.\n \"\"\"\n return self.bans.filter(is_active=True).first()\n\n def wiki_revisions(self, count=5):\n return (\n self.created_revisions.prefetch_related(\"document\")\n .defer(\"content\", \"summary\")\n .order_by(\"-id\")[:count]\n )\n\n def allows_editing_by(self, user):\n return user.is_staff or user.is_superuser or user.pk == self.pk\n\n def get_recovery_url(self):\n \"\"\"Creates a recovery URL for the user.\"\"\"\n uidb64 = urlsafe_base64_encode(force_bytes(self.pk))\n token = default_token_generator.make_token(self)\n link = reverse(\"users.recover\", kwargs={\"token\": token, \"uidb64\": uidb64})\n return link\n\n def set_next_subscriber_number_and_save(self):\n assert not self.subscriber_number, \"already set\"\n lock_key = f\"set_next_subscriber_number_and_save\"\n # By locking \"globally\", we get to be certain that our query to get\n # the current highest `subscriber_number`, gets done alone.\n with cache.lock(lock_key):\n highest_number = User.get_highest_subscriber_number()\n User.objects.filter(id=self.id).update(subscriber_number=highest_number + 1)\n\n @classmethod\n def get_highest_subscriber_number(cls):\n return (\n cls.objects.filter(subscriber_number__isnull=False).aggregate(\n number=Max(\"subscriber_number\")\n )[\"number\"]\n or 0\n )\n\n\nclass UserSubscription(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n stripe_subscription_id = models.CharField(max_length=255, blank=True)\n canceled = models.DateTimeField(null=True)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return (\n f\"{self.user.username} ({self.stripe_subscription_id})\"\n f\"{' CANCELED' if self.canceled else ''}\"\n )\n\n @classmethod\n # The reason we make this (class) method transaction atomic is because this\n # use of `update_or_create` will actually trigger a `post_save` signal\n # on the `User` model which will set the `User.subscriber_number` if needed.\n # So, this way we're pre-emptively making sure these two things are atomically\n # connected.\n @transaction.atomic()\n def set_active(cls, user, stripe_subscription_id):\n cls.objects.update_or_create(\n stripe_subscription_id=stripe_subscription_id,\n user=user,\n defaults={\"canceled\": None, \"updated\": timezone.now()},\n )\n\n @classmethod\n def set_canceled(cls, user, stripe_subscription_id):\n cls.objects.update_or_create(\n stripe_subscription_id=stripe_subscription_id,\n user=user,\n defaults={\"canceled\": timezone.now(), \"updated\": timezone.now()},\n )\n\n\n@receiver(models.signals.post_save, sender=UserSubscription)\ndef set_user_subscriber_number(sender, instance, **kwargs):\n if not instance.canceled and not instance.user.subscriber_number:\n instance.user.set_next_subscriber_number_and_save()\n", "path": "kuma/users/models.py"}], "after_files": [{"content": "from django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.views import search 
as search_api\nfrom kuma.core.decorators import shared_cache_control\nfrom kuma.core.utils import is_wiki\n\nfrom .search import SearchView\n\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n \"\"\"\n if is_wiki(request):\n return wiki_search(request, *args, **kwargs)\n\n # The underlying v1 API supports searching without a 'q' but the web\n # UI doesn't. For example, the search input field requires a value.\n # So we match that here too.\n if not request.GET.get(\"q\", \"\").strip():\n status = 400\n context = {\"results\": {}}\n else:\n results = search_api(request, *args, **kwargs).data\n\n # Determine if there were validation errors\n error = results.get(\"error\") or results.get(\"q\")\n # If q is returned in the data, there was a validation error for that field,\n # so return 400 status.\n status = 200 if results.get(\"q\") is None else 400\n # If there was an error with the pagination you'll get...\n if results.get(\"detail\"):\n error = str(results[\"detail\"])\n status = 400\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n return render(request, \"search/react.html\", context, status=status)\n\n\nwiki_search = SearchView.as_view()\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\n \"api.v1.search\", kwargs={\"locale\": self.request.LANGUAGE_CODE}\n )\n if query_string:\n url += \"?\" + query_string\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}, {"content": "import datetime\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.core import validators\nfrom django.core.cache import cache\nfrom django.db import models, transaction\nfrom django.db.models import Max\nfrom django.dispatch import receiver\nfrom django.utils import timezone\nfrom django.utils.encoding import force_bytes\nfrom django.utils.functional import cached_property\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.utils.translation import gettext_lazy as _\n\nfrom kuma.core.urlresolvers import reverse\n\nfrom .constants import USERNAME_REGEX\n\n\nclass UserBan(models.Model):\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n related_name=\"bans\",\n verbose_name=\"Banned user\",\n on_delete=models.CASCADE,\n )\n by = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n related_name=\"bans_issued\",\n verbose_name=\"Banned by\",\n on_delete=models.PROTECT,\n )\n reason = models.TextField()\n date = models.DateField(default=datetime.date.today)\n is_active = models.BooleanField(default=True, help_text=\"(Is ban active)\")\n\n def __str__(self):\n message = _(\"%(banned_user)s banned by %(banned_by)s\") % 
{\n \"banned_user\": self.user,\n \"banned_by\": self.by,\n }\n if not self.is_active:\n message = _(\"%s (no longer active)\") % message\n return message\n\n\nclass User(AbstractUser):\n \"\"\"\n Our custom user class.\n \"\"\"\n\n timezone = models.CharField(\n verbose_name=_(\"Timezone\"),\n max_length=42,\n blank=True,\n default=settings.TIME_ZONE,\n # Note the deliberate omission of the `choices=` here.\n # That's because there's no good way to list all possible\n # timezones as a 2-D tuple. The *name* of the timezone rarely\n # changes but the human-friendly description of it easily does.\n )\n locale = models.CharField(\n max_length=7,\n default=settings.LANGUAGE_CODE,\n choices=settings.SORTED_LANGUAGES,\n verbose_name=_(\"Language\"),\n blank=True,\n db_index=True,\n )\n homepage = models.URLField(\n verbose_name=_(\"Homepage\"),\n max_length=255,\n blank=True,\n error_messages={\n \"invalid\": _(\n \"This URL has an invalid format. \"\n \"Valid URLs look like http://example.com/my_page.\"\n )\n },\n )\n title = models.CharField(verbose_name=_(\"Title\"), max_length=255, blank=True,)\n fullname = models.CharField(verbose_name=_(\"Name\"), max_length=255, blank=True,)\n organization = models.CharField(\n verbose_name=_(\"Organization\"), max_length=255, blank=True,\n )\n location = models.CharField(verbose_name=_(\"Location\"), max_length=255, blank=True,)\n bio = models.TextField(verbose_name=_(\"About Me\"), blank=True,)\n irc_nickname = models.CharField(\n verbose_name=_(\"IRC nickname\"), max_length=255, blank=True,\n )\n\n is_newsletter_subscribed = models.BooleanField(default=False)\n\n WEBSITE_VALIDATORS = {\n \"website\": validators.RegexValidator(\n r\"^https?://\", _(\"Enter a valid website URL.\"), \"invalid\",\n ),\n \"twitter\": validators.RegexValidator(\n r\"^https?://twitter\\.com/\", _(\"Enter a valid Twitter URL.\"), \"invalid\",\n ),\n \"github\": validators.RegexValidator(\n r\"^https?://github\\.com/\", _(\"Enter a valid GitHub URL.\"), \"invalid\",\n ),\n \"stackoverflow\": validators.RegexValidator(\n r\"^https?://([a-z]{2}\\.)?stackoverflow\\.com/users/\",\n _(\"Enter a valid Stack Overflow URL.\"),\n \"invalid\",\n ),\n \"linkedin\": validators.RegexValidator(\n r\"^https?://((www|\\w\\w)\\.)?linkedin.com/((in/[^/]+/?)|(pub/[^/]+/((\\w|\\d)+/?){3}))$\",\n _(\"Enter a valid LinkedIn URL.\"),\n \"invalid\",\n ),\n \"mozillians\": validators.RegexValidator(\n r\"^https?://mozillians\\.org/u/\",\n _(\"Enter a valid Mozillians URL.\"),\n \"invalid\",\n ),\n \"facebook\": validators.RegexValidator(\n r\"^https?://www\\.facebook\\.com/\",\n _(\"Enter a valid Facebook URL.\"),\n \"invalid\",\n ),\n \"discourse\": validators.RegexValidator(\n r\"^https://discourse\\.mozilla\\.org/u/\",\n _(\"Enter a valid Discourse URL.\"),\n \"invalid\",\n ),\n }\n\n # a bunch of user URLs\n website_url = models.TextField(\n _(\"Website\"), blank=True, validators=[WEBSITE_VALIDATORS[\"website\"]],\n )\n mozillians_url = models.TextField(\n _(\"Mozillians\"), blank=True, validators=[WEBSITE_VALIDATORS[\"mozillians\"]],\n )\n github_url = models.TextField(\n _(\"GitHub\"), blank=True, validators=[WEBSITE_VALIDATORS[\"github\"]],\n )\n is_github_url_public = models.BooleanField(_(\"Public GitHub URL\"), default=False,)\n twitter_url = models.TextField(\n _(\"Twitter\"), blank=True, validators=[WEBSITE_VALIDATORS[\"twitter\"]],\n )\n linkedin_url = models.TextField(\n _(\"LinkedIn\"), blank=True, validators=[WEBSITE_VALIDATORS[\"linkedin\"]],\n )\n facebook_url = models.TextField(\n 
_(\"Facebook\"), blank=True, validators=[WEBSITE_VALIDATORS[\"facebook\"]],\n )\n stackoverflow_url = models.TextField(\n _(\"Stack Overflow\"),\n blank=True,\n validators=[WEBSITE_VALIDATORS[\"stackoverflow\"]],\n )\n discourse_url = models.TextField(\n _(\"Discourse\"), blank=True, validators=[WEBSITE_VALIDATORS[\"discourse\"]],\n )\n stripe_customer_id = models.CharField(max_length=255, blank=True)\n\n subscriber_number = models.PositiveIntegerField(blank=True, null=True)\n\n class Meta:\n db_table = \"auth_user\"\n\n @property\n def has_legacy_username(self):\n return not USERNAME_REGEX.search(self.username)\n\n @cached_property\n def is_beta_tester(self):\n return settings.BETA_GROUP_NAME in self.groups.values_list(\"name\", flat=True)\n\n @cached_property\n def active_ban(self):\n \"\"\"\n Returns the first active ban for the user or None.\n \"\"\"\n return self.bans.filter(is_active=True).first()\n\n def wiki_revisions(self, count=5):\n return (\n self.created_revisions.prefetch_related(\"document\")\n .defer(\"content\", \"summary\")\n .order_by(\"-id\")[:count]\n )\n\n def allows_editing_by(self, user):\n return user.is_staff or user.is_superuser or user.pk == self.pk\n\n def get_recovery_url(self):\n \"\"\"Creates a recovery URL for the user.\"\"\"\n uidb64 = urlsafe_base64_encode(force_bytes(self.pk))\n token = default_token_generator.make_token(self)\n link = reverse(\"users.recover\", kwargs={\"token\": token, \"uidb64\": uidb64})\n return link\n\n def set_next_subscriber_number_and_save(self):\n assert not self.subscriber_number, \"already set\"\n lock_key = \"set_next_subscriber_number_and_save\"\n # By locking \"globally\", we get to be certain that our query to get\n # the current highest `subscriber_number`, gets done alone.\n with cache.lock(lock_key):\n highest_number = User.get_highest_subscriber_number()\n User.objects.filter(id=self.id).update(subscriber_number=highest_number + 1)\n\n @classmethod\n def get_highest_subscriber_number(cls):\n return (\n cls.objects.filter(subscriber_number__isnull=False).aggregate(\n number=Max(\"subscriber_number\")\n )[\"number\"]\n or 0\n )\n\n\nclass UserSubscription(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n stripe_subscription_id = models.CharField(max_length=255, blank=True)\n canceled = models.DateTimeField(null=True)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return (\n f\"{self.user.username} ({self.stripe_subscription_id})\"\n f\"{' CANCELED' if self.canceled else ''}\"\n )\n\n @classmethod\n # The reason we make this (class) method transaction atomic is because this\n # use of `update_or_create` will actually trigger a `post_save` signal\n # on the `User` model which will set the `User.subscriber_number` if needed.\n # So, this way we're pre-emptively making sure these two things are atomically\n # connected.\n @transaction.atomic()\n def set_active(cls, user, stripe_subscription_id):\n cls.objects.update_or_create(\n stripe_subscription_id=stripe_subscription_id,\n user=user,\n defaults={\"canceled\": None, \"updated\": timezone.now()},\n )\n\n @classmethod\n def set_canceled(cls, user, stripe_subscription_id):\n cls.objects.update_or_create(\n stripe_subscription_id=stripe_subscription_id,\n user=user,\n defaults={\"canceled\": timezone.now(), \"updated\": timezone.now()},\n )\n\n\n@receiver(models.signals.post_save, sender=UserSubscription)\ndef set_user_subscriber_number(sender, instance, **kwargs):\n if 
not instance.canceled and not instance.user.subscriber_number:\n instance.user.set_next_subscriber_number_and_save()\n", "path": "kuma/users/models.py"}]} | 4,046 | 243 |
gh_patches_debug_3874 | rasdani/github-patches | git_diff | quantumlib__Cirq-5706 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`dev_tools/modules.py` fails with a confusing error if invoked without argument
**Description of the issue**
The program `dev_tools/modules.py` exits with a confusing and user-unfriendly error message if it is invoked without any arguments. At the same time, it is not obvious (or at least, I haven't found it mentioned in the docs) that _not_ passing any arguments to it is unsupported.
**How to reproduce the issue**
In a shell, with the current directory being the top level of the Cirq source code repo cloned from GitHub, run the following command:
```
python dev_tools/modules.py
```
<details>
```
Traceback (most recent call last):
File "/Users/mhucka/project-files/quantum/repos/Cirq/dev_tools/modules.py", line 317, in <module>
main(sys.argv[1:]) # coverage: ignore
File "/Users/mhucka/project-files/quantum/repos/Cirq/dev_tools/modules.py", line 309, in main
f = args.func
AttributeError: 'Namespace' object has no attribute 'func'
```
</details>
**Cirq version**
0.16.0.dev
**Additional comments**
A user coming to Cirq might reasonably try to run `python dev_tools/modules.py`, not knowing ahead of time what arguments are valid. IMHO, it would be more user-friendly if `modules.py` would at least print a help message if invoked without arguments.
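For illustration, here is a minimal sketch (not the actual `modules.py` code; names are made up) of the kind of fallback I have in mind — an argparse CLI with subcommands that defaults to `--help` when invoked without arguments instead of failing on the missing `func` attribute:

```
import argparse
import sys


def main(argv):
    parser = argparse.ArgumentParser(description="demo CLI with subcommands")
    subparsers = parser.add_subparsers(title="subcommands")
    list_cmd = subparsers.add_parser("list", help="lists all the modules")
    # each subcommand stores the function to call in args.func
    list_cmd.set_defaults(func=lambda: print("listing modules..."))

    if not argv:
        # No arguments given: print the help/usage text instead of
        # raising AttributeError on the missing 'func' attribute.
        argv = ["--help"]
    args = parser.parse_args(argv)
    args.func()


if __name__ == "__main__":
    main(sys.argv[1:])
```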
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dev_tools/modules.py`
Content:
```
1 # Copyright 2021 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 """Utility tool for cirq modules.
17
18 It can be used as a python library for python scripts as well as a CLI tool for
19 bash scripts and interactive use.
20
21 Features:
22
23 listing modules:
24 - Python: see list_modules
25
26 Version management:
27 - Python: get_version and replace_version
28 - CLI:
29 - python3 dev_tools/modules.py print_version
30 - python3 dev_tools/modules.py replace_version --old v0.12.0.dev --new v0.12.1.dev
31
32 optional arguments:
33 -h, --help show this help message and exit
34
35 subcommands:
36 valid subcommands
37
38 {list,print_version,replace_version}
39 additional help
40 list lists all the modules
41 print_version Check that all module versions are the same, and print it.
42 replace_version replace Cirq version in all modules
43 """
44
45 import argparse
46 import dataclasses
47 import os
48 import re
49 import sys
50 from pathlib import Path
51 from typing import List, Dict, Any, Optional
52
53 _FOLDER = 'folder'
54 _PACKAGE_PATH = 'package-path'
55 _PACKAGE = 'package'
56
57 _DEFAULT_SEARCH_DIR: Path = Path(".")
58
59
60 @dataclasses.dataclass
61 class Module:
62 root: Path
63 raw_setup: Dict[str, Any]
64
65 name: str = dataclasses.field(init=False)
66 version: str = dataclasses.field(init=False)
67 top_level_packages: List[str] = dataclasses.field(init=False)
68 top_level_package_paths: List[Path] = dataclasses.field(init=False)
69 install_requires: List[str] = dataclasses.field(init=False)
70
71 def __post_init__(self) -> None:
72 self.name = self.raw_setup['name']
73 if 'packages' in self.raw_setup:
74 self.top_level_packages = [p for p in self.raw_setup['packages'] if '.' not in p]
75 else:
76 self.top_level_packages = []
77 self.top_level_package_paths = [self.root / p for p in self.top_level_packages]
78 self.version = self.raw_setup['version']
79 self.install_requires = (
80 [] if 'install_requires' not in self.raw_setup else self.raw_setup['install_requires']
81 )
82
83
84 def list_modules(
85 search_dir: Path = _DEFAULT_SEARCH_DIR, include_parent: bool = False
86 ) -> List[Module]:
87 """Returns a list of python modules based defined by setup.py files.
88
89 Args:
90 include_parent: if true, a setup.py is expected in `search_dir`, and the corresponding
91 module will be included.
92 search_dir: the search directory for modules, by default the repo root.
93 Returns:
94 a list of `Module`s that were found, where each module `m` is initialized with `m.root`
95 relative to `search_dir`, `m.raw_setup` contains the dictionary equivalent to the
96 keyword args passed to the `setuptools.setup` method in setup.py
97 Raises:
98 ValueError: if include_parent=True but there is no setup.py in `search_dir`.
99 """
100
101 relative_folders = sorted(
102 f.relative_to(search_dir)
103 for f in search_dir.glob("*")
104 if f.is_dir() and (f / "setup.py").is_file()
105 )
106 if include_parent:
107 parent_setup_py = search_dir / "setup.py"
108 if not parent_setup_py.exists():
109 raise ValueError(f"include_parent=True, but {parent_setup_py} does not exist.")
110 relative_folders.append(Path("."))
111
112 result = [
113 Module(root=folder, raw_setup=_parse_module(search_dir / folder))
114 for folder in relative_folders
115 ]
116
117 return result
118
119
120 def get_version(search_dir: Path = _DEFAULT_SEARCH_DIR) -> Optional[str]:
121 """Check for all versions are the same and return that version.
122
123 Lists all the modules within `search_dir` (default the current working directory), checks that
124 all of them are the same version and returns that version. If no modules found, None is
125 returned, if more than one, ValueError is raised.
126
127 Args:
128 search_dir: the search directory for modules.
129 Returns:
130 None if no modules are found, the version number if exactly one version number is found.
131 Raises:
132 ValueError: if more than one version numbers are found.
133 """
134 try:
135 mods = list_modules(search_dir=search_dir, include_parent=True)
136 except ValueError:
137 return None
138 versions = {m.name: m.version for m in mods}
139 if len(set(versions.values())) > 1:
140 raise ValueError(f"Versions should be the same, instead: \n{versions}")
141 return list(set(versions.values()))[0]
142
143
144 def replace_version(search_dir: Path = _DEFAULT_SEARCH_DIR, *, old: str, new: str):
145 """Replaces the current version number with a new version number.
146
147 Args:
148 search_dir: the search directory for modules.
149 old: the current version number.
150 new: the new version number.
151 Raises:
152 ValueError: if `old` does not match the current version, or if there is not exactly one
153 version number in the found modules.
154 """
155 version = get_version(search_dir=search_dir)
156 if version != old:
157 raise ValueError(f"{old} does not match current version: {version}")
158
159 _validate_version(new)
160
161 for m in list_modules(search_dir=search_dir, include_parent=True):
162 version_file = _find_version_file(search_dir / m.root)
163 content = version_file.read_text("UTF-8")
164 new_content = content.replace(old, new)
165 version_file.write_text(new_content)
166
167
168 def _validate_version(new_version: str):
169 if not re.match(r"\d+\.\d+\.\d+(\.dev)?", new_version):
170 raise ValueError(f"{new_version} is not a valid version number.")
171
172
173 def _find_version_file(top: Path) -> Path:
174 for root, _, files in os.walk(str(top)):
175 if "_version.py" in files:
176 return Path(root) / "_version.py"
177 raise FileNotFoundError(f"Can't find _version.py in {top}.")
178
179
180 def _parse_module(folder: Path) -> Dict[str, Any]:
181 setup_args = {}
182 import setuptools
183
184 orig_setup = setuptools.setup
185 cwd = os.getcwd()
186
187 def setup(**kwargs):
188 setup_args.update(kwargs)
189
190 try:
191 setuptools.setup = setup
192 os.chdir(str(folder))
193 setup_py = open("setup.py").read()
194 exec(setup_py, globals(), {})
195 assert setup_args, f"Invalid setup.py - setup() was not called in {folder}/setup.py!"
196 return setup_args
197 except BaseException:
198 print(f"Failed to run {folder}/setup.py:")
199 raise
200 finally:
201 setuptools.setup = orig_setup
202 os.chdir(cwd)
203
204
205 ############################################
206 # CLI MANAGEMENT
207 ############################################
208
209
210 # --------------
211 # print_version
212 # --------------
213
214
215 def _print_version():
216 print(get_version())
217
218
219 def _add_print_version_cmd(subparsers):
220 print_version_cmd = subparsers.add_parser(
221 "print_version", help="Check that all module versions are the same, " "and print it."
222 )
223 print_version_cmd.set_defaults(func=_print_version)
224
225
226 # --------------
227 # replace_version
228 # --------------
229
230
231 def _replace_version(old: str, new: str):
232 replace_version(old=old, new=new)
233 print(f"Successfully replaced version {old} with {new}.")
234
235
236 def _add_replace_version_cmd(subparsers):
237 replace_version_cmd = subparsers.add_parser(
238 "replace_version", help="replace Cirq version in all modules"
239 )
240 replace_version_cmd.add_argument(
241 "--old", required=True, help="the current version to be replaced"
242 )
243 replace_version_cmd.add_argument("--new", required=True, help="the new version to be replaced")
244 replace_version_cmd.set_defaults(func=_replace_version)
245
246
247 # --------------
248 # list_modules
249 # --------------
250
251
252 def _add_list_modules_cmd(subparsers):
253 list_modules_cmd = subparsers.add_parser("list", help="lists all the modules")
254 list_modules_cmd.add_argument(
255 "--mode",
256 default=_FOLDER,
257 choices=[_FOLDER, _PACKAGE_PATH, _PACKAGE],
258 type=str,
259 help="'folder' to list root folder for module (e.g. cirq-google),\n"
260 "'package-path' for top level python package path (e.g. cirq-google/cirq_google),\n"
261 "'package' for top level python package (e.g cirq_google),\n",
262 )
263 list_modules_cmd.add_argument(
264 "--include-parent",
265 help="whether to include the parent package or not",
266 default=False,
267 action="store_true",
268 )
269 list_modules_cmd.set_defaults(func=_print_list_modules)
270
271
272 def _print_list_modules(mode: str, include_parent: bool = False):
273 """Prints certain properties of cirq modules on separate lines.
274
275 Module root folder and top level package paths are supported. The search dir is the current
276 directory.
277
278 Args:
279 mode: 'folder' lists the root folder for each module, 'package-path' lists the path to
280 the top level package(s).
281 include_parent: when true the cirq metapackage is included in the list
282 """
283 for m in list_modules(Path("."), include_parent):
284 if mode == _FOLDER:
285 print(m.root, end=" ")
286 elif mode == _PACKAGE_PATH:
287 for p in m.top_level_package_paths:
288 print(p, end=" ")
289 elif mode == _PACKAGE:
290 for package in m.top_level_packages:
291 print(package, end=" ")
292
293
294 def parse(args):
295 parser = argparse.ArgumentParser('A utility for modules.')
296 subparsers = parser.add_subparsers(
297 title='subcommands', description='valid subcommands', help='additional help'
298 )
299 _add_list_modules_cmd(subparsers)
300 _add_print_version_cmd(subparsers)
301 _add_replace_version_cmd(subparsers)
302 return parser.parse_args(args)
303
304
305 def main(argv: List[str]):
306 args = parse(argv)
307 # args.func is where we store the function to be called for a given subparser
308 # e.g. it is list_modules for the `list` subcommand
309 f = args.func
310 # however the func is not going to be needed for the function itself, so
311 # we remove it here
312 del args.func
313 f(**vars(args))
314
315
316 if __name__ == '__main__':
317 main(sys.argv[1:]) # coverage: ignore
318
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dev_tools/modules.py b/dev_tools/modules.py
--- a/dev_tools/modules.py
+++ b/dev_tools/modules.py
@@ -303,6 +303,9 @@
def main(argv: List[str]):
+ if argv == []:
+ # If no arguments are given, print the help/usage info.
+ argv = ['--help']
args = parse(argv)
# args.func is where we store the function to be called for a given subparser
# e.g. it is list_modules for the `list` subcommand
| {"golden_diff": "diff --git a/dev_tools/modules.py b/dev_tools/modules.py\n--- a/dev_tools/modules.py\n+++ b/dev_tools/modules.py\n@@ -303,6 +303,9 @@\n \n \n def main(argv: List[str]):\n+ if argv == []:\n+ # If no arguments are given, print the help/usage info.\n+ argv = ['--help']\n args = parse(argv)\n # args.func is where we store the function to be called for a given subparser\n # e.g. it is list_modules for the `list` subcommand\n", "issue": "`dev_tools/modules.py` fails with a confusing error if invoked without argument\n**Description of the issue**\r\n\r\nThe program `dev_tools/modules.py` exits with a confusing and user-unfriendly error message if it is invoked without any arguments. At the same time, it is not obvious (or at least, I haven't found it mentioned in the docs) that _not_ passing any arguments to it is unsupported. \r\n\r\n**How to reproduce the issue**\r\n\r\nIn a shell, with the current directory being the top level of the Cirq source code repo cloned from GitHub, run the following command:\r\n\r\n```\r\npython dev_tools/modules.py\r\n```\r\n\r\n<details>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/mhucka/project-files/quantum/repos/Cirq/dev_tools/modules.py\", line 317, in <module>\r\n main(sys.argv[1:]) # coverage: ignore\r\n File \"/Users/mhucka/project-files/quantum/repos/Cirq/dev_tools/modules.py\", line 309, in main\r\n f = args.func\r\nAttributeError: 'Namespace' object has no attribute 'func'\r\n```\r\n\r\n</details>\r\n\r\n**Cirq version**\r\n\r\n0.16.0.dev\r\n\r\n**Additional comments**\r\n\r\nA user coming to Cirq might reasonably try to run `python dev_tools/modules.py`, not knowing ahead of time what arguments are valid. IMHO, it would be more user-friendly if `modules.py` would at least print a help message if invoked without arguments.\r\n\n", "before_files": [{"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Utility tool for cirq modules.\n\nIt can be used as a python library for python scripts as well as a CLI tool for\nbash scripts and interactive use.\n\nFeatures:\n\nlisting modules:\n - Python: see list_modules\n\nVersion management:\n - Python: get_version and replace_version\n - CLI:\n - python3 dev_tools/modules.py print_version\n - python3 dev_tools/modules.py replace_version --old v0.12.0.dev --new v0.12.1.dev\n\noptional arguments:\n -h, --help show this help message and exit\n\nsubcommands:\n valid subcommands\n\n {list,print_version,replace_version}\n additional help\n list lists all the modules\n print_version Check that all module versions are the same, and print it.\n replace_version replace Cirq version in all modules\n\"\"\"\n\nimport argparse\nimport dataclasses\nimport os\nimport re\nimport sys\nfrom pathlib import Path\nfrom typing import List, Dict, Any, Optional\n\n_FOLDER = 'folder'\n_PACKAGE_PATH = 'package-path'\n_PACKAGE = 'package'\n\n_DEFAULT_SEARCH_DIR: Path = Path(\".\")\n\n\[email protected]\nclass Module:\n root: Path\n raw_setup: 
Dict[str, Any]\n\n name: str = dataclasses.field(init=False)\n version: str = dataclasses.field(init=False)\n top_level_packages: List[str] = dataclasses.field(init=False)\n top_level_package_paths: List[Path] = dataclasses.field(init=False)\n install_requires: List[str] = dataclasses.field(init=False)\n\n def __post_init__(self) -> None:\n self.name = self.raw_setup['name']\n if 'packages' in self.raw_setup:\n self.top_level_packages = [p for p in self.raw_setup['packages'] if '.' not in p]\n else:\n self.top_level_packages = []\n self.top_level_package_paths = [self.root / p for p in self.top_level_packages]\n self.version = self.raw_setup['version']\n self.install_requires = (\n [] if 'install_requires' not in self.raw_setup else self.raw_setup['install_requires']\n )\n\n\ndef list_modules(\n search_dir: Path = _DEFAULT_SEARCH_DIR, include_parent: bool = False\n) -> List[Module]:\n \"\"\"Returns a list of python modules based defined by setup.py files.\n\n Args:\n include_parent: if true, a setup.py is expected in `search_dir`, and the corresponding\n module will be included.\n search_dir: the search directory for modules, by default the repo root.\n Returns:\n a list of `Module`s that were found, where each module `m` is initialized with `m.root`\n relative to `search_dir`, `m.raw_setup` contains the dictionary equivalent to the\n keyword args passed to the `setuptools.setup` method in setup.py\n Raises:\n ValueError: if include_parent=True but there is no setup.py in `search_dir`.\n \"\"\"\n\n relative_folders = sorted(\n f.relative_to(search_dir)\n for f in search_dir.glob(\"*\")\n if f.is_dir() and (f / \"setup.py\").is_file()\n )\n if include_parent:\n parent_setup_py = search_dir / \"setup.py\"\n if not parent_setup_py.exists():\n raise ValueError(f\"include_parent=True, but {parent_setup_py} does not exist.\")\n relative_folders.append(Path(\".\"))\n\n result = [\n Module(root=folder, raw_setup=_parse_module(search_dir / folder))\n for folder in relative_folders\n ]\n\n return result\n\n\ndef get_version(search_dir: Path = _DEFAULT_SEARCH_DIR) -> Optional[str]:\n \"\"\"Check for all versions are the same and return that version.\n\n Lists all the modules within `search_dir` (default the current working directory), checks that\n all of them are the same version and returns that version. 
If no modules found, None is\n returned, if more than one, ValueError is raised.\n\n Args:\n search_dir: the search directory for modules.\n Returns:\n None if no modules are found, the version number if exactly one version number is found.\n Raises:\n ValueError: if more than one version numbers are found.\n \"\"\"\n try:\n mods = list_modules(search_dir=search_dir, include_parent=True)\n except ValueError:\n return None\n versions = {m.name: m.version for m in mods}\n if len(set(versions.values())) > 1:\n raise ValueError(f\"Versions should be the same, instead: \\n{versions}\")\n return list(set(versions.values()))[0]\n\n\ndef replace_version(search_dir: Path = _DEFAULT_SEARCH_DIR, *, old: str, new: str):\n \"\"\"Replaces the current version number with a new version number.\n\n Args:\n search_dir: the search directory for modules.\n old: the current version number.\n new: the new version number.\n Raises:\n ValueError: if `old` does not match the current version, or if there is not exactly one\n version number in the found modules.\n \"\"\"\n version = get_version(search_dir=search_dir)\n if version != old:\n raise ValueError(f\"{old} does not match current version: {version}\")\n\n _validate_version(new)\n\n for m in list_modules(search_dir=search_dir, include_parent=True):\n version_file = _find_version_file(search_dir / m.root)\n content = version_file.read_text(\"UTF-8\")\n new_content = content.replace(old, new)\n version_file.write_text(new_content)\n\n\ndef _validate_version(new_version: str):\n if not re.match(r\"\\d+\\.\\d+\\.\\d+(\\.dev)?\", new_version):\n raise ValueError(f\"{new_version} is not a valid version number.\")\n\n\ndef _find_version_file(top: Path) -> Path:\n for root, _, files in os.walk(str(top)):\n if \"_version.py\" in files:\n return Path(root) / \"_version.py\"\n raise FileNotFoundError(f\"Can't find _version.py in {top}.\")\n\n\ndef _parse_module(folder: Path) -> Dict[str, Any]:\n setup_args = {}\n import setuptools\n\n orig_setup = setuptools.setup\n cwd = os.getcwd()\n\n def setup(**kwargs):\n setup_args.update(kwargs)\n\n try:\n setuptools.setup = setup\n os.chdir(str(folder))\n setup_py = open(\"setup.py\").read()\n exec(setup_py, globals(), {})\n assert setup_args, f\"Invalid setup.py - setup() was not called in {folder}/setup.py!\"\n return setup_args\n except BaseException:\n print(f\"Failed to run {folder}/setup.py:\")\n raise\n finally:\n setuptools.setup = orig_setup\n os.chdir(cwd)\n\n\n############################################\n# CLI MANAGEMENT\n############################################\n\n\n# --------------\n# print_version\n# --------------\n\n\ndef _print_version():\n print(get_version())\n\n\ndef _add_print_version_cmd(subparsers):\n print_version_cmd = subparsers.add_parser(\n \"print_version\", help=\"Check that all module versions are the same, \" \"and print it.\"\n )\n print_version_cmd.set_defaults(func=_print_version)\n\n\n# --------------\n# replace_version\n# --------------\n\n\ndef _replace_version(old: str, new: str):\n replace_version(old=old, new=new)\n print(f\"Successfully replaced version {old} with {new}.\")\n\n\ndef _add_replace_version_cmd(subparsers):\n replace_version_cmd = subparsers.add_parser(\n \"replace_version\", help=\"replace Cirq version in all modules\"\n )\n replace_version_cmd.add_argument(\n \"--old\", required=True, help=\"the current version to be replaced\"\n )\n replace_version_cmd.add_argument(\"--new\", required=True, help=\"the new version to be replaced\")\n 
replace_version_cmd.set_defaults(func=_replace_version)\n\n\n# --------------\n# list_modules\n# --------------\n\n\ndef _add_list_modules_cmd(subparsers):\n list_modules_cmd = subparsers.add_parser(\"list\", help=\"lists all the modules\")\n list_modules_cmd.add_argument(\n \"--mode\",\n default=_FOLDER,\n choices=[_FOLDER, _PACKAGE_PATH, _PACKAGE],\n type=str,\n help=\"'folder' to list root folder for module (e.g. cirq-google),\\n\"\n \"'package-path' for top level python package path (e.g. cirq-google/cirq_google),\\n\"\n \"'package' for top level python package (e.g cirq_google),\\n\",\n )\n list_modules_cmd.add_argument(\n \"--include-parent\",\n help=\"whether to include the parent package or not\",\n default=False,\n action=\"store_true\",\n )\n list_modules_cmd.set_defaults(func=_print_list_modules)\n\n\ndef _print_list_modules(mode: str, include_parent: bool = False):\n \"\"\"Prints certain properties of cirq modules on separate lines.\n\n Module root folder and top level package paths are supported. The search dir is the current\n directory.\n\n Args:\n mode: 'folder' lists the root folder for each module, 'package-path' lists the path to\n the top level package(s).\n include_parent: when true the cirq metapackage is included in the list\n \"\"\"\n for m in list_modules(Path(\".\"), include_parent):\n if mode == _FOLDER:\n print(m.root, end=\" \")\n elif mode == _PACKAGE_PATH:\n for p in m.top_level_package_paths:\n print(p, end=\" \")\n elif mode == _PACKAGE:\n for package in m.top_level_packages:\n print(package, end=\" \")\n\n\ndef parse(args):\n parser = argparse.ArgumentParser('A utility for modules.')\n subparsers = parser.add_subparsers(\n title='subcommands', description='valid subcommands', help='additional help'\n )\n _add_list_modules_cmd(subparsers)\n _add_print_version_cmd(subparsers)\n _add_replace_version_cmd(subparsers)\n return parser.parse_args(args)\n\n\ndef main(argv: List[str]):\n args = parse(argv)\n # args.func is where we store the function to be called for a given subparser\n # e.g. 
it is list_modules for the `list` subcommand\n f = args.func\n # however the func is not going to be needed for the function itself, so\n # we remove it here\n del args.func\n f(**vars(args))\n\n\nif __name__ == '__main__':\n main(sys.argv[1:]) # coverage: ignore\n", "path": "dev_tools/modules.py"}], "after_files": [{"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Utility tool for cirq modules.\n\nIt can be used as a python library for python scripts as well as a CLI tool for\nbash scripts and interactive use.\n\nFeatures:\n\nlisting modules:\n - Python: see list_modules\n\nVersion management:\n - Python: get_version and replace_version\n - CLI:\n - python3 dev_tools/modules.py print_version\n - python3 dev_tools/modules.py replace_version --old v0.12.0.dev --new v0.12.1.dev\n\noptional arguments:\n -h, --help show this help message and exit\n\nsubcommands:\n valid subcommands\n\n {list,print_version,replace_version}\n additional help\n list lists all the modules\n print_version Check that all module versions are the same, and print it.\n replace_version replace Cirq version in all modules\n\"\"\"\n\nimport argparse\nimport dataclasses\nimport os\nimport re\nimport sys\nfrom pathlib import Path\nfrom typing import List, Dict, Any, Optional\n\n_FOLDER = 'folder'\n_PACKAGE_PATH = 'package-path'\n_PACKAGE = 'package'\n\n_DEFAULT_SEARCH_DIR: Path = Path(\".\")\n\n\[email protected]\nclass Module:\n root: Path\n raw_setup: Dict[str, Any]\n\n name: str = dataclasses.field(init=False)\n version: str = dataclasses.field(init=False)\n top_level_packages: List[str] = dataclasses.field(init=False)\n top_level_package_paths: List[Path] = dataclasses.field(init=False)\n install_requires: List[str] = dataclasses.field(init=False)\n\n def __post_init__(self) -> None:\n self.name = self.raw_setup['name']\n if 'packages' in self.raw_setup:\n self.top_level_packages = [p for p in self.raw_setup['packages'] if '.' 
not in p]\n else:\n self.top_level_packages = []\n self.top_level_package_paths = [self.root / p for p in self.top_level_packages]\n self.version = self.raw_setup['version']\n self.install_requires = (\n [] if 'install_requires' not in self.raw_setup else self.raw_setup['install_requires']\n )\n\n\ndef list_modules(\n search_dir: Path = _DEFAULT_SEARCH_DIR, include_parent: bool = False\n) -> List[Module]:\n \"\"\"Returns a list of python modules based defined by setup.py files.\n\n Args:\n include_parent: if true, a setup.py is expected in `search_dir`, and the corresponding\n module will be included.\n search_dir: the search directory for modules, by default the repo root.\n Returns:\n a list of `Module`s that were found, where each module `m` is initialized with `m.root`\n relative to `search_dir`, `m.raw_setup` contains the dictionary equivalent to the\n keyword args passed to the `setuptools.setup` method in setup.py\n Raises:\n ValueError: if include_parent=True but there is no setup.py in `search_dir`.\n \"\"\"\n\n relative_folders = sorted(\n f.relative_to(search_dir)\n for f in search_dir.glob(\"*\")\n if f.is_dir() and (f / \"setup.py\").is_file()\n )\n if include_parent:\n parent_setup_py = search_dir / \"setup.py\"\n if not parent_setup_py.exists():\n raise ValueError(f\"include_parent=True, but {parent_setup_py} does not exist.\")\n relative_folders.append(Path(\".\"))\n\n result = [\n Module(root=folder, raw_setup=_parse_module(search_dir / folder))\n for folder in relative_folders\n ]\n\n return result\n\n\ndef get_version(search_dir: Path = _DEFAULT_SEARCH_DIR) -> Optional[str]:\n \"\"\"Check for all versions are the same and return that version.\n\n Lists all the modules within `search_dir` (default the current working directory), checks that\n all of them are the same version and returns that version. 
If no modules found, None is\n returned, if more than one, ValueError is raised.\n\n Args:\n search_dir: the search directory for modules.\n Returns:\n None if no modules are found, the version number if exactly one version number is found.\n Raises:\n ValueError: if more than one version numbers are found.\n \"\"\"\n try:\n mods = list_modules(search_dir=search_dir, include_parent=True)\n except ValueError:\n return None\n versions = {m.name: m.version for m in mods}\n if len(set(versions.values())) > 1:\n raise ValueError(f\"Versions should be the same, instead: \\n{versions}\")\n return list(set(versions.values()))[0]\n\n\ndef replace_version(search_dir: Path = _DEFAULT_SEARCH_DIR, *, old: str, new: str):\n \"\"\"Replaces the current version number with a new version number.\n\n Args:\n search_dir: the search directory for modules.\n old: the current version number.\n new: the new version number.\n Raises:\n ValueError: if `old` does not match the current version, or if there is not exactly one\n version number in the found modules.\n \"\"\"\n version = get_version(search_dir=search_dir)\n if version != old:\n raise ValueError(f\"{old} does not match current version: {version}\")\n\n _validate_version(new)\n\n for m in list_modules(search_dir=search_dir, include_parent=True):\n version_file = _find_version_file(search_dir / m.root)\n content = version_file.read_text(\"UTF-8\")\n new_content = content.replace(old, new)\n version_file.write_text(new_content)\n\n\ndef _validate_version(new_version: str):\n if not re.match(r\"\\d+\\.\\d+\\.\\d+(\\.dev)?\", new_version):\n raise ValueError(f\"{new_version} is not a valid version number.\")\n\n\ndef _find_version_file(top: Path) -> Path:\n for root, _, files in os.walk(str(top)):\n if \"_version.py\" in files:\n return Path(root) / \"_version.py\"\n raise FileNotFoundError(f\"Can't find _version.py in {top}.\")\n\n\ndef _parse_module(folder: Path) -> Dict[str, Any]:\n setup_args = {}\n import setuptools\n\n orig_setup = setuptools.setup\n cwd = os.getcwd()\n\n def setup(**kwargs):\n setup_args.update(kwargs)\n\n try:\n setuptools.setup = setup\n os.chdir(str(folder))\n setup_py = open(\"setup.py\").read()\n exec(setup_py, globals(), {})\n assert setup_args, f\"Invalid setup.py - setup() was not called in {folder}/setup.py!\"\n return setup_args\n except BaseException:\n print(f\"Failed to run {folder}/setup.py:\")\n raise\n finally:\n setuptools.setup = orig_setup\n os.chdir(cwd)\n\n\n############################################\n# CLI MANAGEMENT\n############################################\n\n\n# --------------\n# print_version\n# --------------\n\n\ndef _print_version():\n print(get_version())\n\n\ndef _add_print_version_cmd(subparsers):\n print_version_cmd = subparsers.add_parser(\n \"print_version\", help=\"Check that all module versions are the same, \" \"and print it.\"\n )\n print_version_cmd.set_defaults(func=_print_version)\n\n\n# --------------\n# replace_version\n# --------------\n\n\ndef _replace_version(old: str, new: str):\n replace_version(old=old, new=new)\n print(f\"Successfully replaced version {old} with {new}.\")\n\n\ndef _add_replace_version_cmd(subparsers):\n replace_version_cmd = subparsers.add_parser(\n \"replace_version\", help=\"replace Cirq version in all modules\"\n )\n replace_version_cmd.add_argument(\n \"--old\", required=True, help=\"the current version to be replaced\"\n )\n replace_version_cmd.add_argument(\"--new\", required=True, help=\"the new version to be replaced\")\n 
replace_version_cmd.set_defaults(func=_replace_version)\n\n\n# --------------\n# list_modules\n# --------------\n\n\ndef _add_list_modules_cmd(subparsers):\n list_modules_cmd = subparsers.add_parser(\"list\", help=\"lists all the modules\")\n list_modules_cmd.add_argument(\n \"--mode\",\n default=_FOLDER,\n choices=[_FOLDER, _PACKAGE_PATH, _PACKAGE],\n type=str,\n help=\"'folder' to list root folder for module (e.g. cirq-google),\\n\"\n \"'package-path' for top level python package path (e.g. cirq-google/cirq_google),\\n\"\n \"'package' for top level python package (e.g cirq_google),\\n\",\n )\n list_modules_cmd.add_argument(\n \"--include-parent\",\n help=\"whether to include the parent package or not\",\n default=False,\n action=\"store_true\",\n )\n list_modules_cmd.set_defaults(func=_print_list_modules)\n\n\ndef _print_list_modules(mode: str, include_parent: bool = False):\n \"\"\"Prints certain properties of cirq modules on separate lines.\n\n Module root folder and top level package paths are supported. The search dir is the current\n directory.\n\n Args:\n mode: 'folder' lists the root folder for each module, 'package-path' lists the path to\n the top level package(s).\n include_parent: when true the cirq metapackage is included in the list\n \"\"\"\n for m in list_modules(Path(\".\"), include_parent):\n if mode == _FOLDER:\n print(m.root, end=\" \")\n elif mode == _PACKAGE_PATH:\n for p in m.top_level_package_paths:\n print(p, end=\" \")\n elif mode == _PACKAGE:\n for package in m.top_level_packages:\n print(package, end=\" \")\n\n\ndef parse(args):\n parser = argparse.ArgumentParser('A utility for modules.')\n subparsers = parser.add_subparsers(\n title='subcommands', description='valid subcommands', help='additional help'\n )\n _add_list_modules_cmd(subparsers)\n _add_print_version_cmd(subparsers)\n _add_replace_version_cmd(subparsers)\n return parser.parse_args(args)\n\n\ndef main(argv: List[str]):\n if argv == []:\n # If no arguments are given, print the help/usage info.\n argv = ['--help']\n args = parse(argv)\n # args.func is where we store the function to be called for a given subparser\n # e.g. it is list_modules for the `list` subcommand\n f = args.func\n # however the func is not going to be needed for the function itself, so\n # we remove it here\n del args.func\n f(**vars(args))\n\n\nif __name__ == '__main__':\n main(sys.argv[1:]) # coverage: ignore\n", "path": "dev_tools/modules.py"}]} | 3,828 | 122 |
gh_patches_debug_4894 | rasdani/github-patches | git_diff | streamlink__streamlink-5369 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.pluzz: La 1ere subdomain from francetvinfo.fr is not supported
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
VODs and the 24/7 live stream from the La 1ere francetvinfo.fr subdomain (https://la1ere.francetvinfo.fr) are not supported by the Pluzz plugin. Finding the original .m3u8 works for the live stream, but the link changes every (at least) 2 hours.
First log with VOD : https://la1ere.francetvinfo.fr/programme-video/la1ere_mitterrand-et-l-outre-mer/diffusion/2976069-espoir-et-promesses.html
Second log with live 24/7 : https://la1ere.francetvinfo.fr/info-en-continu-24-24 (HTTP referer needed for playing it)
French IP needed for playing both.
### Debug log
```text
streamlink --loglevel=debug https://la1ere.francetvinfo.fr/programme-video/la1ere_mitterrand-et-l-outre-mer/diffusion/2976069-espoir-et-promesses.html best
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.11.3
[cli][debug] Streamlink: 5.5.1
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.5.7
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.2
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.17
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.29.0
[cli][debug] urllib3: 1.26.15
[cli][debug] websocket-client: 1.5.1
[cli][debug] Arguments:
[cli][debug] url=https://la1ere.francetvinfo.fr/programme-video/la1ere_mitterrand-et-l-outre-mer/diffusion/2976069-espoir-et-promesses.html
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][debug] --player="D:\Program Files\VideoLAN\VLC\vlc.exe" --file-caching=5000
[cli][debug] --ffmpeg-ffmpeg=C:\Users\Orion\AppData\Local\Programs\Streamlink\ffmpeg\ffmpeg.exe
[cli][info] Found matching plugin pluzz for URL https://la1ere.francetvinfo.fr/programme-video/la1ere_mitterrand-et-l-outre-mer/diffusion/2976069-espoir-et-promesses.html
[plugins.pluzz][debug] Country: FR
error: No playable streams found on this URL: https://la1ere.francetvinfo.fr/programme-video/la1ere_mitterrand-et-l-outre-mer/diffusion/2976069-espoir-et-promesses.html
streamlink --loglevel=debug --http-header "Referer= https://la1ere.francetvinfo.fr" https://la1ere.francetvinfo.fr/info-en-continu-24-24 best
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.11.3
[cli][debug] Streamlink: 5.5.1
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.5.7
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.2
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.17
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.29.0
[cli][debug] urllib3: 1.26.15
[cli][debug] websocket-client: 1.5.1
[cli][debug] Arguments:
[cli][debug] url=https://la1ere.francetvinfo.fr/info-en-continu-24-24
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][debug] --player="D:\Program Files\VideoLAN\VLC\vlc.exe" --file-caching=5000
[cli][debug] --ffmpeg-ffmpeg=C:\Users\Orion\AppData\Local\Programs\Streamlink\ffmpeg\ffmpeg.exe
[cli][debug] --http-header=[('Referer', 'https://la1ere.francetvinfo.fr')]
[cli][info] Found matching plugin pluzz for URL https://la1ere.francetvinfo.fr/info-en-continu-24-24
[plugins.pluzz][debug] Country: FR
error: No playable streams found on this URL: https://la1ere.francetvinfo.fr/info-en-continu-24-24
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/pluzz.py`
Content:
```
1 """
2 $description Live TV channels and video on-demand service from france.tv, a French public, state-owned broadcaster.
3 $url france.tv
4 $url francetvinfo.fr
5 $type live, vod
6 $region France, Andorra, Monaco
7 """
8
9 import logging
10 import re
11 from urllib.parse import urlparse
12
13 from streamlink.plugin import Plugin, PluginError, pluginmatcher
14 from streamlink.plugin.api import useragents, validate
15 from streamlink.stream.dash import DASHStream
16 from streamlink.stream.hls import HLSStream
17 from streamlink.utils.times import localnow
18 from streamlink.utils.url import update_qsd
19
20
21 log = logging.getLogger(__name__)
22
23
24 @pluginmatcher(re.compile(r"""
25 https?://(?:
26 (?:www\.)?france\.tv/
27 |
28 (?:.+\.)?francetvinfo\.fr/
29 )
30 """, re.VERBOSE))
31 class Pluzz(Plugin):
32 PLAYER_VERSION = "5.51.35"
33 GEO_URL = "https://geoftv-a.akamaihd.net/ws/edgescape.json"
34 API_URL = "https://player.webservices.francetelevisions.fr/v1/videos/{video_id}"
35
36 def _get_streams(self):
37 self.session.http.headers.update({
38 "User-Agent": useragents.CHROME,
39 })
40 CHROME_VERSION = re.compile(r"Chrome/(\d+)").search(useragents.CHROME).group(1)
41
42 # Retrieve geolocation data
43 country_code = self.session.http.get(self.GEO_URL, schema=validate.Schema(
44 validate.parse_json(),
45 {"reponse": {"geo_info": {
46 "country_code": str,
47 }}},
48 validate.get(("reponse", "geo_info", "country_code")),
49 ))
50 log.debug(f"Country: {country_code}")
51
52 # Retrieve URL page and search for video ID
53 video_id = None
54 try:
55 video_id = self.session.http.get(self.url, schema=validate.Schema(
56 validate.parse_html(),
57 validate.any(
58 validate.all(
59 validate.xml_xpath_string(".//script[contains(text(),'window.FTVPlayerVideos')][1]/text()"),
60 str,
61 validate.regex(re.compile(
62 r"window\.FTVPlayerVideos\s*=\s*(?P<json>\[{.+?}])\s*;\s*(?:$|var)",
63 re.DOTALL,
64 )),
65 validate.get("json"),
66 validate.parse_json(),
67 [{"videoId": str}],
68 validate.get((0, "videoId")),
69 ),
70 validate.all(
71 validate.xml_xpath_string(".//script[contains(text(),'new Magnetoscope')][1]/text()"),
72 str,
73 validate.regex(re.compile(
74 r"""player\.load\s*\(\s*{\s*src\s*:\s*(?P<q>['"])(?P<video_id>.+?)(?P=q)\s*}\s*\)\s*;""",
75 )),
76 validate.get("video_id"),
77 ),
78 validate.all(
79 validate.xml_xpath_string(".//*[@id][contains(@class,'francetv-player-wrapper')][1]/@id"),
80 str,
81 ),
82 validate.all(
83 validate.xml_xpath_string(".//*[@data-id][@class='magneto'][1]/@data-id"),
84 str,
85 ),
86 ),
87 ))
88 except PluginError:
89 pass
90 if not video_id:
91 return
92 log.debug(f"Video ID: {video_id}")
93
94 api_url = update_qsd(self.API_URL.format(video_id=video_id), {
95 "country_code": country_code,
96 "w": 1920,
97 "h": 1080,
98 "player_version": self.PLAYER_VERSION,
99 "domain": urlparse(self.url).netloc,
100 "device_type": "mobile",
101 "browser": "chrome",
102 "browser_version": CHROME_VERSION,
103 "os": "ios",
104 "gmt": localnow().strftime("%z"),
105 })
106 video_format, token_url, url, self.title = self.session.http.get(api_url, schema=validate.Schema(
107 validate.parse_json(),
108 {
109 "video": {
110 "workflow": validate.any("token-akamai", "dai"),
111 "format": validate.any("dash", "hls"),
112 "token": validate.url(),
113 "url": validate.url(),
114 },
115 "meta": {
116 "title": str,
117 },
118 },
119 validate.union_get(
120 ("video", "format"),
121 ("video", "token"),
122 ("video", "url"),
123 ("meta", "title"),
124 ),
125 ))
126
127 data_url = update_qsd(token_url, {
128 "url": url,
129 })
130 video_url = self.session.http.get(data_url, schema=validate.Schema(
131 validate.parse_json(),
132 {"url": validate.url()},
133 validate.get("url"),
134 ))
135
136 if video_format == "dash":
137 yield from DASHStream.parse_manifest(self.session, video_url).items()
138 elif video_format == "hls":
139 yield from HLSStream.parse_variant_playlist(self.session, video_url).items()
140
141
142 __plugin__ = Pluzz
143
```
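
A plausible reading of the failure, given the XPath lookups in the file above: if the player element on the La 1ere pages carries more than one CSS class, the exact `@class='magneto'` comparison never matches, while a `contains(@class, ...)` test still would. A minimal lxml sketch (lxml is already a dependency in the debug log; the markup and the `data-id` value below are invented for illustration):

```python
# Illustrative only: the markup and "abc123" are made up for this sketch.
from lxml import html

doc = html.fromstring(
    "<html><body><div class='magneto player' data-id='abc123'></div></body></html>"
)
# Exact comparison fails because the attribute value is "magneto player".
print(doc.xpath(".//*[@data-id][@class='magneto'][1]/@data-id"))            # []
# Substring test still matches when extra classes are present.
print(doc.xpath(".//*[@data-id][contains(@class,'magneto')][1]/@data-id"))  # ['abc123']
```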
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/pluzz.py b/src/streamlink/plugins/pluzz.py
--- a/src/streamlink/plugins/pluzz.py
+++ b/src/streamlink/plugins/pluzz.py
@@ -80,7 +80,7 @@
str,
),
validate.all(
- validate.xml_xpath_string(".//*[@data-id][@class='magneto'][1]/@data-id"),
+ validate.xml_xpath_string(".//*[@data-id][contains(@class,'magneto')][1]/@data-id"),
str,
),
),
| {"golden_diff": "diff --git a/src/streamlink/plugins/pluzz.py b/src/streamlink/plugins/pluzz.py\n--- a/src/streamlink/plugins/pluzz.py\n+++ b/src/streamlink/plugins/pluzz.py\n@@ -80,7 +80,7 @@\n str,\n ),\n validate.all(\n- validate.xml_xpath_string(\".//*[@data-id][@class='magneto'][1]/@data-id\"),\n+ validate.xml_xpath_string(\".//*[@data-id][contains(@class,'magneto')][1]/@data-id\"),\n str,\n ),\n ),\n", "issue": "plugins.pluzz: La 1ere subdomain from francetvinfo.fr is not supported\n### Checklist\r\n\r\n- [X] This is a plugin issue and not a different kind of issue\r\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\r\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\r\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\r\n\r\n### Streamlink version\r\n\r\nLatest stable release\r\n\r\n### Description\r\n\r\nVODs and 24/7 live from the La 1ere francetvinfo.fr subdomain (https://la1ere.francetvinfo.fr) are not supported by the Pluzz plugin. Find the original .m3u8 works for the live, but the link changes every (at least) 2 hours.\r\n\r\nFirst log with VOD : https://la1ere.francetvinfo.fr/programme-video/la1ere_mitterrand-et-l-outre-mer/diffusion/2976069-espoir-et-promesses.html\r\n\r\nSecond log with live 24/7 : https://la1ere.francetvinfo.fr/info-en-continu-24-24 (HTTP referer needed for playing it)\r\n\r\nFrench IP needed for playing both.\r\n\r\n### Debug log\r\n\r\n```text\r\nstreamlink --loglevel=debug https://la1ere.francetvinfo.fr/programme-video/la1ere_mitterrand-et-l-outre-mer/diffusion/2976069-espoir-et-promesses.html best\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.11.3\r\n[cli][debug] Streamlink: 5.5.1\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.5.7\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.2\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.17\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.29.0\r\n[cli][debug] urllib3: 1.26.15\r\n[cli][debug] websocket-client: 1.5.1\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://la1ere.francetvinfo.fr/programme-video/la1ere_mitterrand-et-l-outre-mer/diffusion/2976069-espoir-et-promesses.html\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --player=\"D:\\Program Files\\VideoLAN\\VLC\\vlc.exe\" --file-caching=5000\r\n[cli][debug] --ffmpeg-ffmpeg=C:\\Users\\Orion\\AppData\\Local\\Programs\\Streamlink\\ffmpeg\\ffmpeg.exe\r\n[cli][info] Found matching plugin pluzz for URL https://la1ere.francetvinfo.fr/programme-video/la1ere_mitterrand-et-l-outre-mer/diffusion/2976069-espoir-et-promesses.html\r\n[plugins.pluzz][debug] Country: FR\r\nerror: No playable streams found on this URL: https://la1ere.francetvinfo.fr/programme-video/la1ere_mitterrand-et-l-outre-mer/diffusion/2976069-espoir-et-promesses.html\r\n\r\nstreamlink --loglevel=debug --http-header \"Referer= https://la1ere.francetvinfo.fr\" https://la1ere.francetvinfo.fr/info-en-continu-24-24 best\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.11.3\r\n[cli][debug] Streamlink: 5.5.1\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.5.7\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.2\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.17\r\n[cli][debug] PySocks: 
1.7.1\r\n[cli][debug] requests: 2.29.0\r\n[cli][debug] urllib3: 1.26.15\r\n[cli][debug] websocket-client: 1.5.1\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://la1ere.francetvinfo.fr/info-en-continu-24-24\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --player=\"D:\\Program Files\\VideoLAN\\VLC\\vlc.exe\" --file-caching=5000\r\n[cli][debug] --ffmpeg-ffmpeg=C:\\Users\\Orion\\AppData\\Local\\Programs\\Streamlink\\ffmpeg\\ffmpeg.exe\r\n[cli][debug] --http-header=[('Referer', 'https://la1ere.francetvinfo.fr')]\r\n[cli][info] Found matching plugin pluzz for URL https://la1ere.francetvinfo.fr/info-en-continu-24-24\r\n[plugins.pluzz][debug] Country: FR\r\nerror: No playable streams found on this URL: https://la1ere.francetvinfo.fr/info-en-continu-24-24\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n$description Live TV channels and video on-demand service from france.tv, a French public, state-owned broadcaster.\n$url france.tv\n$url francetvinfo.fr\n$type live, vod\n$region France, Andorra, Monaco\n\"\"\"\n\nimport logging\nimport re\nfrom urllib.parse import urlparse\n\nfrom streamlink.plugin import Plugin, PluginError, pluginmatcher\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.times import localnow\nfrom streamlink.utils.url import update_qsd\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"\"\"\n https?://(?:\n (?:www\\.)?france\\.tv/\n |\n (?:.+\\.)?francetvinfo\\.fr/\n )\n\"\"\", re.VERBOSE))\nclass Pluzz(Plugin):\n PLAYER_VERSION = \"5.51.35\"\n GEO_URL = \"https://geoftv-a.akamaihd.net/ws/edgescape.json\"\n API_URL = \"https://player.webservices.francetelevisions.fr/v1/videos/{video_id}\"\n\n def _get_streams(self):\n self.session.http.headers.update({\n \"User-Agent\": useragents.CHROME,\n })\n CHROME_VERSION = re.compile(r\"Chrome/(\\d+)\").search(useragents.CHROME).group(1)\n\n # Retrieve geolocation data\n country_code = self.session.http.get(self.GEO_URL, schema=validate.Schema(\n validate.parse_json(),\n {\"reponse\": {\"geo_info\": {\n \"country_code\": str,\n }}},\n validate.get((\"reponse\", \"geo_info\", \"country_code\")),\n ))\n log.debug(f\"Country: {country_code}\")\n\n # Retrieve URL page and search for video ID\n video_id = None\n try:\n video_id = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.any(\n validate.all(\n validate.xml_xpath_string(\".//script[contains(text(),'window.FTVPlayerVideos')][1]/text()\"),\n str,\n validate.regex(re.compile(\n r\"window\\.FTVPlayerVideos\\s*=\\s*(?P<json>\\[{.+?}])\\s*;\\s*(?:$|var)\",\n re.DOTALL,\n )),\n validate.get(\"json\"),\n validate.parse_json(),\n [{\"videoId\": str}],\n validate.get((0, \"videoId\")),\n ),\n validate.all(\n validate.xml_xpath_string(\".//script[contains(text(),'new Magnetoscope')][1]/text()\"),\n str,\n validate.regex(re.compile(\n r\"\"\"player\\.load\\s*\\(\\s*{\\s*src\\s*:\\s*(?P<q>['\"])(?P<video_id>.+?)(?P=q)\\s*}\\s*\\)\\s*;\"\"\",\n )),\n validate.get(\"video_id\"),\n ),\n validate.all(\n validate.xml_xpath_string(\".//*[@id][contains(@class,'francetv-player-wrapper')][1]/@id\"),\n str,\n ),\n validate.all(\n validate.xml_xpath_string(\".//*[@data-id][@class='magneto'][1]/@data-id\"),\n str,\n ),\n ),\n ))\n except PluginError:\n pass\n if not video_id:\n return\n log.debug(f\"Video ID: {video_id}\")\n\n api_url = update_qsd(self.API_URL.format(video_id=video_id), 
{\n \"country_code\": country_code,\n \"w\": 1920,\n \"h\": 1080,\n \"player_version\": self.PLAYER_VERSION,\n \"domain\": urlparse(self.url).netloc,\n \"device_type\": \"mobile\",\n \"browser\": \"chrome\",\n \"browser_version\": CHROME_VERSION,\n \"os\": \"ios\",\n \"gmt\": localnow().strftime(\"%z\"),\n })\n video_format, token_url, url, self.title = self.session.http.get(api_url, schema=validate.Schema(\n validate.parse_json(),\n {\n \"video\": {\n \"workflow\": validate.any(\"token-akamai\", \"dai\"),\n \"format\": validate.any(\"dash\", \"hls\"),\n \"token\": validate.url(),\n \"url\": validate.url(),\n },\n \"meta\": {\n \"title\": str,\n },\n },\n validate.union_get(\n (\"video\", \"format\"),\n (\"video\", \"token\"),\n (\"video\", \"url\"),\n (\"meta\", \"title\"),\n ),\n ))\n\n data_url = update_qsd(token_url, {\n \"url\": url,\n })\n video_url = self.session.http.get(data_url, schema=validate.Schema(\n validate.parse_json(),\n {\"url\": validate.url()},\n validate.get(\"url\"),\n ))\n\n if video_format == \"dash\":\n yield from DASHStream.parse_manifest(self.session, video_url).items()\n elif video_format == \"hls\":\n yield from HLSStream.parse_variant_playlist(self.session, video_url).items()\n\n\n__plugin__ = Pluzz\n", "path": "src/streamlink/plugins/pluzz.py"}], "after_files": [{"content": "\"\"\"\n$description Live TV channels and video on-demand service from france.tv, a French public, state-owned broadcaster.\n$url france.tv\n$url francetvinfo.fr\n$type live, vod\n$region France, Andorra, Monaco\n\"\"\"\n\nimport logging\nimport re\nfrom urllib.parse import urlparse\n\nfrom streamlink.plugin import Plugin, PluginError, pluginmatcher\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.times import localnow\nfrom streamlink.utils.url import update_qsd\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"\"\"\n https?://(?:\n (?:www\\.)?france\\.tv/\n |\n (?:.+\\.)?francetvinfo\\.fr/\n )\n\"\"\", re.VERBOSE))\nclass Pluzz(Plugin):\n PLAYER_VERSION = \"5.51.35\"\n GEO_URL = \"https://geoftv-a.akamaihd.net/ws/edgescape.json\"\n API_URL = \"https://player.webservices.francetelevisions.fr/v1/videos/{video_id}\"\n\n def _get_streams(self):\n self.session.http.headers.update({\n \"User-Agent\": useragents.CHROME,\n })\n CHROME_VERSION = re.compile(r\"Chrome/(\\d+)\").search(useragents.CHROME).group(1)\n\n # Retrieve geolocation data\n country_code = self.session.http.get(self.GEO_URL, schema=validate.Schema(\n validate.parse_json(),\n {\"reponse\": {\"geo_info\": {\n \"country_code\": str,\n }}},\n validate.get((\"reponse\", \"geo_info\", \"country_code\")),\n ))\n log.debug(f\"Country: {country_code}\")\n\n # Retrieve URL page and search for video ID\n video_id = None\n try:\n video_id = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.any(\n validate.all(\n validate.xml_xpath_string(\".//script[contains(text(),'window.FTVPlayerVideos')][1]/text()\"),\n str,\n validate.regex(re.compile(\n r\"window\\.FTVPlayerVideos\\s*=\\s*(?P<json>\\[{.+?}])\\s*;\\s*(?:$|var)\",\n re.DOTALL,\n )),\n validate.get(\"json\"),\n validate.parse_json(),\n [{\"videoId\": str}],\n validate.get((0, \"videoId\")),\n ),\n validate.all(\n validate.xml_xpath_string(\".//script[contains(text(),'new Magnetoscope')][1]/text()\"),\n str,\n validate.regex(re.compile(\n 
r\"\"\"player\\.load\\s*\\(\\s*{\\s*src\\s*:\\s*(?P<q>['\"])(?P<video_id>.+?)(?P=q)\\s*}\\s*\\)\\s*;\"\"\",\n )),\n validate.get(\"video_id\"),\n ),\n validate.all(\n validate.xml_xpath_string(\".//*[@id][contains(@class,'francetv-player-wrapper')][1]/@id\"),\n str,\n ),\n validate.all(\n validate.xml_xpath_string(\".//*[@data-id][contains(@class,'magneto')][1]/@data-id\"),\n str,\n ),\n ),\n ))\n except PluginError:\n pass\n if not video_id:\n return\n log.debug(f\"Video ID: {video_id}\")\n\n api_url = update_qsd(self.API_URL.format(video_id=video_id), {\n \"country_code\": country_code,\n \"w\": 1920,\n \"h\": 1080,\n \"player_version\": self.PLAYER_VERSION,\n \"domain\": urlparse(self.url).netloc,\n \"device_type\": \"mobile\",\n \"browser\": \"chrome\",\n \"browser_version\": CHROME_VERSION,\n \"os\": \"ios\",\n \"gmt\": localnow().strftime(\"%z\"),\n })\n video_format, token_url, url, self.title = self.session.http.get(api_url, schema=validate.Schema(\n validate.parse_json(),\n {\n \"video\": {\n \"workflow\": validate.any(\"token-akamai\", \"dai\"),\n \"format\": validate.any(\"dash\", \"hls\"),\n \"token\": validate.url(),\n \"url\": validate.url(),\n },\n \"meta\": {\n \"title\": str,\n },\n },\n validate.union_get(\n (\"video\", \"format\"),\n (\"video\", \"token\"),\n (\"video\", \"url\"),\n (\"meta\", \"title\"),\n ),\n ))\n\n data_url = update_qsd(token_url, {\n \"url\": url,\n })\n video_url = self.session.http.get(data_url, schema=validate.Schema(\n validate.parse_json(),\n {\"url\": validate.url()},\n validate.get(\"url\"),\n ))\n\n if video_format == \"dash\":\n yield from DASHStream.parse_manifest(self.session, video_url).items()\n elif video_format == \"hls\":\n yield from HLSStream.parse_variant_playlist(self.session, video_url).items()\n\n\n__plugin__ = Pluzz\n", "path": "src/streamlink/plugins/pluzz.py"}]} | 3,043 | 118 |
gh_patches_debug_12658 | rasdani/github-patches | git_diff | pytorch__text-112 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
python2 iterator bug?
here is the snippet:
```
from __future__ import print_function
from torchtext.datasets import WikiText2
i_train, i_dev, i_test = WikiText2.iters(device='-1', root="data/")
for i in i_train:
print(i)
```
here's the error:
```
Traceback (most recent call last):
File "bug.py", line 5, in <module>
for i in i_train:
File "/u/bosctom/.local/lib/python2.7/site-packages/torchtext/data/iterator.py", line 218, in __iter__
self.batch_size - len(text)))
TypeError: can't multiply sequence by non-int of type 'float'
```
The bug seems to be related to Python 2's math.ceil() returning a float.
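
A minimal sketch of that suspicion (the numbers and tokens are illustrative, not taken from WikiText2): with true division, `math.ceil()` returns a float on Python 2 but an int on Python 3, and multiplying a list by a float raises exactly the reported TypeError; wrapping the result in `int()` works on both.

```python
from __future__ import division  # same division semantics on Python 2 and 3
import math

text = ["tok"] * 10   # pretend token stream
batch_size = 4

pad_count = math.ceil(len(text) / batch_size) * batch_size - len(text)
# Python 2: pad_count == 2.0 (float) -> ["<pad>"] * pad_count raises TypeError
# Python 3: pad_count == 2   (int)   -> multiplies fine
padded = text + ["<pad>"] * int(pad_count)  # int() makes it safe on both
print(len(padded))  # 12
```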
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchtext/data/iterator.py`
Content:
```
1 from __future__ import division
2
3 import math
4 import random
5 from contextlib import contextmanager
6 from copy import deepcopy
7
8 from .batch import Batch
9 from .dataset import Dataset
10
11
12 class RandomShuffler(object):
13 """Use random functions while keeping track of the random state to make it
14 reproducible and deterministic."""
15
16 def __init__(self, random_state=None):
17 self._random_state = random_state
18 if self._random_state is None:
19 self._random_state = random.getstate()
20
21 @contextmanager
22 def use_internal_state(self):
23 """Use a specific RNG state."""
24 old_state = random.getstate()
25 random.setstate(self._random_state)
26 yield
27 self._random_state = random.getstate()
28 random.setstate(old_state)
29
30 @property
31 def random_state(self):
32 return deepcopy(self._random_state)
33
34 @random_state.setter
35 def random_state(self, s):
36 self._random_state = s
37
38 def __call__(self, data):
39 """Shuffle and return a new list."""
40 with self.use_internal_state():
41 return random.sample(data, len(data))
42
43
44 class Iterator(object):
45 """Defines an iterator that loads batches of data from a Dataset.
46
47 Attributes:
48 dataset: The Dataset object to load Examples from.
49 batch_size: Batch size.
50 batch_size_fn: Function of three arguments (new example to add, current
51 count of examples in the batch, and current effective batch size)
52 that returns the new effective batch size resulting from adding
53 that example to a batch. This is useful for dynamic batching, where
54 this function would add to the current effective batch size the
55 number of tokens in the new example.
56 sort_key: A key to use for sorting examples in order to batch together
57 examples with similar lengths and minimize padding. The sort_key
58 provided to the Iterator constructor overrides the sort_key
59 attribute of the Dataset, or defers to it if None.
60 train: Whether the iterator represents a train set.
61 repeat: Whether to repeat the iterator for multiple epochs.
62 shuffle: Whether to shuffle examples between epochs.
63 sort: Whether to sort examples according to self.sort_key.
64 Note that repeat, shuffle, and sort default to train, train, and
65 (not train).
66 device: Device to create batches on. Use -1 for CPU and None for the
67 currently active GPU device.
68 """
69
70 def __init__(self, dataset, batch_size, sort_key=None, device=None,
71 batch_size_fn=lambda new, count, sofar: count, train=True,
72 repeat=None, shuffle=None, sort=None):
73 self.batch_size, self.train, self.dataset = batch_size, train, dataset
74 self.batch_size_fn = batch_size_fn
75 self.iterations = 0
76 self.repeat = train if repeat is None else repeat
77 self.shuffle = train if shuffle is None else shuffle
78 self.sort = not train if sort is None else sort
79 if sort_key is None:
80 self.sort_key = dataset.sort_key
81 else:
82 self.sort_key = sort_key
83 self.device = device
84
85 self.random_shuffler = RandomShuffler()
86
87 # For state loading/saving only
88 self._iterations_this_epoch = 0
89 self._random_state_this_epoch = None
90 self._restored_from_state = False
91
92 @classmethod
93 def splits(cls, datasets, batch_sizes=None, **kwargs):
94 """Create Iterator objects for multiple splits of a dataset.
95
96 Arguments:
97 datasets: Tuple of Dataset objects corresponding to the splits. The
98 first such object should be the train set.
99 batch_sizes: Tuple of batch sizes to use for the different splits,
100 or None to use the same batch_size for all splits.
101 Remaining keyword arguments: Passed to the constructor of the
102 iterator class being used.
103 """
104 if batch_sizes is None:
105 batch_sizes = [kwargs.pop('batch_size')] * len(datasets)
106 ret = []
107 for i in range(len(datasets)):
108 train = i == 0
109 ret.append(cls(
110 datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))
111 return tuple(ret)
112
113 def data(self):
114 """Return the examples in the dataset in order, sorted, or shuffled."""
115 if self.sort:
116 xs = sorted(self.dataset, key=self.sort_key)
117 elif self.shuffle:
118 xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]
119 else:
120 xs = self.dataset
121 return xs
122
123 def init_epoch(self):
124 """Set up the batch generator for a new epoch."""
125
126 if self._restored_from_state:
127 self.random_shuffler.random_state = self._random_state_this_epoch
128 else:
129 self._random_state_this_epoch = self.random_shuffler.random_state
130
131 self.create_batches()
132
133 if self._restored_from_state:
134 self._restored_from_state = False
135 else:
136 self._iterations_this_epoch = 0
137
138 if not self.repeat:
139 self.iterations = 0
140
141 def create_batches(self):
142 self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)
143
144 @property
145 def epoch(self):
146 return self.iterations / len(self)
147
148 def __len__(self):
149 return math.ceil(len(self.dataset) / self.batch_size)
150
151 def __iter__(self):
152 while True:
153 self.init_epoch()
154 for idx, minibatch in enumerate(self.batches):
155 # fast-forward if loaded from state
156 if self._iterations_this_epoch > idx:
157 continue
158 self.iterations += 1
159 self._iterations_this_epoch += 1
160 # NOTE: `rnn.pack_padded_sequence` requires that a minibatch be sorted by
161 # decreasing order, which requires reversing relative to typical sort keys
162 minibatch.reverse()
163 yield Batch(minibatch, self.dataset, self.device,
164 self.train)
165 if not self.repeat:
166 raise StopIteration
167
168 def state_dict(self):
169 return {
170 "iterations": self.iterations,
171 "iterations_this_epoch": self._iterations_this_epoch,
172 "random_state_this_epoch": self._random_state_this_epoch}
173
174 def load_state_dict(self, state_dict):
175 self.iterations = state_dict["iterations"]
176 self._iterations_this_epoch = state_dict["iterations_this_epoch"]
177 self._random_state_this_epoch = state_dict["random_state_this_epoch"]
178 self._restored_from_state = True
179
180
181 class BPTTIterator(Iterator):
182 """Defines an iterator for language modeling tasks that use BPTT.
183
184 Provides contiguous streams of examples together with targets that are
185 one timestep further forward, for language modeling training with
186 backpropagation through time (BPTT). Expects a Dataset with a single
187 example and a single field called 'text' and produces Batches with text and
188 target attributes.
189
190 Attributes:
191 dataset: The Dataset object to load Examples from.
192 batch_size: Batch size.
193 bptt_len: Length of sequences for backpropagation through time.
194 sort_key: A key to use for sorting examples in order to batch together
195 examples with similar lengths and minimize padding. The sort_key
196 provided to the Iterator constructor overrides the sort_key
197 attribute of the Dataset, or defers to it if None.
198 train: Whether the iterator represents a train set.
199 repeat: Whether to repeat the iterator for multiple epochs.
200 shuffle: Whether to shuffle examples between epochs.
201 sort: Whether to sort examples according to self.sort_key.
202 Note that repeat, shuffle, and sort default to train, train, and
203 (not train).
204 device: Device to create batches on. Use -1 for CPU and None for the
205 currently active GPU device.
206 """
207
208 def __init__(self, dataset, batch_size, bptt_len, **kwargs):
209 self.bptt_len = bptt_len
210 super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)
211
212 def __len__(self):
213 return math.ceil(len(self.dataset[0].text) /
214 (self.batch_size * self.bptt_len))
215
216 def __iter__(self):
217 text = self.dataset[0].text
218 TEXT = self.dataset.fields['text']
219 TEXT.eos_token = None
220 text = text + ([TEXT.pad_token] * (math.ceil(len(text) / self.batch_size) *
221 self.batch_size - len(text)))
222 data = TEXT.numericalize(
223 [text], device=self.device, train=self.train)
224 data = data.view(self.batch_size, -1).t().contiguous()
225 dataset = Dataset(examples=self.dataset.examples, fields=[
226 ('text', TEXT), ('target', TEXT)])
227 while True:
228 for i in range(0, len(self) * self.bptt_len, self.bptt_len):
229 seq_len = min(self.bptt_len, len(data) - 1 - i)
230 yield Batch.fromvars(
231 dataset, self.batch_size, train=self.train,
232 text=data[i:i + seq_len],
233 target=data[i + 1:i + 1 + seq_len])
234 if not self.repeat:
235 raise StopIteration
236
237
238 class BucketIterator(Iterator):
239 """Defines an iterator that batches examples of similar lengths together.
240
241 Minimizes amount of padding needed while producing freshly shuffled
242 batches for each new epoch. See pool for the bucketing procedure used.
243 """
244
245 def create_batches(self):
246 if self.sort:
247 self.batches = batch(self.data(), self.batch_size,
248 self.batch_size_fn)
249 else:
250 self.batches = pool(self.data(), self.batch_size,
251 self.sort_key, self.batch_size_fn,
252 random_shuffler=self.random_shuffler)
253
254
255 def batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):
256 """Yield elements from data in chunks of batch_size."""
257 minibatch, size_so_far = [], 0
258 for ex in data:
259 minibatch.append(ex)
260 size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
261 if size_so_far == batch_size:
262 yield minibatch
263 minibatch, size_so_far = [], 0
264 elif size_so_far > batch_size:
265 yield minibatch[:-1]
266 minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)
267 if minibatch:
268 yield minibatch
269
270
271 def pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,
272 random_shuffler=None):
273 """Sort within buckets, then batch, then shuffle batches.
274
275 Partitions data into chunks of size 100*batch_size, sorts examples within
276 each chunk using sort_key, then batch these examples and shuffle the
277 batches.
278 """
279 if random_shuffler is None:
280 random_shuffler = random.shuffle
281 for p in batch(data, batch_size * 100, batch_size_fn):
282 p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)
283 for b in random_shuffler(list(p_batch)):
284 yield b
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchtext/data/iterator.py b/torchtext/data/iterator.py
--- a/torchtext/data/iterator.py
+++ b/torchtext/data/iterator.py
@@ -217,8 +217,8 @@
text = self.dataset[0].text
TEXT = self.dataset.fields['text']
TEXT.eos_token = None
- text = text + ([TEXT.pad_token] * (math.ceil(len(text) / self.batch_size) *
- self.batch_size - len(text)))
+ text = text + ([TEXT.pad_token] * int(math.ceil(len(text) / self.batch_size) *
+ self.batch_size - len(text)))
data = TEXT.numericalize(
[text], device=self.device, train=self.train)
data = data.view(self.batch_size, -1).t().contiguous()
| {"golden_diff": "diff --git a/torchtext/data/iterator.py b/torchtext/data/iterator.py\n--- a/torchtext/data/iterator.py\n+++ b/torchtext/data/iterator.py\n@@ -217,8 +217,8 @@\n text = self.dataset[0].text\n TEXT = self.dataset.fields['text']\n TEXT.eos_token = None\n- text = text + ([TEXT.pad_token] * (math.ceil(len(text) / self.batch_size) *\n- self.batch_size - len(text)))\n+ text = text + ([TEXT.pad_token] * int(math.ceil(len(text) / self.batch_size) *\n+ self.batch_size - len(text)))\n data = TEXT.numericalize(\n [text], device=self.device, train=self.train)\n data = data.view(self.batch_size, -1).t().contiguous()\n", "issue": "python2 iterator bug?\nhere is the snippet:\r\n\r\n```\r\nfrom __future__ import print_function\r\nfrom torchtext.datasets import WikiText2\r\n\r\ni_train, i_dev, i_test = WikiText2.iters(device='-1', root=\"data/\")\r\nfor i in i_train:\r\n print(i)\r\n```\r\n\r\nhere's the error:\r\n```\r\nTraceback (most recent call last):\r\n File \"bug.py\", line 5, in <module>\r\n for i in i_train:\r\n File \"/u/bosctom/.local/lib/python2.7/site-packages/torchtext/data/iterator.py\", line 218, in __iter__\r\n self.batch_size - len(text)))\r\nTypeError: can't multiply sequence by non-int of type 'float'\r\n```\r\n\r\nThe bug is related to python2's math.ceil() returning a float?\n", "before_files": [{"content": "from __future__ import division\n\nimport math\nimport random\nfrom contextlib import contextmanager\nfrom copy import deepcopy\n\nfrom .batch import Batch\nfrom .dataset import Dataset\n\n\nclass RandomShuffler(object):\n \"\"\"Use random functions while keeping track of the random state to make it\n reproducible and deterministic.\"\"\"\n\n def __init__(self, random_state=None):\n self._random_state = random_state\n if self._random_state is None:\n self._random_state = random.getstate()\n\n @contextmanager\n def use_internal_state(self):\n \"\"\"Use a specific RNG state.\"\"\"\n old_state = random.getstate()\n random.setstate(self._random_state)\n yield\n self._random_state = random.getstate()\n random.setstate(old_state)\n\n @property\n def random_state(self):\n return deepcopy(self._random_state)\n\n @random_state.setter\n def random_state(self, s):\n self._random_state = s\n\n def __call__(self, data):\n \"\"\"Shuffle and return a new list.\"\"\"\n with self.use_internal_state():\n return random.sample(data, len(data))\n\n\nclass Iterator(object):\n \"\"\"Defines an iterator that loads batches of data from a Dataset.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n batch_size_fn: Function of three arguments (new example to add, current\n count of examples in the batch, and current effective batch size)\n that returns the new effective batch size resulting from adding\n that example to a batch. This is useful for dynamic batching, where\n this function would add to the current effective batch size the\n number of tokens in the new example.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. 
The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n batch_size_fn=lambda new, count, sofar: count, train=True,\n repeat=None, shuffle=None, sort=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n self.batch_size_fn = batch_size_fn\n self.iterations = 0\n self.repeat = train if repeat is None else repeat\n self.shuffle = train if shuffle is None else shuffle\n self.sort = not train if sort is None else sort\n if sort_key is None:\n self.sort_key = dataset.sort_key\n else:\n self.sort_key = sort_key\n self.device = device\n\n self.random_shuffler = RandomShuffler()\n\n # For state loading/saving only\n self._iterations_this_epoch = 0\n self._random_state_this_epoch = None\n self._restored_from_state = False\n\n @classmethod\n def splits(cls, datasets, batch_sizes=None, **kwargs):\n \"\"\"Create Iterator objects for multiple splits of a dataset.\n\n Arguments:\n datasets: Tuple of Dataset objects corresponding to the splits. The\n first such object should be the train set.\n batch_sizes: Tuple of batch sizes to use for the different splits,\n or None to use the same batch_size for all splits.\n Remaining keyword arguments: Passed to the constructor of the\n iterator class being used.\n \"\"\"\n if batch_sizes is None:\n batch_sizes = [kwargs.pop('batch_size')] * len(datasets)\n ret = []\n for i in range(len(datasets)):\n train = i == 0\n ret.append(cls(\n datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))\n return tuple(ret)\n\n def data(self):\n \"\"\"Return the examples in the dataset in order, sorted, or shuffled.\"\"\"\n if self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n elif self.shuffle:\n xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n else:\n xs = self.dataset\n return xs\n\n def init_epoch(self):\n \"\"\"Set up the batch generator for a new epoch.\"\"\"\n\n if self._restored_from_state:\n self.random_shuffler.random_state = self._random_state_this_epoch\n else:\n self._random_state_this_epoch = self.random_shuffler.random_state\n\n self.create_batches()\n\n if self._restored_from_state:\n self._restored_from_state = False\n else:\n self._iterations_this_epoch = 0\n\n if not self.repeat:\n self.iterations = 0\n\n def create_batches(self):\n self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)\n\n @property\n def epoch(self):\n return self.iterations / len(self)\n\n def __len__(self):\n return math.ceil(len(self.dataset) / self.batch_size)\n\n def __iter__(self):\n while True:\n self.init_epoch()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n # NOTE: `rnn.pack_padded_sequence` requires that a minibatch be sorted by\n # decreasing order, which requires reversing relative to typical sort keys\n minibatch.reverse()\n yield Batch(minibatch, self.dataset, self.device,\n 
self.train)\n if not self.repeat:\n raise StopIteration\n\n def state_dict(self):\n return {\n \"iterations\": self.iterations,\n \"iterations_this_epoch\": self._iterations_this_epoch,\n \"random_state_this_epoch\": self._random_state_this_epoch}\n\n def load_state_dict(self, state_dict):\n self.iterations = state_dict[\"iterations\"]\n self._iterations_this_epoch = state_dict[\"iterations_this_epoch\"]\n self._random_state_this_epoch = state_dict[\"random_state_this_epoch\"]\n self._restored_from_state = True\n\n\nclass BPTTIterator(Iterator):\n \"\"\"Defines an iterator for language modeling tasks that use BPTT.\n\n Provides contiguous streams of examples together with targets that are\n one timestep further forward, for language modeling training with\n backpropagation through time (BPTT). Expects a Dataset with a single\n example and a single field called 'text' and produces Batches with text and\n target attributes.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n bptt_len: Length of sequences for backpropagation through time.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, bptt_len, **kwargs):\n self.bptt_len = bptt_len\n super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)\n\n def __len__(self):\n return math.ceil(len(self.dataset[0].text) /\n (self.batch_size * self.bptt_len))\n\n def __iter__(self):\n text = self.dataset[0].text\n TEXT = self.dataset.fields['text']\n TEXT.eos_token = None\n text = text + ([TEXT.pad_token] * (math.ceil(len(text) / self.batch_size) *\n self.batch_size - len(text)))\n data = TEXT.numericalize(\n [text], device=self.device, train=self.train)\n data = data.view(self.batch_size, -1).t().contiguous()\n dataset = Dataset(examples=self.dataset.examples, fields=[\n ('text', TEXT), ('target', TEXT)])\n while True:\n for i in range(0, len(self) * self.bptt_len, self.bptt_len):\n seq_len = min(self.bptt_len, len(data) - 1 - i)\n yield Batch.fromvars(\n dataset, self.batch_size, train=self.train,\n text=data[i:i + seq_len],\n target=data[i + 1:i + 1 + seq_len])\n if not self.repeat:\n raise StopIteration\n\n\nclass BucketIterator(Iterator):\n \"\"\"Defines an iterator that batches examples of similar lengths together.\n\n Minimizes amount of padding needed while producing freshly shuffled\n batches for each new epoch. 
See pool for the bucketing procedure used.\n \"\"\"\n\n def create_batches(self):\n if self.sort:\n self.batches = batch(self.data(), self.batch_size,\n self.batch_size_fn)\n else:\n self.batches = pool(self.data(), self.batch_size,\n self.sort_key, self.batch_size_fn,\n random_shuffler=self.random_shuffler)\n\n\ndef batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)\n if minibatch:\n yield minibatch\n\n\ndef pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,\n random_shuffler=None):\n \"\"\"Sort within buckets, then batch, then shuffle batches.\n\n Partitions data into chunks of size 100*batch_size, sorts examples within\n each chunk using sort_key, then batch these examples and shuffle the\n batches.\n \"\"\"\n if random_shuffler is None:\n random_shuffler = random.shuffle\n for p in batch(data, batch_size * 100, batch_size_fn):\n p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n", "path": "torchtext/data/iterator.py"}], "after_files": [{"content": "from __future__ import division\n\nimport math\nimport random\nfrom contextlib import contextmanager\nfrom copy import deepcopy\n\nfrom .batch import Batch\nfrom .dataset import Dataset\n\n\nclass RandomShuffler(object):\n \"\"\"Use random functions while keeping track of the random state to make it\n reproducible and deterministic.\"\"\"\n\n def __init__(self, random_state=None):\n self._random_state = random_state\n if self._random_state is None:\n self._random_state = random.getstate()\n\n @contextmanager\n def use_internal_state(self):\n \"\"\"Use a specific RNG state.\"\"\"\n old_state = random.getstate()\n random.setstate(self._random_state)\n yield\n self._random_state = random.getstate()\n random.setstate(old_state)\n\n @property\n def random_state(self):\n return deepcopy(self._random_state)\n\n @random_state.setter\n def random_state(self, s):\n self._random_state = s\n\n def __call__(self, data):\n \"\"\"Shuffle and return a new list.\"\"\"\n with self.use_internal_state():\n return random.sample(data, len(data))\n\n\nclass Iterator(object):\n \"\"\"Defines an iterator that loads batches of data from a Dataset.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n batch_size_fn: Function of three arguments (new example to add, current\n count of examples in the batch, and current effective batch size)\n that returns the new effective batch size resulting from adding\n that example to a batch. This is useful for dynamic batching, where\n this function would add to the current effective batch size the\n number of tokens in the new example.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. 
The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n batch_size_fn=lambda new, count, sofar: count, train=True,\n repeat=None, shuffle=None, sort=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n self.batch_size_fn = batch_size_fn\n self.iterations = 0\n self.repeat = train if repeat is None else repeat\n self.shuffle = train if shuffle is None else shuffle\n self.sort = not train if sort is None else sort\n if sort_key is None:\n self.sort_key = dataset.sort_key\n else:\n self.sort_key = sort_key\n self.device = device\n\n self.random_shuffler = RandomShuffler()\n\n # For state loading/saving only\n self._iterations_this_epoch = 0\n self._random_state_this_epoch = None\n self._restored_from_state = False\n\n @classmethod\n def splits(cls, datasets, batch_sizes=None, **kwargs):\n \"\"\"Create Iterator objects for multiple splits of a dataset.\n\n Arguments:\n datasets: Tuple of Dataset objects corresponding to the splits. The\n first such object should be the train set.\n batch_sizes: Tuple of batch sizes to use for the different splits,\n or None to use the same batch_size for all splits.\n Remaining keyword arguments: Passed to the constructor of the\n iterator class being used.\n \"\"\"\n if batch_sizes is None:\n batch_sizes = [kwargs.pop('batch_size')] * len(datasets)\n ret = []\n for i in range(len(datasets)):\n train = i == 0\n ret.append(cls(\n datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))\n return tuple(ret)\n\n def data(self):\n \"\"\"Return the examples in the dataset in order, sorted, or shuffled.\"\"\"\n if self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n elif self.shuffle:\n xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n else:\n xs = self.dataset\n return xs\n\n def init_epoch(self):\n \"\"\"Set up the batch generator for a new epoch.\"\"\"\n\n if self._restored_from_state:\n self.random_shuffler.random_state = self._random_state_this_epoch\n else:\n self._random_state_this_epoch = self.random_shuffler.random_state\n\n self.create_batches()\n\n if self._restored_from_state:\n self._restored_from_state = False\n else:\n self._iterations_this_epoch = 0\n\n if not self.repeat:\n self.iterations = 0\n\n def create_batches(self):\n self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)\n\n @property\n def epoch(self):\n return self.iterations / len(self)\n\n def __len__(self):\n return math.ceil(len(self.dataset) / self.batch_size)\n\n def __iter__(self):\n while True:\n self.init_epoch()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n # NOTE: `rnn.pack_padded_sequence` requires that a minibatch be sorted by\n # decreasing order, which requires reversing relative to typical sort keys\n minibatch.reverse()\n yield Batch(minibatch, self.dataset, self.device,\n 
self.train)\n if not self.repeat:\n raise StopIteration\n\n def state_dict(self):\n return {\n \"iterations\": self.iterations,\n \"iterations_this_epoch\": self._iterations_this_epoch,\n \"random_state_this_epoch\": self._random_state_this_epoch}\n\n def load_state_dict(self, state_dict):\n self.iterations = state_dict[\"iterations\"]\n self._iterations_this_epoch = state_dict[\"iterations_this_epoch\"]\n self._random_state_this_epoch = state_dict[\"random_state_this_epoch\"]\n self._restored_from_state = True\n\n\nclass BPTTIterator(Iterator):\n \"\"\"Defines an iterator for language modeling tasks that use BPTT.\n\n Provides contiguous streams of examples together with targets that are\n one timestep further forward, for language modeling training with\n backpropagation through time (BPTT). Expects a Dataset with a single\n example and a single field called 'text' and produces Batches with text and\n target attributes.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n bptt_len: Length of sequences for backpropagation through time.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, bptt_len, **kwargs):\n self.bptt_len = bptt_len\n super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)\n\n def __len__(self):\n return math.ceil(len(self.dataset[0].text) /\n (self.batch_size * self.bptt_len))\n\n def __iter__(self):\n text = self.dataset[0].text\n TEXT = self.dataset.fields['text']\n TEXT.eos_token = None\n text = text + ([TEXT.pad_token] * int(math.ceil(len(text) / self.batch_size) *\n self.batch_size - len(text)))\n data = TEXT.numericalize(\n [text], device=self.device, train=self.train)\n data = data.view(self.batch_size, -1).t().contiguous()\n dataset = Dataset(examples=self.dataset.examples, fields=[\n ('text', TEXT), ('target', TEXT)])\n while True:\n for i in range(0, len(self) * self.bptt_len, self.bptt_len):\n seq_len = min(self.bptt_len, len(data) - 1 - i)\n yield Batch.fromvars(\n dataset, self.batch_size, train=self.train,\n text=data[i:i + seq_len],\n target=data[i + 1:i + 1 + seq_len])\n if not self.repeat:\n raise StopIteration\n\n\nclass BucketIterator(Iterator):\n \"\"\"Defines an iterator that batches examples of similar lengths together.\n\n Minimizes amount of padding needed while producing freshly shuffled\n batches for each new epoch. 
See pool for the bucketing procedure used.\n \"\"\"\n\n def create_batches(self):\n if self.sort:\n self.batches = batch(self.data(), self.batch_size,\n self.batch_size_fn)\n else:\n self.batches = pool(self.data(), self.batch_size,\n self.sort_key, self.batch_size_fn,\n random_shuffler=self.random_shuffler)\n\n\ndef batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)\n if minibatch:\n yield minibatch\n\n\ndef pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,\n random_shuffler=None):\n \"\"\"Sort within buckets, then batch, then shuffle batches.\n\n Partitions data into chunks of size 100*batch_size, sorts examples within\n each chunk using sort_key, then batch these examples and shuffle the\n batches.\n \"\"\"\n if random_shuffler is None:\n random_shuffler = random.shuffle\n for p in batch(data, batch_size * 100, batch_size_fn):\n p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n", "path": "torchtext/data/iterator.py"}]} | 3,609 | 185 |
gh_patches_debug_30352 | rasdani/github-patches | git_diff | AnalogJ__lexicon-381 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PowerDNS dot notation
From PowerDNS 4.2 onwards, undotted notation is not supported.
From PowerDNS >=4.0 but <=4.1 the dotted notation is _optional_.
I suggest we implement the dotted notation _by default_, since that works both with the versions that require it and with the versions where it is optional.
We could have a flag to turn this behavior off for those running legacy versions (which don't support dotted notation), or simply add support for PowerDNS >=4.0 only.
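
For illustration, a small sketch of what dotted-by-default could look like when building an RRset payload (the zone, record name and address are placeholders; per the version behavior described above, the dot-terminated form is accepted by 4.0/4.1 and required from 4.2):

```python
# Sketch only: "www.example.com" / 192.0.2.1 are placeholders, not real data.
def canonical(name):
    """Return the dot-terminated (canonical) form; leave it alone if already dotted."""
    return name if name.endswith(".") else name + "."

rrset = {
    "name": canonical("www.example.com"),  # -> "www.example.com."
    "type": "A",
    "ttl": 600,
    "changetype": "REPLACE",
    "records": [{"content": "192.0.2.1", "disabled": False}],
}
payload = {"rrsets": [rrset]}  # body for a PATCH to /zones/<zone>
```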
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lexicon/providers/powerdns.py`
Content:
```
1 """
2 Lexicon PowerDNS Provider
3
4 Author: Will Hughes, 2017
5
6 API Docs: https://doc.powerdns.com/md/httpapi/api_spec/
7
8 Implementation notes:
9 * The PowerDNS API does not assign a unique identifier to each record in the way
10 that Lexicon expects. We work around this by creating an ID based on the record
11 name, type and content, which when taken together are always unique
12 * The PowerDNS API has no notion of 'create a single record' or 'delete a single
13 record'. All operations are either 'replace the RRSet with this new set of records'
14 or 'delete all records for this name and type. Similarly, there is no notion of
15 'change the content of this record', because records are identified by their name,
16 type and content.
17 * The API is very picky about the format of values used when creating records:
18 ** CNAMEs must be fully qualified
19 ** TXT, LOC records must be quoted
20 This is why the _clean_content and _unclean_content methods exist, to convert
21 back and forth between the format PowerDNS expects, and the format Lexicon uses
22 """
23 from __future__ import absolute_import
24 import json
25 import logging
26
27 import requests
28 from lexicon.providers.base import Provider as BaseProvider
29
30
31 LOGGER = logging.getLogger(__name__)
32
33 NAMESERVER_DOMAINS = []
34
35
36 def provider_parser(subparser):
37 """Configure provider parser for powerdns"""
38 subparser.add_argument(
39 "--auth-token", help="specify token for authentication")
40 subparser.add_argument("--pdns-server", help="URI for PowerDNS server")
41 subparser.add_argument(
42 "--pdns-server-id", help="Server ID to interact with")
43 subparser.add_argument(
44 "--pdns-disable-notify", help="Disable slave notifications from master")
45
46 class Provider(BaseProvider):
47 """Provider class for PowerDNS"""
48 def __init__(self, config):
49 super(Provider, self).__init__(config)
50
51 self.api_endpoint = self._get_provider_option('pdns_server')
52 self.disable_slave_notify = self._get_provider_option('pdns-disable-notify')
53
54 if self.api_endpoint.endswith('/'):
55 self.api_endpoint = self.api_endpoint[:-1]
56
57 if not self.api_endpoint.endswith("/api/v1"):
58 self.api_endpoint += "/api/v1"
59
60 self.server_id = self._get_provider_option('pdns_server_id')
61 if self.server_id is None:
62 self.server_id = 'localhost'
63
64 self.api_endpoint += "/servers/" + self.server_id
65
66 self.api_key = self._get_provider_option('auth_token')
67 assert self.api_key is not None
68 self._zone_data = None
69
70 def notify_slaves(self):
71 """Checks to see if slaves should be notified, and notifies them if needed"""
72 if self.disable_slave_notify is not None:
73 LOGGER.debug('Slave notifications disabled')
74 return False
75
76 if self.zone_data()['kind'] == 'Master':
77 response_code = self._put('/zones/' + self.domain + '/notify').status_code
78 if response_code == 200:
79 LOGGER.debug('Slave(s) notified')
80 return True
81 LOGGER.debug('Slave notification failed with code %i', response_code)
82 else:
83 LOGGER.debug('Zone type should be \'Master\' for slave notifications')
84 return False
85
86 def zone_data(self):
87 """Get zone data"""
88 if self._zone_data is None:
89 self._zone_data = self._get('/zones/' + self.domain).json()
90 return self._zone_data
91
92 def _authenticate(self):
93 self.zone_data()
94 self.domain_id = self.domain
95
96 def _make_identifier(self, rtype, name, content): # pylint: disable=no-self-use
97 return "{}/{}={}".format(rtype, name, content)
98
99 def _parse_identifier(self, identifier): # pylint: disable=no-self-use
100 parts = identifier.split('/')
101 rtype = parts[0]
102 parts = parts[1].split('=')
103 name = parts[0]
104 content = "=".join(parts[1:])
105 return rtype, name, content
106
107 def _list_records(self, rtype=None, name=None, content=None):
108 records = []
109 for rrset in self.zone_data()['rrsets']:
110 if (name is None or self._fqdn_name(rrset['name']) == self._fqdn_name(
111 name)) and (rtype is None or rrset['type'] == rtype):
112 for record in rrset['records']:
113 if content is None or record['content'] == self._clean_content(rtype, content):
114 records.append({
115 'type': rrset['type'],
116 'name': self._full_name(rrset['name']),
117 'ttl': rrset['ttl'],
118 'content': self._unclean_content(rrset['type'], record['content']),
119 'id': self._make_identifier(rrset['type'],
120 rrset['name'], record['content'])
121 })
122 LOGGER.debug('list_records: %s', records)
123 return records
124
125 def _clean_content(self, rtype, content):
126 if rtype in ("TXT", "LOC"):
127 if content[0] != '"':
128 content = '"' + content
129 if content[-1] != '"':
130 content += '"'
131 elif rtype == "CNAME":
132 content = self._fqdn_name(content)
133 return content
134
135 def _unclean_content(self, rtype, content):
136 if rtype in ("TXT", "LOC"):
137 content = content.strip('"')
138 elif rtype == "CNAME":
139 content = self._full_name(content)
140 return content
141
142 def _create_record(self, rtype, name, content):
143 rname = self._fqdn_name(name)
144 newcontent = self._clean_content(rtype, content)
145
146 updated_data = {
147 'name': rname,
148 'type': rtype,
149 'records': [],
150 'ttl': self._get_lexicon_option('ttl') or 600,
151 'changetype': 'REPLACE'
152 }
153
154 updated_data['records'].append({'content': newcontent, 'disabled': False})
155
156 for rrset in self.zone_data()['rrsets']:
157 if rrset['name'] == rname and rrset['type'] == rtype:
158 updated_data['ttl'] = rrset['ttl']
159
160 for record in rrset['records']:
161 if record['content'] != newcontent:
162 updated_data['records'].append(
163 {
164 'content': record['content'],
165 'disabled': record['disabled']
166 })
167 break
168
169 request = {'rrsets': [updated_data]}
170 LOGGER.debug('request: %s', request)
171
172 self._patch('/zones/' + self.domain, data=request)
173 self.notify_slaves()
174 self._zone_data = None
175 return True
176
177 def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
178 if identifier is not None:
179 rtype, name, content = self._parse_identifier(identifier)
180
181 LOGGER.debug("delete %s %s %s", rtype, name, content)
182 if rtype is None or name is None:
183 raise Exception("Must specify at least both rtype and name")
184
185 for rrset in self.zone_data()['rrsets']:
186 if rrset['type'] == rtype and self._fqdn_name(rrset['name']) == self._fqdn_name(name):
187 update_data = rrset
188
189 if 'comments' in update_data:
190 del update_data['comments']
191
192 if content is None:
193 update_data['records'] = []
194 update_data['changetype'] = 'DELETE'
195 else:
196 new_record_list = []
197 for record in update_data['records']:
198 if self._clean_content(rrset['type'], content) != record['content']:
199 new_record_list.append(record)
200
201 update_data['records'] = new_record_list
202 update_data['changetype'] = 'REPLACE'
203 break
204
205 request = {'rrsets': [update_data]}
206 LOGGER.debug('request: %s', request)
207
208 self._patch('/zones/' + self.domain, data=request)
209 self.notify_slaves()
210 self._zone_data = None
211 return True
212
213 def _update_record(self, identifier, rtype=None, name=None, content=None):
214 self._delete_record(identifier)
215 return self._create_record(rtype, name, content)
216
217 def _patch(self, url='/', data=None, query_params=None):
218 return self._request('PATCH', url, data=data, query_params=query_params)
219
220 def _request(self, action='GET', url='/', data=None, query_params=None):
221 if data is None:
222 data = {}
223 if query_params is None:
224 query_params = {}
225 response = requests.request(action, self.api_endpoint + url, params=query_params,
226 data=json.dumps(data),
227 headers={
228 'X-API-Key': self.api_key,
229 'Content-Type': 'application/json'
230 })
231 LOGGER.debug('response: %s', response.text)
232 response.raise_for_status()
233 return response
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lexicon/providers/powerdns.py b/lexicon/providers/powerdns.py
--- a/lexicon/providers/powerdns.py
+++ b/lexicon/providers/powerdns.py
@@ -86,7 +86,7 @@
def zone_data(self):
"""Get zone data"""
if self._zone_data is None:
- self._zone_data = self._get('/zones/' + self.domain).json()
+ self._zone_data = self._get('/zones/' + self._ensure_dot(self.domain)).json()
return self._zone_data
def _authenticate(self):
@@ -169,7 +169,7 @@
request = {'rrsets': [updated_data]}
LOGGER.debug('request: %s', request)
- self._patch('/zones/' + self.domain, data=request)
+ self._patch('/zones/' + self._ensure_dot(self.domain), data=request)
self.notify_slaves()
self._zone_data = None
return True
@@ -205,8 +205,9 @@
request = {'rrsets': [update_data]}
LOGGER.debug('request: %s', request)
- self._patch('/zones/' + self.domain, data=request)
+ self._patch('/zones/' + self._ensure_dot(self.domain), data=request)
self.notify_slaves()
+
self._zone_data = None
return True
@@ -231,3 +232,12 @@
LOGGER.debug('response: %s', response.text)
response.raise_for_status()
return response
+
+ @classmethod
+ def _ensure_dot(cls, text):
+ """
+ This function makes sure a string contains a dot at the end
+ """
+ if text.endswith("."):
+ return text
+ return text + "."
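For reference, the whole patch hinges on the trailing-dot normalization introduced by `_ensure_dot`; the snippet below restates that helper outside the provider class as an illustrative, standalone sketch (it is not part of the repository):

```python
# Standalone restatement of the _ensure_dot helper added by the patch.
# Dotted (fully qualified) zone names are what newer PowerDNS releases expect,
# so the provider normalizes the domain before building /zones/<name> URLs.
def ensure_dot(text):
    return text if text.endswith(".") else text + "."

assert ensure_dot("example.com") == "example.com."
assert ensure_dot("example.com.") == "example.com."  # already-dotted names pass through unchanged
```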
| {"golden_diff": "diff --git a/lexicon/providers/powerdns.py b/lexicon/providers/powerdns.py\n--- a/lexicon/providers/powerdns.py\n+++ b/lexicon/providers/powerdns.py\n@@ -86,7 +86,7 @@\n def zone_data(self):\n \"\"\"Get zone data\"\"\"\n if self._zone_data is None:\n- self._zone_data = self._get('/zones/' + self.domain).json()\n+ self._zone_data = self._get('/zones/' + self._ensure_dot(self.domain)).json()\n return self._zone_data\n \n def _authenticate(self):\n@@ -169,7 +169,7 @@\n request = {'rrsets': [updated_data]}\n LOGGER.debug('request: %s', request)\n \n- self._patch('/zones/' + self.domain, data=request)\n+ self._patch('/zones/' + self._ensure_dot(self.domain), data=request)\n self.notify_slaves()\n self._zone_data = None\n return True\n@@ -205,8 +205,9 @@\n request = {'rrsets': [update_data]}\n LOGGER.debug('request: %s', request)\n \n- self._patch('/zones/' + self.domain, data=request)\n+ self._patch('/zones/' + self._ensure_dot(self.domain), data=request)\n self.notify_slaves()\n+\n self._zone_data = None\n return True\n \n@@ -231,3 +232,12 @@\n LOGGER.debug('response: %s', response.text)\n response.raise_for_status()\n return response\n+\n+ @classmethod\n+ def _ensure_dot(cls, text):\n+ \"\"\"\n+ This function makes sure a string contains a dot at the end\n+ \"\"\"\n+ if text.endswith(\".\"):\n+ return text\n+ return text + \".\"\n", "issue": "PowerDNS dot notation\nFrom PowerDNS 4.2 onwards, undotted notation is not supported.\r\nFrom PowerDNS >=4.0 but <=4.1 the dotted notation is _optional_.\r\n\r\nI suggest we implement the dotted notation _by default_ since that will work with both dotted and undotted versions. \r\n\r\nWe could have a flag to turn this behavior for those running some legacy versions (which doesn't support dotted notation) or simply add support for PowerDNS >=4.0.\n", "before_files": [{"content": "\"\"\"\nLexicon PowerDNS Provider\n\nAuthor: Will Hughes, 2017\n\nAPI Docs: https://doc.powerdns.com/md/httpapi/api_spec/\n\nImplementation notes:\n* The PowerDNS API does not assign a unique identifier to each record in the way\nthat Lexicon expects. We work around this by creating an ID based on the record\nname, type and content, which when taken together are always unique\n* The PowerDNS API has no notion of 'create a single record' or 'delete a single\nrecord'. All operations are either 'replace the RRSet with this new set of records'\nor 'delete all records for this name and type. 
Similarly, there is no notion of\n'change the content of this record', because records are identified by their name,\ntype and content.\n* The API is very picky about the format of values used when creating records:\n** CNAMEs must be fully qualified\n** TXT, LOC records must be quoted\nThis is why the _clean_content and _unclean_content methods exist, to convert\nback and forth between the format PowerDNS expects, and the format Lexicon uses\n\"\"\"\nfrom __future__ import absolute_import\nimport json\nimport logging\n\nimport requests\nfrom lexicon.providers.base import Provider as BaseProvider\n\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESERVER_DOMAINS = []\n\n\ndef provider_parser(subparser):\n \"\"\"Configure provider parser for powerdns\"\"\"\n subparser.add_argument(\n \"--auth-token\", help=\"specify token for authentication\")\n subparser.add_argument(\"--pdns-server\", help=\"URI for PowerDNS server\")\n subparser.add_argument(\n \"--pdns-server-id\", help=\"Server ID to interact with\")\n subparser.add_argument(\n \"--pdns-disable-notify\", help=\"Disable slave notifications from master\")\n\nclass Provider(BaseProvider):\n \"\"\"Provider class for PowerDNS\"\"\"\n def __init__(self, config):\n super(Provider, self).__init__(config)\n\n self.api_endpoint = self._get_provider_option('pdns_server')\n self.disable_slave_notify = self._get_provider_option('pdns-disable-notify')\n\n if self.api_endpoint.endswith('/'):\n self.api_endpoint = self.api_endpoint[:-1]\n\n if not self.api_endpoint.endswith(\"/api/v1\"):\n self.api_endpoint += \"/api/v1\"\n\n self.server_id = self._get_provider_option('pdns_server_id')\n if self.server_id is None:\n self.server_id = 'localhost'\n\n self.api_endpoint += \"/servers/\" + self.server_id\n\n self.api_key = self._get_provider_option('auth_token')\n assert self.api_key is not None\n self._zone_data = None\n\n def notify_slaves(self):\n \"\"\"Checks to see if slaves should be notified, and notifies them if needed\"\"\"\n if self.disable_slave_notify is not None:\n LOGGER.debug('Slave notifications disabled')\n return False\n\n if self.zone_data()['kind'] == 'Master':\n response_code = self._put('/zones/' + self.domain + '/notify').status_code\n if response_code == 200:\n LOGGER.debug('Slave(s) notified')\n return True\n LOGGER.debug('Slave notification failed with code %i', response_code)\n else:\n LOGGER.debug('Zone type should be \\'Master\\' for slave notifications')\n return False\n\n def zone_data(self):\n \"\"\"Get zone data\"\"\"\n if self._zone_data is None:\n self._zone_data = self._get('/zones/' + self.domain).json()\n return self._zone_data\n\n def _authenticate(self):\n self.zone_data()\n self.domain_id = self.domain\n\n def _make_identifier(self, rtype, name, content): # pylint: disable=no-self-use\n return \"{}/{}={}\".format(rtype, name, content)\n\n def _parse_identifier(self, identifier): # pylint: disable=no-self-use\n parts = identifier.split('/')\n rtype = parts[0]\n parts = parts[1].split('=')\n name = parts[0]\n content = \"=\".join(parts[1:])\n return rtype, name, content\n\n def _list_records(self, rtype=None, name=None, content=None):\n records = []\n for rrset in self.zone_data()['rrsets']:\n if (name is None or self._fqdn_name(rrset['name']) == self._fqdn_name(\n name)) and (rtype is None or rrset['type'] == rtype):\n for record in rrset['records']:\n if content is None or record['content'] == self._clean_content(rtype, content):\n records.append({\n 'type': rrset['type'],\n 'name': self._full_name(rrset['name']),\n 
'ttl': rrset['ttl'],\n 'content': self._unclean_content(rrset['type'], record['content']),\n 'id': self._make_identifier(rrset['type'],\n rrset['name'], record['content'])\n })\n LOGGER.debug('list_records: %s', records)\n return records\n\n def _clean_content(self, rtype, content):\n if rtype in (\"TXT\", \"LOC\"):\n if content[0] != '\"':\n content = '\"' + content\n if content[-1] != '\"':\n content += '\"'\n elif rtype == \"CNAME\":\n content = self._fqdn_name(content)\n return content\n\n def _unclean_content(self, rtype, content):\n if rtype in (\"TXT\", \"LOC\"):\n content = content.strip('\"')\n elif rtype == \"CNAME\":\n content = self._full_name(content)\n return content\n\n def _create_record(self, rtype, name, content):\n rname = self._fqdn_name(name)\n newcontent = self._clean_content(rtype, content)\n\n updated_data = {\n 'name': rname,\n 'type': rtype,\n 'records': [],\n 'ttl': self._get_lexicon_option('ttl') or 600,\n 'changetype': 'REPLACE'\n }\n\n updated_data['records'].append({'content': newcontent, 'disabled': False})\n\n for rrset in self.zone_data()['rrsets']:\n if rrset['name'] == rname and rrset['type'] == rtype:\n updated_data['ttl'] = rrset['ttl']\n\n for record in rrset['records']:\n if record['content'] != newcontent:\n updated_data['records'].append(\n {\n 'content': record['content'],\n 'disabled': record['disabled']\n })\n break\n\n request = {'rrsets': [updated_data]}\n LOGGER.debug('request: %s', request)\n\n self._patch('/zones/' + self.domain, data=request)\n self.notify_slaves()\n self._zone_data = None\n return True\n\n def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n if identifier is not None:\n rtype, name, content = self._parse_identifier(identifier)\n\n LOGGER.debug(\"delete %s %s %s\", rtype, name, content)\n if rtype is None or name is None:\n raise Exception(\"Must specify at least both rtype and name\")\n\n for rrset in self.zone_data()['rrsets']:\n if rrset['type'] == rtype and self._fqdn_name(rrset['name']) == self._fqdn_name(name):\n update_data = rrset\n\n if 'comments' in update_data:\n del update_data['comments']\n\n if content is None:\n update_data['records'] = []\n update_data['changetype'] = 'DELETE'\n else:\n new_record_list = []\n for record in update_data['records']:\n if self._clean_content(rrset['type'], content) != record['content']:\n new_record_list.append(record)\n\n update_data['records'] = new_record_list\n update_data['changetype'] = 'REPLACE'\n break\n\n request = {'rrsets': [update_data]}\n LOGGER.debug('request: %s', request)\n\n self._patch('/zones/' + self.domain, data=request)\n self.notify_slaves()\n self._zone_data = None\n return True\n\n def _update_record(self, identifier, rtype=None, name=None, content=None):\n self._delete_record(identifier)\n return self._create_record(rtype, name, content)\n\n def _patch(self, url='/', data=None, query_params=None):\n return self._request('PATCH', url, data=data, query_params=query_params)\n\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n response = requests.request(action, self.api_endpoint + url, params=query_params,\n data=json.dumps(data),\n headers={\n 'X-API-Key': self.api_key,\n 'Content-Type': 'application/json'\n })\n LOGGER.debug('response: %s', response.text)\n response.raise_for_status()\n return response\n", "path": "lexicon/providers/powerdns.py"}], "after_files": [{"content": "\"\"\"\nLexicon PowerDNS Provider\n\nAuthor: 
Will Hughes, 2017\n\nAPI Docs: https://doc.powerdns.com/md/httpapi/api_spec/\n\nImplementation notes:\n* The PowerDNS API does not assign a unique identifier to each record in the way\nthat Lexicon expects. We work around this by creating an ID based on the record\nname, type and content, which when taken together are always unique\n* The PowerDNS API has no notion of 'create a single record' or 'delete a single\nrecord'. All operations are either 'replace the RRSet with this new set of records'\nor 'delete all records for this name and type. Similarly, there is no notion of\n'change the content of this record', because records are identified by their name,\ntype and content.\n* The API is very picky about the format of values used when creating records:\n** CNAMEs must be fully qualified\n** TXT, LOC records must be quoted\nThis is why the _clean_content and _unclean_content methods exist, to convert\nback and forth between the format PowerDNS expects, and the format Lexicon uses\n\"\"\"\nfrom __future__ import absolute_import\nimport json\nimport logging\n\nimport requests\nfrom lexicon.providers.base import Provider as BaseProvider\n\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESERVER_DOMAINS = []\n\n\ndef provider_parser(subparser):\n \"\"\"Configure provider parser for powerdns\"\"\"\n subparser.add_argument(\n \"--auth-token\", help=\"specify token for authentication\")\n subparser.add_argument(\"--pdns-server\", help=\"URI for PowerDNS server\")\n subparser.add_argument(\n \"--pdns-server-id\", help=\"Server ID to interact with\")\n subparser.add_argument(\n \"--pdns-disable-notify\", help=\"Disable slave notifications from master\")\n\nclass Provider(BaseProvider):\n \"\"\"Provider class for PowerDNS\"\"\"\n def __init__(self, config):\n super(Provider, self).__init__(config)\n\n self.api_endpoint = self._get_provider_option('pdns_server')\n self.disable_slave_notify = self._get_provider_option('pdns-disable-notify')\n\n if self.api_endpoint.endswith('/'):\n self.api_endpoint = self.api_endpoint[:-1]\n\n if not self.api_endpoint.endswith(\"/api/v1\"):\n self.api_endpoint += \"/api/v1\"\n\n self.server_id = self._get_provider_option('pdns_server_id')\n if self.server_id is None:\n self.server_id = 'localhost'\n\n self.api_endpoint += \"/servers/\" + self.server_id\n\n self.api_key = self._get_provider_option('auth_token')\n assert self.api_key is not None\n self._zone_data = None\n\n def notify_slaves(self):\n \"\"\"Checks to see if slaves should be notified, and notifies them if needed\"\"\"\n if self.disable_slave_notify is not None:\n LOGGER.debug('Slave notifications disabled')\n return False\n\n if self.zone_data()['kind'] == 'Master':\n response_code = self._put('/zones/' + self.domain + '/notify').status_code\n if response_code == 200:\n LOGGER.debug('Slave(s) notified')\n return True\n LOGGER.debug('Slave notification failed with code %i', response_code)\n else:\n LOGGER.debug('Zone type should be \\'Master\\' for slave notifications')\n return False\n\n def zone_data(self):\n \"\"\"Get zone data\"\"\"\n if self._zone_data is None:\n self._zone_data = self._get('/zones/' + self._ensure_dot(self.domain)).json()\n return self._zone_data\n\n def _authenticate(self):\n self.zone_data()\n self.domain_id = self.domain\n\n def _make_identifier(self, rtype, name, content): # pylint: disable=no-self-use\n return \"{}/{}={}\".format(rtype, name, content)\n\n def _parse_identifier(self, identifier): # pylint: disable=no-self-use\n parts = identifier.split('/')\n rtype = parts[0]\n 
parts = parts[1].split('=')\n name = parts[0]\n content = \"=\".join(parts[1:])\n return rtype, name, content\n\n def _list_records(self, rtype=None, name=None, content=None):\n records = []\n for rrset in self.zone_data()['rrsets']:\n if (name is None or self._fqdn_name(rrset['name']) == self._fqdn_name(\n name)) and (rtype is None or rrset['type'] == rtype):\n for record in rrset['records']:\n if content is None or record['content'] == self._clean_content(rtype, content):\n records.append({\n 'type': rrset['type'],\n 'name': self._full_name(rrset['name']),\n 'ttl': rrset['ttl'],\n 'content': self._unclean_content(rrset['type'], record['content']),\n 'id': self._make_identifier(rrset['type'],\n rrset['name'], record['content'])\n })\n LOGGER.debug('list_records: %s', records)\n return records\n\n def _clean_content(self, rtype, content):\n if rtype in (\"TXT\", \"LOC\"):\n if content[0] != '\"':\n content = '\"' + content\n if content[-1] != '\"':\n content += '\"'\n elif rtype == \"CNAME\":\n content = self._fqdn_name(content)\n return content\n\n def _unclean_content(self, rtype, content):\n if rtype in (\"TXT\", \"LOC\"):\n content = content.strip('\"')\n elif rtype == \"CNAME\":\n content = self._full_name(content)\n return content\n\n def _create_record(self, rtype, name, content):\n rname = self._fqdn_name(name)\n newcontent = self._clean_content(rtype, content)\n\n updated_data = {\n 'name': rname,\n 'type': rtype,\n 'records': [],\n 'ttl': self._get_lexicon_option('ttl') or 600,\n 'changetype': 'REPLACE'\n }\n\n updated_data['records'].append({'content': newcontent, 'disabled': False})\n\n for rrset in self.zone_data()['rrsets']:\n if rrset['name'] == rname and rrset['type'] == rtype:\n updated_data['ttl'] = rrset['ttl']\n\n for record in rrset['records']:\n if record['content'] != newcontent:\n updated_data['records'].append(\n {\n 'content': record['content'],\n 'disabled': record['disabled']\n })\n break\n\n request = {'rrsets': [updated_data]}\n LOGGER.debug('request: %s', request)\n\n self._patch('/zones/' + self._ensure_dot(self.domain), data=request)\n self.notify_slaves()\n self._zone_data = None\n return True\n\n def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n if identifier is not None:\n rtype, name, content = self._parse_identifier(identifier)\n\n LOGGER.debug(\"delete %s %s %s\", rtype, name, content)\n if rtype is None or name is None:\n raise Exception(\"Must specify at least both rtype and name\")\n\n for rrset in self.zone_data()['rrsets']:\n if rrset['type'] == rtype and self._fqdn_name(rrset['name']) == self._fqdn_name(name):\n update_data = rrset\n\n if 'comments' in update_data:\n del update_data['comments']\n\n if content is None:\n update_data['records'] = []\n update_data['changetype'] = 'DELETE'\n else:\n new_record_list = []\n for record in update_data['records']:\n if self._clean_content(rrset['type'], content) != record['content']:\n new_record_list.append(record)\n\n update_data['records'] = new_record_list\n update_data['changetype'] = 'REPLACE'\n break\n\n request = {'rrsets': [update_data]}\n LOGGER.debug('request: %s', request)\n\n self._patch('/zones/' + self._ensure_dot(self.domain), data=request)\n self.notify_slaves()\n\n self._zone_data = None\n return True\n\n def _update_record(self, identifier, rtype=None, name=None, content=None):\n self._delete_record(identifier)\n return self._create_record(rtype, name, content)\n\n def _patch(self, url='/', data=None, query_params=None):\n return self._request('PATCH', 
url, data=data, query_params=query_params)\n\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n response = requests.request(action, self.api_endpoint + url, params=query_params,\n data=json.dumps(data),\n headers={\n 'X-API-Key': self.api_key,\n 'Content-Type': 'application/json'\n })\n LOGGER.debug('response: %s', response.text)\n response.raise_for_status()\n return response\n\n @classmethod\n def _ensure_dot(cls, text):\n \"\"\"\n This function makes sure a string contains a dot at the end\n \"\"\"\n if text.endswith(\".\"):\n return text\n return text + \".\"\n", "path": "lexicon/providers/powerdns.py"}]} | 2,929 | 400 |
gh_patches_debug_17345 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-3074 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The smt combiner is raising IndexError exceptions in production.
The CpuTopology combiner is throwing a large number of IndexError('list index out of range',) exceptions in production.
--- END ISSUE ---
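The failure mode behind this report is a lookup on an empty filtered list; the following minimal sketch is hypothetical (it is not taken from the insights code base) and only illustrates how a comprehension-plus-index pattern produces exactly this IndexError when no entry matches a core id:

```python
# Hypothetical illustration: if no entry exists for a given core id (for example,
# cpu0 exposing no "online" file), indexing the filtered list raises IndexError.
class Core:
    def __init__(self, core_id, on):
        self.core_id = core_id
        self.on = on

cpu_online = [Core(1, True), Core(2, True)]  # note: no entry for core 0

matches = [core for core in cpu_online if core.core_id == 0]
try:
    online = matches[0].on
except IndexError as exc:
    print(exc)  # list index out of range
```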
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/combiners/smt.py`
Content:
```
1 """
2 Simultaneous Multithreading (SMT) combiner
3 ==========================================
4
5 Combiner for Simultaneous Multithreading (SMT). It uses the results of the following parsers:
6 :class:`insights.parsers.smt.CpuCoreOnline`,
7 :class:`insights.parsers.smt.CpuSiblings`.
8 """
9
10 from insights.core.plugins import combiner
11 from insights.parsers.smt import CpuCoreOnline, CpuSiblings
12
13
14 @combiner(CpuCoreOnline, CpuSiblings)
15 class CpuTopology(object):
16 """
17 Class for collecting the online/siblings status for all CPU cores.
18
19 Sample output of the ``CpuCoreOnline`` parser is::
20
21 [[Core 0: Online], [Core 1: Online], [Core 2: Online], [Core 3: Online]]
22
23 Sample output of the ``CpuSiblings`` parser is::
24
25 [[Core 0 Siblings: [0, 2]], [Core 1 Siblings: [1, 3]], [Core 2 Siblings: [0, 2]], [Core 3 Siblings: [1, 3]]]
26
27 Attributes:
28 cores (list of dictionaries): List of all cores.
29 all_solitary (bool): True, if hyperthreading is not used.
30
31 Examples:
32 >>> type(cpu_topology)
33 <class 'insights.combiners.smt.CpuTopology'>
34 >>> cpu_topology.cores == [{'online': True, 'siblings': [0, 2]}, {'online': True, 'siblings': [1, 3]}, {'online': True, 'siblings': [0, 2]}, {'online': True, 'siblings': [1, 3]}]
35 True
36 >>> cpu_topology.all_solitary
37 False
38 """
39
40 def __init__(self, cpu_online, cpu_siblings):
41 self.cores = []
42
43 max_cpu_core_id = max([core.core_id for core in cpu_online])
44 for n in range(max_cpu_core_id + 1):
45 online = [core for core in cpu_online if core.core_id == n]
46 online = online[0].on
47 siblings = [sibling for sibling in cpu_siblings if sibling.core_id == n]
48 if len(siblings) != 0:
49 siblings = siblings[0].siblings
50
51 one_core = {"online": online, "siblings": siblings}
52 self.cores.append(one_core)
53
54 self.all_solitary = all([len(core["siblings"]) <= 1 for core in self.cores])
55
56 def online(self, core_id):
57 """
58 Returns bool value obtained from "online" file for given core_id.
59 """
60 if core_id >= len(self.cores) or core_id < 0:
61 return None
62 return self.cores[core_id]["online"]
63
64 def siblings(self, core_id):
65 """
66 Returns list of siblings for given core_id.
67 """
68 if core_id >= len(self.cores) or core_id < 0:
69 return None
70 return self.cores[core_id]["siblings"]
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/combiners/smt.py b/insights/combiners/smt.py
--- a/insights/combiners/smt.py
+++ b/insights/combiners/smt.py
@@ -43,7 +43,13 @@
max_cpu_core_id = max([core.core_id for core in cpu_online])
for n in range(max_cpu_core_id + 1):
online = [core for core in cpu_online if core.core_id == n]
- online = online[0].on
+ # On some boxes cpu0 doesn't have the online file, since technically cpu0 will always
+ # be online. So check if online returns anything before trying to access online[0].
+ # If it returns nothing and n is 0 set online to True.
+ if online:
+ online = online[0].on
+ elif not online and n == 0:
+ online = True
siblings = [sibling for sibling in cpu_siblings if sibling.core_id == n]
if len(siblings) != 0:
siblings = siblings[0].siblings
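Reduced to its essentials, the patch replaces the unconditional `online[0].on` with a guarded lookup plus a cpu0 special case; a simplified, framework-free restatement of that decision (not the actual combiner code) is:

```python
# Simplified restatement of the patched logic: cpu0 may legitimately lack an
# "online" file and is then assumed to be online; other cores keep the original
# empty-list fallthrough when nothing matches.
def resolve_online(matches, core_id):
    if matches:
        return matches[0].on
    if core_id == 0:
        return True
    return matches

assert resolve_online([], 0) is True
assert resolve_online([], 3) == []
```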
| {"golden_diff": "diff --git a/insights/combiners/smt.py b/insights/combiners/smt.py\n--- a/insights/combiners/smt.py\n+++ b/insights/combiners/smt.py\n@@ -43,7 +43,13 @@\n max_cpu_core_id = max([core.core_id for core in cpu_online])\n for n in range(max_cpu_core_id + 1):\n online = [core for core in cpu_online if core.core_id == n]\n- online = online[0].on\n+ # On some boxes cpu0 doesn't have the online file, since technically cpu0 will always\n+ # be online. So check if online returns anything before trying to access online[0].\n+ # If it returns nothing and n is 0 set online to True.\n+ if online:\n+ online = online[0].on\n+ elif not online and n == 0:\n+ online = True\n siblings = [sibling for sibling in cpu_siblings if sibling.core_id == n]\n if len(siblings) != 0:\n siblings = siblings[0].siblings\n", "issue": "The smt combiner is raising IndexError exceptions in production.\nThe CpuTopology combiner is throwing a large number of the exception IndexError('list index out of range',) in production.\n", "before_files": [{"content": "\"\"\"\nSimultaneous Multithreading (SMT) combiner\n==========================================\n\nCombiner for Simultaneous Multithreading (SMT). It uses the results of the following parsers:\n:class:`insights.parsers.smt.CpuCoreOnline`,\n:class:`insights.parsers.smt.CpuSiblings`.\n\"\"\"\n\nfrom insights.core.plugins import combiner\nfrom insights.parsers.smt import CpuCoreOnline, CpuSiblings\n\n\n@combiner(CpuCoreOnline, CpuSiblings)\nclass CpuTopology(object):\n \"\"\"\n Class for collecting the online/siblings status for all CPU cores.\n\n Sample output of the ``CpuCoreOnline`` parser is::\n\n [[Core 0: Online], [Core 1: Online], [Core 2: Online], [Core 3: Online]]\n\n Sample output of the ``CpuSiblings`` parser is::\n\n [[Core 0 Siblings: [0, 2]], [Core 1 Siblings: [1, 3]], [Core 2 Siblings: [0, 2]], [Core 3 Siblings: [1, 3]]]\n\n Attributes:\n cores (list of dictionaries): List of all cores.\n all_solitary (bool): True, if hyperthreading is not used.\n\n Examples:\n >>> type(cpu_topology)\n <class 'insights.combiners.smt.CpuTopology'>\n >>> cpu_topology.cores == [{'online': True, 'siblings': [0, 2]}, {'online': True, 'siblings': [1, 3]}, {'online': True, 'siblings': [0, 2]}, {'online': True, 'siblings': [1, 3]}]\n True\n >>> cpu_topology.all_solitary\n False\n \"\"\"\n\n def __init__(self, cpu_online, cpu_siblings):\n self.cores = []\n\n max_cpu_core_id = max([core.core_id for core in cpu_online])\n for n in range(max_cpu_core_id + 1):\n online = [core for core in cpu_online if core.core_id == n]\n online = online[0].on\n siblings = [sibling for sibling in cpu_siblings if sibling.core_id == n]\n if len(siblings) != 0:\n siblings = siblings[0].siblings\n\n one_core = {\"online\": online, \"siblings\": siblings}\n self.cores.append(one_core)\n\n self.all_solitary = all([len(core[\"siblings\"]) <= 1 for core in self.cores])\n\n def online(self, core_id):\n \"\"\"\n Returns bool value obtained from \"online\" file for given core_id.\n \"\"\"\n if core_id >= len(self.cores) or core_id < 0:\n return None\n return self.cores[core_id][\"online\"]\n\n def siblings(self, core_id):\n \"\"\"\n Returns list of siblings for given core_id.\n \"\"\"\n if core_id >= len(self.cores) or core_id < 0:\n return None\n return self.cores[core_id][\"siblings\"]\n", "path": "insights/combiners/smt.py"}], "after_files": [{"content": "\"\"\"\nSimultaneous Multithreading (SMT) combiner\n==========================================\n\nCombiner for Simultaneous Multithreading (SMT). 
It uses the results of the following parsers:\n:class:`insights.parsers.smt.CpuCoreOnline`,\n:class:`insights.parsers.smt.CpuSiblings`.\n\"\"\"\n\nfrom insights.core.plugins import combiner\nfrom insights.parsers.smt import CpuCoreOnline, CpuSiblings\n\n\n@combiner(CpuCoreOnline, CpuSiblings)\nclass CpuTopology(object):\n \"\"\"\n Class for collecting the online/siblings status for all CPU cores.\n\n Sample output of the ``CpuCoreOnline`` parser is::\n\n [[Core 0: Online], [Core 1: Online], [Core 2: Online], [Core 3: Online]]\n\n Sample output of the ``CpuSiblings`` parser is::\n\n [[Core 0 Siblings: [0, 2]], [Core 1 Siblings: [1, 3]], [Core 2 Siblings: [0, 2]], [Core 3 Siblings: [1, 3]]]\n\n Attributes:\n cores (list of dictionaries): List of all cores.\n all_solitary (bool): True, if hyperthreading is not used.\n\n Examples:\n >>> type(cpu_topology)\n <class 'insights.combiners.smt.CpuTopology'>\n >>> cpu_topology.cores == [{'online': True, 'siblings': [0, 2]}, {'online': True, 'siblings': [1, 3]}, {'online': True, 'siblings': [0, 2]}, {'online': True, 'siblings': [1, 3]}]\n True\n >>> cpu_topology.all_solitary\n False\n \"\"\"\n\n def __init__(self, cpu_online, cpu_siblings):\n self.cores = []\n\n max_cpu_core_id = max([core.core_id for core in cpu_online])\n for n in range(max_cpu_core_id + 1):\n online = [core for core in cpu_online if core.core_id == n]\n # On some boxes cpu0 doesn't have the online file, since technically cpu0 will always\n # be online. So check if online returns anything before trying to access online[0].\n # If it returns nothing and n is 0 set online to True.\n if online:\n online = online[0].on\n elif not online and n == 0:\n online = True\n siblings = [sibling for sibling in cpu_siblings if sibling.core_id == n]\n if len(siblings) != 0:\n siblings = siblings[0].siblings\n\n one_core = {\"online\": online, \"siblings\": siblings}\n self.cores.append(one_core)\n\n self.all_solitary = all([len(core[\"siblings\"]) <= 1 for core in self.cores])\n\n def online(self, core_id):\n \"\"\"\n Returns bool value obtained from \"online\" file for given core_id.\n \"\"\"\n if core_id >= len(self.cores) or core_id < 0:\n return None\n return self.cores[core_id][\"online\"]\n\n def siblings(self, core_id):\n \"\"\"\n Returns list of siblings for given core_id.\n \"\"\"\n if core_id >= len(self.cores) or core_id < 0:\n return None\n return self.cores[core_id][\"siblings\"]\n", "path": "insights/combiners/smt.py"}]} | 1,094 | 247 |
gh_patches_debug_25666 | rasdani/github-patches | git_diff | ephios-dev__ephios-875 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change event type in event form
As a planner I want to change the event's type after creation so I can correct a mistake.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ephios/core/forms/events.py`
Content:
```
1 import operator
2 import re
3 from datetime import date, datetime, timedelta
4
5 from crispy_forms.bootstrap import FormActions
6 from crispy_forms.helper import FormHelper
7 from crispy_forms.layout import Field, Layout, Submit
8 from django import forms
9 from django.contrib.auth import get_user_model
10 from django.contrib.auth.models import Group
11 from django.core.exceptions import ValidationError
12 from django.db.models import Q
13 from django.template.loader import render_to_string
14 from django.utils.timezone import make_aware
15 from django.utils.translation import gettext as _
16 from django_select2.forms import Select2MultipleWidget
17 from dynamic_preferences.forms import PreferenceForm
18 from guardian.shortcuts import assign_perm, get_objects_for_user, get_users_with_perms, remove_perm
19 from recurrence.forms import RecurrenceField
20
21 from ephios.core.dynamic_preferences_registry import event_type_preference_registry
22 from ephios.core.models import Event, EventType, LocalParticipation, Shift, UserProfile
23 from ephios.core.signup.methods import enabled_signup_methods, signup_method_from_slug
24 from ephios.core.widgets import MultiUserProfileWidget
25 from ephios.extra.crispy import AbortLink
26 from ephios.extra.permissions import get_groups_with_perms
27 from ephios.extra.widgets import ColorInput, CustomDateInput, CustomTimeInput
28 from ephios.modellogging.log import add_log_recorder, update_log
29 from ephios.modellogging.recorders import (
30 DerivedFieldsLogRecorder,
31 InstanceActionType,
32 PermissionLogRecorder,
33 )
34
35
36 class EventForm(forms.ModelForm):
37 visible_for = forms.ModelMultipleChoiceField(
38 queryset=Group.objects.none(),
39 label=_("Visible for"),
40 help_text=_(
41 "Select groups which the event shall be visible for. Regardless, the event will be visible for users that already signed up."
42 ),
43 widget=Select2MultipleWidget,
44 required=False,
45 )
46 responsible_users = forms.ModelMultipleChoiceField(
47 queryset=UserProfile.objects.all(),
48 required=False,
49 label=_("Responsible persons"),
50 widget=MultiUserProfileWidget,
51 )
52 responsible_groups = forms.ModelMultipleChoiceField(
53 queryset=Group.objects.all(),
54 required=False,
55 label=_("Responsible groups"),
56 widget=Select2MultipleWidget,
57 )
58
59 class Meta:
60 model = Event
61 fields = ["title", "description", "location"]
62
63 def __init__(self, **kwargs):
64 user = kwargs.pop("user")
65 can_publish_for_groups = get_objects_for_user(user, "publish_event_for_group", klass=Group)
66
67 if (event := kwargs.get("instance", None)) is not None:
68 self.eventtype = event.type
69 responsible_users = get_users_with_perms(
70 event, only_with_perms_in=["change_event"], with_group_users=False
71 )
72 responsible_groups = get_groups_with_perms(event, only_with_perms_in=["change_event"])
73 visible_for = get_groups_with_perms(event, only_with_perms_in=["view_event"]).exclude(
74 id__in=responsible_groups
75 )
76
77 self.locked_visible_for_groups = set(visible_for.exclude(id__in=can_publish_for_groups))
78 kwargs["initial"] = {
79 "visible_for": visible_for.filter(id__in=can_publish_for_groups),
80 "responsible_users": responsible_users,
81 "responsible_groups": responsible_groups,
82 **kwargs.get("initial", {}),
83 }
84 else:
85 self.eventtype = kwargs.pop("eventtype")
86 kwargs["initial"] = {
87 "responsible_users": self.eventtype.preferences.get("responsible_users")
88 or get_user_model().objects.filter(pk=user.pk),
89 "responsible_groups": self.eventtype.preferences.get("responsible_groups"),
90 "visible_for": self.eventtype.preferences.get("visible_for")
91 or get_objects_for_user(user, "publish_event_for_group", klass=Group),
92 }
93 self.locked_visible_for_groups = set()
94
95 super().__init__(**kwargs)
96
97 self.fields["visible_for"].queryset = can_publish_for_groups
98 self.fields["visible_for"].disabled = not can_publish_for_groups
99 if self.locked_visible_for_groups:
100 self.fields["visible_for"].help_text = _(
101 "Select groups which the event shall be visible for. "
102 "This event is also visible for <b>{groups}</b>, "
103 "but you don't have the permission to change visibility "
104 "for those groups."
105 ).format(groups=", ".join(group.name for group in self.locked_visible_for_groups))
106
107 def save(self, commit=True):
108 self.instance.type = self.eventtype
109 event: Event = super().save(commit=commit)
110
111 add_log_recorder(event, PermissionLogRecorder("view_event", _("Visible for")))
112 add_log_recorder(event, PermissionLogRecorder("change_event", _("Responsibles")))
113
114 # delete existing permissions
115 # (better implement https://github.com/django-guardian/django-guardian/issues/654)
116 for group in get_groups_with_perms(
117 event, only_with_perms_in=["view_event", "change_event"]
118 ):
119 remove_perm("view_event", group, event)
120 remove_perm("change_event", group, event)
121 for user in get_users_with_perms(event, only_with_perms_in=["view_event", "change_event"]):
122 remove_perm("view_event", user, event)
123 remove_perm("change_event", user, event)
124
125 # assign designated permissions
126 assign_perm(
127 "view_event",
128 Group.objects.filter(
129 Q(id__in=self.cleaned_data["visible_for"])
130 | Q(id__in=self.cleaned_data["responsible_groups"])
131 | Q(id__in=(g.id for g in self.locked_visible_for_groups))
132 ),
133 event,
134 )
135 assign_perm("change_event", self.cleaned_data["responsible_groups"], event)
136 assign_perm("change_event", self.cleaned_data["responsible_users"], event)
137
138 # Assign view_event to responsible users and to non-responsible users
139 # that already have some sort of participation for the event
140 # (-> they saw and interacted with it)
141 # We can't just do users that aren't included by group permissions,
142 # as they might get removed from that group.
143 assign_perm(
144 "view_event",
145 UserProfile.objects.filter(
146 Q(pk__in=self.cleaned_data["responsible_users"])
147 | Q(
148 pk__in=LocalParticipation.objects.filter(
149 shift_id__in=event.shifts.all()
150 ).values_list("user", flat=True)
151 )
152 ),
153 event,
154 )
155
156 update_log(event, InstanceActionType.CHANGE)
157 return event
158
159
160 class ShiftForm(forms.ModelForm):
161 date = forms.DateField(widget=CustomDateInput, label=_("Date"))
162 meeting_time = forms.TimeField(widget=CustomTimeInput, label=_("Meeting time"))
163 start_time = forms.TimeField(widget=CustomTimeInput, label=_("Start time"))
164 end_time = forms.TimeField(widget=CustomTimeInput, label=_("End time"))
165
166 field_order = ["date", "meeting_time", "start_time", "end_time", "signup_method_slug"]
167
168 class Meta:
169 model = Shift
170 fields = ["meeting_time", "start_time", "end_time", "signup_method_slug"]
171
172 def __init__(self, *args, **kwargs):
173 super().__init__(*args, **kwargs)
174 signup_methods = list(enabled_signup_methods())
175
176 # make sure that if a shift uses a disabled but installed method, it is also available in the list
177 if self.instance and (method_slug := self.instance.signup_method_slug):
178 if method_slug not in map(operator.attrgetter("slug"), signup_methods):
179 try:
180 signup_methods.append(signup_method_from_slug(method_slug, self.instance))
181 except ValueError: # not installed
182 pass
183
184 self.fields["signup_method_slug"].widget = forms.Select(
185 choices=((method.slug, method.verbose_name) for method in signup_methods)
186 )
187 # this recorder may cause db queries, so it's added on Shift init, but here in the form
188 # pylint: disable=undefined-variable
189 add_log_recorder(
190 self.instance,
191 DerivedFieldsLogRecorder(
192 lambda shift: method.get_signup_info() if (method := shift.signup_method) else {}
193 ),
194 )
195
196 def clean(self):
197 cleaned_data = super().clean()
198 if {"date", "meeting_time", "start_time", "end_time"} <= set(cleaned_data.keys()):
199 cleaned_data["meeting_time"] = make_aware(
200 datetime.combine(cleaned_data["date"], cleaned_data["meeting_time"])
201 )
202 cleaned_data["start_time"] = make_aware(
203 datetime.combine(cleaned_data["date"], cleaned_data["start_time"])
204 )
205 cleaned_data["end_time"] = make_aware(
206 datetime.combine(self.cleaned_data["date"], cleaned_data["end_time"])
207 )
208 if self.cleaned_data["end_time"] <= self.cleaned_data["start_time"]:
209 cleaned_data["end_time"] = cleaned_data["end_time"] + timedelta(days=1)
210 if not cleaned_data["meeting_time"] <= cleaned_data["start_time"]:
211 raise ValidationError(_("Meeting time must not be after start time!"))
212 return cleaned_data
213
214
215 class EventDuplicationForm(forms.Form):
216 start_date = forms.DateField(
217 widget=CustomDateInput,
218 initial=date.today(),
219 help_text=_(
220 "This date will be used as the start date for recurring events that you create below, e.g. daily events will be created from this date onwards."
221 ),
222 label=_("Start date"),
223 )
224 recurrence = RecurrenceField(required=False)
225
226
227 class EventTypeForm(forms.ModelForm):
228 class Meta:
229 model = EventType
230 fields = ["title", "color"]
231 widgets = {"color": ColorInput()}
232
233 def clean_color(self):
234 regex = re.compile(r"#[a-fA-F\d]{6}")
235 if not regex.match(self.cleaned_data["color"]):
236 raise ValidationError(_("You need to enter a valid color"))
237 return self.cleaned_data["color"]
238
239
240 class EventTypePreferenceForm(PreferenceForm):
241 registry = event_type_preference_registry
242
243
244 class BasePluginFormMixin:
245 @property
246 def heading(self):
247 raise NotImplementedError
248
249 def render(self):
250 try:
251 self.helper.form_tag = False
252 except AttributeError:
253 self.helper = FormHelper(self)
254 self.helper.form_tag = False
255 return render_to_string("core/fragments/plugin_form.html", context={"form": self})
256
257 def is_function_active(self):
258 """
259 When building forms for additional features, return whether that feature is enabled for the form instance.
260 With the default template, if this is True, the collapse is expanded on page load.
261 """
262 return False
263
264
265 class EventNotificationForm(forms.Form):
266 NEW_EVENT = "new"
267 REMINDER = "remind"
268 PARTICIPANTS = "participants"
269 action = forms.ChoiceField(
270 choices=[
271 (NEW_EVENT, _("Send notification about new event to everyone")),
272 (REMINDER, _("Send reminder to everyone that is not participating")),
273 (PARTICIPANTS, _("Send a message to all participants")),
274 ],
275 widget=forms.RadioSelect,
276 label=False,
277 )
278 mail_content = forms.CharField(required=False, widget=forms.Textarea, label=_("Mail content"))
279
280 def __init__(self, *args, **kwargs):
281 self.event = kwargs.pop("event")
282 super().__init__(*args, **kwargs)
283 self.helper = FormHelper(self)
284 self.helper.layout = Layout(
285 Field("action"),
286 Field("mail_content"),
287 FormActions(
288 Submit("submit", _("Send"), css_class="float-end"),
289 AbortLink(href=self.event.get_absolute_url()),
290 ),
291 )
292
293 def clean(self):
294 if (
295 self.cleaned_data.get("action") == self.PARTICIPANTS
296 and not self.cleaned_data["mail_content"]
297 ):
298 raise ValidationError(_("You cannot send an empty mail."))
299 return super().clean()
300
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ephios/core/forms/events.py b/ephios/core/forms/events.py
--- a/ephios/core/forms/events.py
+++ b/ephios/core/forms/events.py
@@ -58,7 +58,7 @@
class Meta:
model = Event
- fields = ["title", "description", "location"]
+ fields = ["title", "type", "description", "location"]
def __init__(self, **kwargs):
user = kwargs.pop("user")
@@ -94,6 +94,8 @@
super().__init__(**kwargs)
+ if event is None:
+ self.fields.pop("type")
self.fields["visible_for"].queryset = can_publish_for_groups
self.fields["visible_for"].disabled = not can_publish_for_groups
if self.locked_visible_for_groups:
@@ -105,7 +107,8 @@
).format(groups=", ".join(group.name for group in self.locked_visible_for_groups))
def save(self, commit=True):
- self.instance.type = self.eventtype
+ if not self.instance.pk:
+ self.instance.type = self.eventtype
event: Event = super().save(commit=commit)
add_log_recorder(event, PermissionLogRecorder("view_event", _("Visible for")))
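In effect, the patch makes the event type an editable form field only when an existing event is being edited, while creation keeps using the pre-selected event type; a rough framework-free sketch of that branching (simplified names, not the actual ephios code) is:

```python
# Framework-free sketch of the patched behaviour: on creation the "type" field
# is dropped because the type was already chosen before the form, while edits
# keep it visible so a planner can correct a mistake afterwards.
def build_form_fields(instance=None):
    fields = ["title", "type", "description", "location"]
    if instance is None:      # creating a new event
        fields.remove("type")
    return fields

assert "type" not in build_form_fields(instance=None)
assert "type" in build_form_fields(instance=object())
```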
| {"golden_diff": "diff --git a/ephios/core/forms/events.py b/ephios/core/forms/events.py\n--- a/ephios/core/forms/events.py\n+++ b/ephios/core/forms/events.py\n@@ -58,7 +58,7 @@\n \n class Meta:\n model = Event\n- fields = [\"title\", \"description\", \"location\"]\n+ fields = [\"title\", \"type\", \"description\", \"location\"]\n \n def __init__(self, **kwargs):\n user = kwargs.pop(\"user\")\n@@ -94,6 +94,8 @@\n \n super().__init__(**kwargs)\n \n+ if event is None:\n+ self.fields.pop(\"type\")\n self.fields[\"visible_for\"].queryset = can_publish_for_groups\n self.fields[\"visible_for\"].disabled = not can_publish_for_groups\n if self.locked_visible_for_groups:\n@@ -105,7 +107,8 @@\n ).format(groups=\", \".join(group.name for group in self.locked_visible_for_groups))\n \n def save(self, commit=True):\n- self.instance.type = self.eventtype\n+ if not self.instance.pk:\n+ self.instance.type = self.eventtype\n event: Event = super().save(commit=commit)\n \n add_log_recorder(event, PermissionLogRecorder(\"view_event\", _(\"Visible for\")))\n", "issue": "Change event type in event form\nAs a planner I want to change the event's type after creation so I can correct a mistake.\n", "before_files": [{"content": "import operator\nimport re\nfrom datetime import date, datetime, timedelta\n\nfrom crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Field, Layout, Submit\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.template.loader import render_to_string\nfrom django.utils.timezone import make_aware\nfrom django.utils.translation import gettext as _\nfrom django_select2.forms import Select2MultipleWidget\nfrom dynamic_preferences.forms import PreferenceForm\nfrom guardian.shortcuts import assign_perm, get_objects_for_user, get_users_with_perms, remove_perm\nfrom recurrence.forms import RecurrenceField\n\nfrom ephios.core.dynamic_preferences_registry import event_type_preference_registry\nfrom ephios.core.models import Event, EventType, LocalParticipation, Shift, UserProfile\nfrom ephios.core.signup.methods import enabled_signup_methods, signup_method_from_slug\nfrom ephios.core.widgets import MultiUserProfileWidget\nfrom ephios.extra.crispy import AbortLink\nfrom ephios.extra.permissions import get_groups_with_perms\nfrom ephios.extra.widgets import ColorInput, CustomDateInput, CustomTimeInput\nfrom ephios.modellogging.log import add_log_recorder, update_log\nfrom ephios.modellogging.recorders import (\n DerivedFieldsLogRecorder,\n InstanceActionType,\n PermissionLogRecorder,\n)\n\n\nclass EventForm(forms.ModelForm):\n visible_for = forms.ModelMultipleChoiceField(\n queryset=Group.objects.none(),\n label=_(\"Visible for\"),\n help_text=_(\n \"Select groups which the event shall be visible for. 
Regardless, the event will be visible for users that already signed up.\"\n ),\n widget=Select2MultipleWidget,\n required=False,\n )\n responsible_users = forms.ModelMultipleChoiceField(\n queryset=UserProfile.objects.all(),\n required=False,\n label=_(\"Responsible persons\"),\n widget=MultiUserProfileWidget,\n )\n responsible_groups = forms.ModelMultipleChoiceField(\n queryset=Group.objects.all(),\n required=False,\n label=_(\"Responsible groups\"),\n widget=Select2MultipleWidget,\n )\n\n class Meta:\n model = Event\n fields = [\"title\", \"description\", \"location\"]\n\n def __init__(self, **kwargs):\n user = kwargs.pop(\"user\")\n can_publish_for_groups = get_objects_for_user(user, \"publish_event_for_group\", klass=Group)\n\n if (event := kwargs.get(\"instance\", None)) is not None:\n self.eventtype = event.type\n responsible_users = get_users_with_perms(\n event, only_with_perms_in=[\"change_event\"], with_group_users=False\n )\n responsible_groups = get_groups_with_perms(event, only_with_perms_in=[\"change_event\"])\n visible_for = get_groups_with_perms(event, only_with_perms_in=[\"view_event\"]).exclude(\n id__in=responsible_groups\n )\n\n self.locked_visible_for_groups = set(visible_for.exclude(id__in=can_publish_for_groups))\n kwargs[\"initial\"] = {\n \"visible_for\": visible_for.filter(id__in=can_publish_for_groups),\n \"responsible_users\": responsible_users,\n \"responsible_groups\": responsible_groups,\n **kwargs.get(\"initial\", {}),\n }\n else:\n self.eventtype = kwargs.pop(\"eventtype\")\n kwargs[\"initial\"] = {\n \"responsible_users\": self.eventtype.preferences.get(\"responsible_users\")\n or get_user_model().objects.filter(pk=user.pk),\n \"responsible_groups\": self.eventtype.preferences.get(\"responsible_groups\"),\n \"visible_for\": self.eventtype.preferences.get(\"visible_for\")\n or get_objects_for_user(user, \"publish_event_for_group\", klass=Group),\n }\n self.locked_visible_for_groups = set()\n\n super().__init__(**kwargs)\n\n self.fields[\"visible_for\"].queryset = can_publish_for_groups\n self.fields[\"visible_for\"].disabled = not can_publish_for_groups\n if self.locked_visible_for_groups:\n self.fields[\"visible_for\"].help_text = _(\n \"Select groups which the event shall be visible for. 
\"\n \"This event is also visible for <b>{groups}</b>, \"\n \"but you don't have the permission to change visibility \"\n \"for those groups.\"\n ).format(groups=\", \".join(group.name for group in self.locked_visible_for_groups))\n\n def save(self, commit=True):\n self.instance.type = self.eventtype\n event: Event = super().save(commit=commit)\n\n add_log_recorder(event, PermissionLogRecorder(\"view_event\", _(\"Visible for\")))\n add_log_recorder(event, PermissionLogRecorder(\"change_event\", _(\"Responsibles\")))\n\n # delete existing permissions\n # (better implement https://github.com/django-guardian/django-guardian/issues/654)\n for group in get_groups_with_perms(\n event, only_with_perms_in=[\"view_event\", \"change_event\"]\n ):\n remove_perm(\"view_event\", group, event)\n remove_perm(\"change_event\", group, event)\n for user in get_users_with_perms(event, only_with_perms_in=[\"view_event\", \"change_event\"]):\n remove_perm(\"view_event\", user, event)\n remove_perm(\"change_event\", user, event)\n\n # assign designated permissions\n assign_perm(\n \"view_event\",\n Group.objects.filter(\n Q(id__in=self.cleaned_data[\"visible_for\"])\n | Q(id__in=self.cleaned_data[\"responsible_groups\"])\n | Q(id__in=(g.id for g in self.locked_visible_for_groups))\n ),\n event,\n )\n assign_perm(\"change_event\", self.cleaned_data[\"responsible_groups\"], event)\n assign_perm(\"change_event\", self.cleaned_data[\"responsible_users\"], event)\n\n # Assign view_event to responsible users and to non-responsible users\n # that already have some sort of participation for the event\n # (-> they saw and interacted with it)\n # We can't just do users that aren't included by group permissions,\n # as they might get removed from that group.\n assign_perm(\n \"view_event\",\n UserProfile.objects.filter(\n Q(pk__in=self.cleaned_data[\"responsible_users\"])\n | Q(\n pk__in=LocalParticipation.objects.filter(\n shift_id__in=event.shifts.all()\n ).values_list(\"user\", flat=True)\n )\n ),\n event,\n )\n\n update_log(event, InstanceActionType.CHANGE)\n return event\n\n\nclass ShiftForm(forms.ModelForm):\n date = forms.DateField(widget=CustomDateInput, label=_(\"Date\"))\n meeting_time = forms.TimeField(widget=CustomTimeInput, label=_(\"Meeting time\"))\n start_time = forms.TimeField(widget=CustomTimeInput, label=_(\"Start time\"))\n end_time = forms.TimeField(widget=CustomTimeInput, label=_(\"End time\"))\n\n field_order = [\"date\", \"meeting_time\", \"start_time\", \"end_time\", \"signup_method_slug\"]\n\n class Meta:\n model = Shift\n fields = [\"meeting_time\", \"start_time\", \"end_time\", \"signup_method_slug\"]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n signup_methods = list(enabled_signup_methods())\n\n # make sure that if a shift uses a disabled but installed method, it is also available in the list\n if self.instance and (method_slug := self.instance.signup_method_slug):\n if method_slug not in map(operator.attrgetter(\"slug\"), signup_methods):\n try:\n signup_methods.append(signup_method_from_slug(method_slug, self.instance))\n except ValueError: # not installed\n pass\n\n self.fields[\"signup_method_slug\"].widget = forms.Select(\n choices=((method.slug, method.verbose_name) for method in signup_methods)\n )\n # this recorder may cause db queries, so it's added on Shift init, but here in the form\n # pylint: disable=undefined-variable\n add_log_recorder(\n self.instance,\n DerivedFieldsLogRecorder(\n lambda shift: method.get_signup_info() if (method := 
shift.signup_method) else {}\n ),\n )\n\n def clean(self):\n cleaned_data = super().clean()\n if {\"date\", \"meeting_time\", \"start_time\", \"end_time\"} <= set(cleaned_data.keys()):\n cleaned_data[\"meeting_time\"] = make_aware(\n datetime.combine(cleaned_data[\"date\"], cleaned_data[\"meeting_time\"])\n )\n cleaned_data[\"start_time\"] = make_aware(\n datetime.combine(cleaned_data[\"date\"], cleaned_data[\"start_time\"])\n )\n cleaned_data[\"end_time\"] = make_aware(\n datetime.combine(self.cleaned_data[\"date\"], cleaned_data[\"end_time\"])\n )\n if self.cleaned_data[\"end_time\"] <= self.cleaned_data[\"start_time\"]:\n cleaned_data[\"end_time\"] = cleaned_data[\"end_time\"] + timedelta(days=1)\n if not cleaned_data[\"meeting_time\"] <= cleaned_data[\"start_time\"]:\n raise ValidationError(_(\"Meeting time must not be after start time!\"))\n return cleaned_data\n\n\nclass EventDuplicationForm(forms.Form):\n start_date = forms.DateField(\n widget=CustomDateInput,\n initial=date.today(),\n help_text=_(\n \"This date will be used as the start date for recurring events that you create below, e.g. daily events will be created from this date onwards.\"\n ),\n label=_(\"Start date\"),\n )\n recurrence = RecurrenceField(required=False)\n\n\nclass EventTypeForm(forms.ModelForm):\n class Meta:\n model = EventType\n fields = [\"title\", \"color\"]\n widgets = {\"color\": ColorInput()}\n\n def clean_color(self):\n regex = re.compile(r\"#[a-fA-F\\d]{6}\")\n if not regex.match(self.cleaned_data[\"color\"]):\n raise ValidationError(_(\"You need to enter a valid color\"))\n return self.cleaned_data[\"color\"]\n\n\nclass EventTypePreferenceForm(PreferenceForm):\n registry = event_type_preference_registry\n\n\nclass BasePluginFormMixin:\n @property\n def heading(self):\n raise NotImplementedError\n\n def render(self):\n try:\n self.helper.form_tag = False\n except AttributeError:\n self.helper = FormHelper(self)\n self.helper.form_tag = False\n return render_to_string(\"core/fragments/plugin_form.html\", context={\"form\": self})\n\n def is_function_active(self):\n \"\"\"\n When building forms for additional features, return whether that feature is enabled for the form instance.\n With the default template, if this is True, the collapse is expanded on page load.\n \"\"\"\n return False\n\n\nclass EventNotificationForm(forms.Form):\n NEW_EVENT = \"new\"\n REMINDER = \"remind\"\n PARTICIPANTS = \"participants\"\n action = forms.ChoiceField(\n choices=[\n (NEW_EVENT, _(\"Send notification about new event to everyone\")),\n (REMINDER, _(\"Send reminder to everyone that is not participating\")),\n (PARTICIPANTS, _(\"Send a message to all participants\")),\n ],\n widget=forms.RadioSelect,\n label=False,\n )\n mail_content = forms.CharField(required=False, widget=forms.Textarea, label=_(\"Mail content\"))\n\n def __init__(self, *args, **kwargs):\n self.event = kwargs.pop(\"event\")\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.layout = Layout(\n Field(\"action\"),\n Field(\"mail_content\"),\n FormActions(\n Submit(\"submit\", _(\"Send\"), css_class=\"float-end\"),\n AbortLink(href=self.event.get_absolute_url()),\n ),\n )\n\n def clean(self):\n if (\n self.cleaned_data.get(\"action\") == self.PARTICIPANTS\n and not self.cleaned_data[\"mail_content\"]\n ):\n raise ValidationError(_(\"You cannot send an empty mail.\"))\n return super().clean()\n", "path": "ephios/core/forms/events.py"}], "after_files": [{"content": "import operator\nimport re\nfrom datetime import date, 
datetime, timedelta\n\nfrom crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Field, Layout, Submit\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.template.loader import render_to_string\nfrom django.utils.timezone import make_aware\nfrom django.utils.translation import gettext as _\nfrom django_select2.forms import Select2MultipleWidget\nfrom dynamic_preferences.forms import PreferenceForm\nfrom guardian.shortcuts import assign_perm, get_objects_for_user, get_users_with_perms, remove_perm\nfrom recurrence.forms import RecurrenceField\n\nfrom ephios.core.dynamic_preferences_registry import event_type_preference_registry\nfrom ephios.core.models import Event, EventType, LocalParticipation, Shift, UserProfile\nfrom ephios.core.signup.methods import enabled_signup_methods, signup_method_from_slug\nfrom ephios.core.widgets import MultiUserProfileWidget\nfrom ephios.extra.crispy import AbortLink\nfrom ephios.extra.permissions import get_groups_with_perms\nfrom ephios.extra.widgets import ColorInput, CustomDateInput, CustomTimeInput\nfrom ephios.modellogging.log import add_log_recorder, update_log\nfrom ephios.modellogging.recorders import (\n DerivedFieldsLogRecorder,\n InstanceActionType,\n PermissionLogRecorder,\n)\n\n\nclass EventForm(forms.ModelForm):\n visible_for = forms.ModelMultipleChoiceField(\n queryset=Group.objects.none(),\n label=_(\"Visible for\"),\n help_text=_(\n \"Select groups which the event shall be visible for. Regardless, the event will be visible for users that already signed up.\"\n ),\n widget=Select2MultipleWidget,\n required=False,\n )\n responsible_users = forms.ModelMultipleChoiceField(\n queryset=UserProfile.objects.all(),\n required=False,\n label=_(\"Responsible persons\"),\n widget=MultiUserProfileWidget,\n )\n responsible_groups = forms.ModelMultipleChoiceField(\n queryset=Group.objects.all(),\n required=False,\n label=_(\"Responsible groups\"),\n widget=Select2MultipleWidget,\n )\n\n class Meta:\n model = Event\n fields = [\"title\", \"type\", \"description\", \"location\"]\n\n def __init__(self, **kwargs):\n user = kwargs.pop(\"user\")\n can_publish_for_groups = get_objects_for_user(user, \"publish_event_for_group\", klass=Group)\n\n if (event := kwargs.get(\"instance\", None)) is not None:\n self.eventtype = event.type\n responsible_users = get_users_with_perms(\n event, only_with_perms_in=[\"change_event\"], with_group_users=False\n )\n responsible_groups = get_groups_with_perms(event, only_with_perms_in=[\"change_event\"])\n visible_for = get_groups_with_perms(event, only_with_perms_in=[\"view_event\"]).exclude(\n id__in=responsible_groups\n )\n\n self.locked_visible_for_groups = set(visible_for.exclude(id__in=can_publish_for_groups))\n kwargs[\"initial\"] = {\n \"visible_for\": visible_for.filter(id__in=can_publish_for_groups),\n \"responsible_users\": responsible_users,\n \"responsible_groups\": responsible_groups,\n **kwargs.get(\"initial\", {}),\n }\n else:\n self.eventtype = kwargs.pop(\"eventtype\")\n kwargs[\"initial\"] = {\n \"responsible_users\": self.eventtype.preferences.get(\"responsible_users\")\n or get_user_model().objects.filter(pk=user.pk),\n \"responsible_groups\": self.eventtype.preferences.get(\"responsible_groups\"),\n \"visible_for\": self.eventtype.preferences.get(\"visible_for\")\n or 
get_objects_for_user(user, \"publish_event_for_group\", klass=Group),\n }\n self.locked_visible_for_groups = set()\n\n super().__init__(**kwargs)\n\n if event is None:\n self.fields.pop(\"type\")\n self.fields[\"visible_for\"].queryset = can_publish_for_groups\n self.fields[\"visible_for\"].disabled = not can_publish_for_groups\n if self.locked_visible_for_groups:\n self.fields[\"visible_for\"].help_text = _(\n \"Select groups which the event shall be visible for. \"\n \"This event is also visible for <b>{groups}</b>, \"\n \"but you don't have the permission to change visibility \"\n \"for those groups.\"\n ).format(groups=\", \".join(group.name for group in self.locked_visible_for_groups))\n\n def save(self, commit=True):\n if not self.instance.pk:\n self.instance.type = self.eventtype\n event: Event = super().save(commit=commit)\n\n add_log_recorder(event, PermissionLogRecorder(\"view_event\", _(\"Visible for\")))\n add_log_recorder(event, PermissionLogRecorder(\"change_event\", _(\"Responsibles\")))\n\n # delete existing permissions\n # (better implement https://github.com/django-guardian/django-guardian/issues/654)\n for group in get_groups_with_perms(\n event, only_with_perms_in=[\"view_event\", \"change_event\"]\n ):\n remove_perm(\"view_event\", group, event)\n remove_perm(\"change_event\", group, event)\n for user in get_users_with_perms(event, only_with_perms_in=[\"view_event\", \"change_event\"]):\n remove_perm(\"view_event\", user, event)\n remove_perm(\"change_event\", user, event)\n\n # assign designated permissions\n assign_perm(\n \"view_event\",\n Group.objects.filter(\n Q(id__in=self.cleaned_data[\"visible_for\"])\n | Q(id__in=self.cleaned_data[\"responsible_groups\"])\n | Q(id__in=(g.id for g in self.locked_visible_for_groups))\n ),\n event,\n )\n assign_perm(\"change_event\", self.cleaned_data[\"responsible_groups\"], event)\n assign_perm(\"change_event\", self.cleaned_data[\"responsible_users\"], event)\n\n # Assign view_event to responsible users and to non-responsible users\n # that already have some sort of participation for the event\n # (-> they saw and interacted with it)\n # We can't just do users that aren't included by group permissions,\n # as they might get removed from that group.\n assign_perm(\n \"view_event\",\n UserProfile.objects.filter(\n Q(pk__in=self.cleaned_data[\"responsible_users\"])\n | Q(\n pk__in=LocalParticipation.objects.filter(\n shift_id__in=event.shifts.all()\n ).values_list(\"user\", flat=True)\n )\n ),\n event,\n )\n\n update_log(event, InstanceActionType.CHANGE)\n return event\n\n\nclass ShiftForm(forms.ModelForm):\n date = forms.DateField(widget=CustomDateInput, label=_(\"Date\"))\n meeting_time = forms.TimeField(widget=CustomTimeInput, label=_(\"Meeting time\"))\n start_time = forms.TimeField(widget=CustomTimeInput, label=_(\"Start time\"))\n end_time = forms.TimeField(widget=CustomTimeInput, label=_(\"End time\"))\n\n field_order = [\"date\", \"meeting_time\", \"start_time\", \"end_time\", \"signup_method_slug\"]\n\n class Meta:\n model = Shift\n fields = [\"meeting_time\", \"start_time\", \"end_time\", \"signup_method_slug\"]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n signup_methods = list(enabled_signup_methods())\n\n # make sure that if a shift uses a disabled but installed method, it is also available in the list\n if self.instance and (method_slug := self.instance.signup_method_slug):\n if method_slug not in map(operator.attrgetter(\"slug\"), signup_methods):\n try:\n 
signup_methods.append(signup_method_from_slug(method_slug, self.instance))\n except ValueError: # not installed\n pass\n\n self.fields[\"signup_method_slug\"].widget = forms.Select(\n choices=((method.slug, method.verbose_name) for method in signup_methods)\n )\n # this recorder may cause db queries, so it's added on Shift init, but here in the form\n # pylint: disable=undefined-variable\n add_log_recorder(\n self.instance,\n DerivedFieldsLogRecorder(\n lambda shift: method.get_signup_info() if (method := shift.signup_method) else {}\n ),\n )\n\n def clean(self):\n cleaned_data = super().clean()\n if {\"date\", \"meeting_time\", \"start_time\", \"end_time\"} <= set(cleaned_data.keys()):\n cleaned_data[\"meeting_time\"] = make_aware(\n datetime.combine(cleaned_data[\"date\"], cleaned_data[\"meeting_time\"])\n )\n cleaned_data[\"start_time\"] = make_aware(\n datetime.combine(cleaned_data[\"date\"], cleaned_data[\"start_time\"])\n )\n cleaned_data[\"end_time\"] = make_aware(\n datetime.combine(self.cleaned_data[\"date\"], cleaned_data[\"end_time\"])\n )\n if self.cleaned_data[\"end_time\"] <= self.cleaned_data[\"start_time\"]:\n cleaned_data[\"end_time\"] = cleaned_data[\"end_time\"] + timedelta(days=1)\n if not cleaned_data[\"meeting_time\"] <= cleaned_data[\"start_time\"]:\n raise ValidationError(_(\"Meeting time must not be after start time!\"))\n return cleaned_data\n\n\nclass EventDuplicationForm(forms.Form):\n start_date = forms.DateField(\n widget=CustomDateInput,\n initial=date.today(),\n help_text=_(\n \"This date will be used as the start date for recurring events that you create below, e.g. daily events will be created from this date onwards.\"\n ),\n label=_(\"Start date\"),\n )\n recurrence = RecurrenceField(required=False)\n\n\nclass EventTypeForm(forms.ModelForm):\n class Meta:\n model = EventType\n fields = [\"title\", \"color\"]\n widgets = {\"color\": ColorInput()}\n\n def clean_color(self):\n regex = re.compile(r\"#[a-fA-F\\d]{6}\")\n if not regex.match(self.cleaned_data[\"color\"]):\n raise ValidationError(_(\"You need to enter a valid color\"))\n return self.cleaned_data[\"color\"]\n\n\nclass EventTypePreferenceForm(PreferenceForm):\n registry = event_type_preference_registry\n\n\nclass BasePluginFormMixin:\n @property\n def heading(self):\n raise NotImplementedError\n\n def render(self):\n try:\n self.helper.form_tag = False\n except AttributeError:\n self.helper = FormHelper(self)\n self.helper.form_tag = False\n return render_to_string(\"core/fragments/plugin_form.html\", context={\"form\": self})\n\n def is_function_active(self):\n \"\"\"\n When building forms for additional features, return whether that feature is enabled for the form instance.\n With the default template, if this is True, the collapse is expanded on page load.\n \"\"\"\n return False\n\n\nclass EventNotificationForm(forms.Form):\n NEW_EVENT = \"new\"\n REMINDER = \"remind\"\n PARTICIPANTS = \"participants\"\n action = forms.ChoiceField(\n choices=[\n (NEW_EVENT, _(\"Send notification about new event to everyone\")),\n (REMINDER, _(\"Send reminder to everyone that is not participating\")),\n (PARTICIPANTS, _(\"Send a message to all participants\")),\n ],\n widget=forms.RadioSelect,\n label=False,\n )\n mail_content = forms.CharField(required=False, widget=forms.Textarea, label=_(\"Mail content\"))\n\n def __init__(self, *args, **kwargs):\n self.event = kwargs.pop(\"event\")\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.layout = Layout(\n Field(\"action\"),\n 
Field(\"mail_content\"),\n FormActions(\n Submit(\"submit\", _(\"Send\"), css_class=\"float-end\"),\n AbortLink(href=self.event.get_absolute_url()),\n ),\n )\n\n def clean(self):\n if (\n self.cleaned_data.get(\"action\") == self.PARTICIPANTS\n and not self.cleaned_data[\"mail_content\"]\n ):\n raise ValidationError(_(\"You cannot send an empty mail.\"))\n return super().clean()\n", "path": "ephios/core/forms/events.py"}]} | 3,600 | 287 |
gh_patches_debug_39470 | rasdani/github-patches | git_diff | microsoft__onnxscript-120 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set up linters for the project
It helps if we set up linters early in the development process (fewer big PRs for fixes in the future). We may consider: mypy, pylint, black, isort, pydocstyle, flake8, bandit and xdoctest.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License.
4 # --------------------------------------------------------------------------
5
6 # -*- coding: utf-8 -*-
7
8 from distutils.core import setup
9 from setuptools import find_packages
10 import os
11 this = os.path.dirname(__file__)
12
13 with open(os.path.join(this, "requirements.txt"), "r") as f:
14 requirements = [_ for _ in [_.strip("\r\n ")
15 for _ in f.readlines()] if _ is not None]
16
17 packages = find_packages()
18 assert packages
19
20 # read version from the package file.
21 version_str = '1.0.0'
22 with (open(os.path.join(this, 'onnxscript/__init__.py'), "r")) as f:
23 line = [_ for _ in [_.strip("\r\n ")
24 for _ in f.readlines()] if _.startswith("__version__")]
25 if len(line) > 0:
26 version_str = line[0].split('=')[1].strip('" ')
27
28 README = os.path.join(os.getcwd(), "README.md")
29 with open(README) as f:
30 long_description = f.read()
31 start_pos = long_description.find('## Contributing')
32 if start_pos >= 0:
33 long_description = long_description[:start_pos]
34
35 setup(
36 name='onnx-script',
37 version=version_str,
38 description="Authoring ONNX functions in Python",
39 long_description=long_description,
40 long_description_content_type='text/markdown',
41 license='Apache License v2.0',
42 author='Microsoft Corporation',
43 author_email='[email protected]',
44 url='https://github.com/onnx/onnx-script',
45 packages=packages,
46 include_package_data=True,
47 install_requires=requirements,
48 classifiers=[
49 'Development Status :: 4 - Beta',
50 'Environment :: Console',
51 'Intended Audience :: Developers',
52 'Operating System :: MacOS :: MacOS X',
53 'Operating System :: Microsoft :: Windows',
54 'Programming Language :: Python',
55 'Programming Language :: Python :: 3.7',
56 'Programming Language :: Python :: 3.8',
57 'Programming Language :: Python :: 3.9',
58 'License :: OSI Approved :: Apache Software License'],
59 )
60
```
Path: `onnxscript/__init__.py`
Content:
```
1 # -------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License.
4 # --------------------------------------------------------------------------
5
6 __version__ = '0.1'
7
8 from .main import script, export_onnx_lib, OnnxFunction
9 from .backend.onnx_export import export2python as proto2python
10
11 __all__ = [script, export_onnx_lib, OnnxFunction, proto2python]
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onnxscript/__init__.py b/onnxscript/__init__.py
--- a/onnxscript/__init__.py
+++ b/onnxscript/__init__.py
@@ -3,9 +3,22 @@
# Licensed under the MIT License.
# --------------------------------------------------------------------------
-__version__ = '0.1'
-
+import sys
from .main import script, export_onnx_lib, OnnxFunction
from .backend.onnx_export import export2python as proto2python
-__all__ = [script, export_onnx_lib, OnnxFunction, proto2python]
+if sys.version_info[0:2] >= (3, 8):
+ import importlib.metadata as importlib_metadata
+else:
+ # TODO: Remove this when Python 3.7 is deprecated
+ import importlib_metadata
+
+try:
+ # TODO: should we algin the folder name with package name?
+ # It's onnxscript and onnx-script now. That way, we can use __package__ here.
+ __version__ = importlib_metadata.version("onnx-script")
+except importlib_metadata.PackageNotFoundError:
+ __version__ = None
+
+
+__all__ = ["script", "export_onnx_lib", "OnnxFunction", "proto2python"]
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,55 +5,26 @@
# -*- coding: utf-8 -*-
-from distutils.core import setup
-from setuptools import find_packages
import os
-this = os.path.dirname(__file__)
+import setuptools
-with open(os.path.join(this, "requirements.txt"), "r") as f:
- requirements = [_ for _ in [_.strip("\r\n ")
- for _ in f.readlines()] if _ is not None]
+this = os.path.dirname(__file__)
-packages = find_packages()
+packages = setuptools.find_packages()
assert packages
-# read version from the package file.
-version_str = '1.0.0'
-with (open(os.path.join(this, 'onnxscript/__init__.py'), "r")) as f:
- line = [_ for _ in [_.strip("\r\n ")
- for _ in f.readlines()] if _.startswith("__version__")]
- if len(line) > 0:
- version_str = line[0].split('=')[1].strip('" ')
-
README = os.path.join(os.getcwd(), "README.md")
-with open(README) as f:
+with open(README, encoding="utf-8") as f:
long_description = f.read()
start_pos = long_description.find('## Contributing')
if start_pos >= 0:
long_description = long_description[:start_pos]
-setup(
- name='onnx-script',
- version=version_str,
- description="Authoring ONNX functions in Python",
+setuptools.setup(
long_description=long_description,
long_description_content_type='text/markdown',
- license='Apache License v2.0',
- author='Microsoft Corporation',
- author_email='[email protected]',
url='https://github.com/onnx/onnx-script',
packages=packages,
include_package_data=True,
- install_requires=requirements,
- classifiers=[
- 'Development Status :: 4 - Beta',
- 'Environment :: Console',
- 'Intended Audience :: Developers',
- 'Operating System :: MacOS :: MacOS X',
- 'Operating System :: Microsoft :: Windows',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8',
- 'Programming Language :: Python :: 3.9',
- 'License :: OSI Approved :: Apache Software License'],
+ package_data={"onnx-script": ["py.typed"], "onnx": ["py.typed"],},
)
| {"golden_diff": "diff --git a/onnxscript/__init__.py b/onnxscript/__init__.py\n--- a/onnxscript/__init__.py\n+++ b/onnxscript/__init__.py\n@@ -3,9 +3,22 @@\n # Licensed under the MIT License.\n # --------------------------------------------------------------------------\n \n-__version__ = '0.1'\n-\n+import sys\n from .main import script, export_onnx_lib, OnnxFunction\n from .backend.onnx_export import export2python as proto2python\n \n-__all__ = [script, export_onnx_lib, OnnxFunction, proto2python]\n+if sys.version_info[0:2] >= (3, 8):\n+ import importlib.metadata as importlib_metadata\n+else:\n+ # TODO: Remove this when Python 3.7 is deprecated\n+ import importlib_metadata\n+\n+try:\n+ # TODO: should we algin the folder name with package name?\n+ # It's onnxscript and onnx-script now. That way, we can use __package__ here.\n+ __version__ = importlib_metadata.version(\"onnx-script\")\n+except importlib_metadata.PackageNotFoundError:\n+ __version__ = None\n+\n+\n+__all__ = [\"script\", \"export_onnx_lib\", \"OnnxFunction\", \"proto2python\"]\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,55 +5,26 @@\n \n # -*- coding: utf-8 -*-\n \n-from distutils.core import setup\n-from setuptools import find_packages\n import os\n-this = os.path.dirname(__file__)\n+import setuptools\n \n-with open(os.path.join(this, \"requirements.txt\"), \"r\") as f:\n- requirements = [_ for _ in [_.strip(\"\\r\\n \")\n- for _ in f.readlines()] if _ is not None]\n+this = os.path.dirname(__file__)\n \n-packages = find_packages()\n+packages = setuptools.find_packages()\n assert packages\n \n-# read version from the package file.\n-version_str = '1.0.0'\n-with (open(os.path.join(this, 'onnxscript/__init__.py'), \"r\")) as f:\n- line = [_ for _ in [_.strip(\"\\r\\n \")\n- for _ in f.readlines()] if _.startswith(\"__version__\")]\n- if len(line) > 0:\n- version_str = line[0].split('=')[1].strip('\" ')\n-\n README = os.path.join(os.getcwd(), \"README.md\")\n-with open(README) as f:\n+with open(README, encoding=\"utf-8\") as f:\n long_description = f.read()\n start_pos = long_description.find('## Contributing')\n if start_pos >= 0:\n long_description = long_description[:start_pos]\n \n-setup(\n- name='onnx-script',\n- version=version_str,\n- description=\"Authoring ONNX functions in Python\",\n+setuptools.setup(\n long_description=long_description,\n long_description_content_type='text/markdown',\n- license='Apache License v2.0',\n- author='Microsoft Corporation',\n- author_email='[email protected]',\n url='https://github.com/onnx/onnx-script',\n packages=packages,\n include_package_data=True,\n- install_requires=requirements,\n- classifiers=[\n- 'Development Status :: 4 - Beta',\n- 'Environment :: Console',\n- 'Intended Audience :: Developers',\n- 'Operating System :: MacOS :: MacOS X',\n- 'Operating System :: Microsoft :: Windows',\n- 'Programming Language :: Python',\n- 'Programming Language :: Python :: 3.7',\n- 'Programming Language :: Python :: 3.8',\n- 'Programming Language :: Python :: 3.9',\n- 'License :: OSI Approved :: Apache Software License'],\n+ package_data={\"onnx-script\": [\"py.typed\"], \"onnx\": [\"py.typed\"],},\n )\n", "issue": "Set up linters for the project\nIt helps if we set up linters early in the development process (less big PRs for fixes in the future). 
We may consider: mypy, pylint, black, isort, pydocstyle, flake8, bandit and xdoctest.\n", "before_files": [{"content": "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# --------------------------------------------------------------------------\n\n# -*- coding: utf-8 -*-\n\nfrom distutils.core import setup\nfrom setuptools import find_packages\nimport os\nthis = os.path.dirname(__file__)\n\nwith open(os.path.join(this, \"requirements.txt\"), \"r\") as f:\n requirements = [_ for _ in [_.strip(\"\\r\\n \")\n for _ in f.readlines()] if _ is not None]\n\npackages = find_packages()\nassert packages\n\n# read version from the package file.\nversion_str = '1.0.0'\nwith (open(os.path.join(this, 'onnxscript/__init__.py'), \"r\")) as f:\n line = [_ for _ in [_.strip(\"\\r\\n \")\n for _ in f.readlines()] if _.startswith(\"__version__\")]\n if len(line) > 0:\n version_str = line[0].split('=')[1].strip('\" ')\n\nREADME = os.path.join(os.getcwd(), \"README.md\")\nwith open(README) as f:\n long_description = f.read()\n start_pos = long_description.find('## Contributing')\n if start_pos >= 0:\n long_description = long_description[:start_pos]\n\nsetup(\n name='onnx-script',\n version=version_str,\n description=\"Authoring ONNX functions in Python\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n license='Apache License v2.0',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/onnx/onnx-script',\n packages=packages,\n include_package_data=True,\n install_requires=requirements,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'License :: OSI Approved :: Apache Software License'],\n)\n", "path": "setup.py"}, {"content": "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# --------------------------------------------------------------------------\n\n__version__ = '0.1'\n\nfrom .main import script, export_onnx_lib, OnnxFunction\nfrom .backend.onnx_export import export2python as proto2python\n\n__all__ = [script, export_onnx_lib, OnnxFunction, proto2python]\n", "path": "onnxscript/__init__.py"}], "after_files": [{"content": "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n# --------------------------------------------------------------------------\n\n# -*- coding: utf-8 -*-\n\nimport os\nimport setuptools\n\nthis = os.path.dirname(__file__)\n\npackages = setuptools.find_packages()\nassert packages\n\nREADME = os.path.join(os.getcwd(), \"README.md\")\nwith open(README, encoding=\"utf-8\") as f:\n long_description = f.read()\n start_pos = long_description.find('## Contributing')\n if start_pos >= 0:\n long_description = long_description[:start_pos]\n\nsetuptools.setup(\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/onnx/onnx-script',\n packages=packages,\n include_package_data=True,\n package_data={\"onnx-script\": [\"py.typed\"], \"onnx\": [\"py.typed\"],},\n)\n", "path": "setup.py"}, {"content": "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# --------------------------------------------------------------------------\n\nimport sys\nfrom .main import script, export_onnx_lib, OnnxFunction\nfrom .backend.onnx_export import export2python as proto2python\n\nif sys.version_info[0:2] >= (3, 8):\n import importlib.metadata as importlib_metadata\nelse:\n # TODO: Remove this when Python 3.7 is deprecated\n import importlib_metadata\n\ntry:\n # TODO: should we algin the folder name with package name?\n # It's onnxscript and onnx-script now. That way, we can use __package__ here.\n __version__ = importlib_metadata.version(\"onnx-script\")\nexcept importlib_metadata.PackageNotFoundError:\n __version__ = None\n\n\n__all__ = [\"script\", \"export_onnx_lib\", \"OnnxFunction\", \"proto2python\"]\n", "path": "onnxscript/__init__.py"}]} | 1,011 | 844 |
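
The patch recorded for this entry drops the hand-rolled parsing of `__version__` from `setup.py` and resolves the version from installed package metadata instead, keeping a single source of truth. The sketch below isolates that lookup; the distribution name `onnx-script` is taken from the patch, while the final `print` is illustrative only.

```python
# Resolve the package version from metadata instead of parsing source files.
import sys

if sys.version_info[:2] >= (3, 8):
    import importlib.metadata as importlib_metadata
else:  # Python 3.7 still needs the importlib_metadata backport
    import importlib_metadata

try:
    __version__ = importlib_metadata.version("onnx-script")
except importlib_metadata.PackageNotFoundError:
    # Running from a source checkout that was never pip-installed.
    __version__ = None

print("resolved version:", __version__)
```
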
gh_patches_debug_1350 | rasdani/github-patches | git_diff | fossasia__open-event-server-7659 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Preset roles deletion is allowed
**Describe the bug**
Preset roles such as "organizer", "coorganizer", etc. should not be deletable from the database, but it is currently possible to delete these entries.
**To Reproduce**
Steps to reproduce the behavior:
1. Hit the delete endpoint for role
2. Choose any of the ids pointing to any of the 7 preset roles
3. Observe that the deletion succeeds
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
**Stacktrace**
<!-- If applicable, add stacktrace to help explain your problem. -->
**Additional details (please complete the following information):**
- OS: [e.g. MacOS, Ubuntu, CentOS]
- Python Version [e.g. `3.5`, `3.6`]
- `HEAD` Commit hash [e.g. `4629c62`]
**Additional context**
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/roles.py`
Content:
```
1 from flask_rest_jsonapi import ResourceDetail, ResourceList
2
3 from app.api.bootstrap import api
4 from app.api.helpers.db import safe_query_kwargs
5 from app.api.helpers.errors import UnprocessableEntityError
6 from app.api.schema.roles import RoleSchema
7 from app.models import db
8 from app.models.role import Role
9 from app.models.role_invite import RoleInvite
10 from app.models.users_events_role import UsersEventsRoles
11
12
13 class RoleList(ResourceList):
14 """
15 List and create role
16 """
17
18 decorators = (api.has_permission('is_admin', methods="POST"),)
19 schema = RoleSchema
20 data_layer = {'session': db.session, 'model': Role}
21
22
23 class RoleDetail(ResourceDetail):
24 """
25 Role detail by id
26 """
27
28 def before_get_object(self, view_kwargs):
29 """
30 before get method to get the resource id for fetching details
31 :param view_kwargs:
32 :return:
33 """
34 if view_kwargs.get('role_invite_id') is not None:
35 role_invite = safe_query_kwargs(RoleInvite, view_kwargs, 'role_invite_id')
36 if role_invite.role_id is not None:
37 view_kwargs['id'] = role_invite.role_id
38 else:
39 view_kwargs['id'] = None
40
41 if view_kwargs.get('users_events_roles_id') is not None:
42 users_events_role = safe_query_kwargs(
43 UsersEventsRoles,
44 view_kwargs,
45 'users_events_roles_id',
46 )
47
48 if users_events_role.role_id is not None:
49 view_kwargs['id'] = users_events_role.role_id
50 else:
51 view_kwargs['id'] = None
52
53 def before_update_object(self, role, data, view_kwargs):
54 """
55 Method to edit object
56 :param role:
57 :param data:
58 :param view_kwargs:
59 :return:
60 """
61 if data.get('name'):
62 if data['name'] in [
63 'owner',
64 'organizer',
65 'coorganizer',
66 'registrar',
67 'moderator',
68 'attendee',
69 'track_organizer',
70 ]:
71 raise UnprocessableEntityError(
72 {'data': 'name'}, "The given name cannot be updated"
73 )
74
75 def before_delete_object(self, obj, kwargs):
76 """
77 method to check proper resource name before deleting
78 :param obj:
79 :param kwargs:
80 :return:
81 """
82 if obj.name in [
83 'owner',
84 'organizer',
85 'coorganizer',
86 'registrar',
87 'moderator',
88 'attendee',
89 'track_organizer',
90 ]:
91 raise UnprocessableEntityError(
92 {'data': 'name'}, "The resource with given name cannot be deleted"
93 )
94
95 decorators = (api.has_permission('is_admin', methods="PATCH,DELETE"),)
96 schema = RoleSchema
97 data_layer = {
98 'session': db.session,
99 'model': Role,
100 'methods': {'before_get_object': before_get_object},
101 }
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/roles.py b/app/api/roles.py
--- a/app/api/roles.py
+++ b/app/api/roles.py
@@ -97,5 +97,8 @@
data_layer = {
'session': db.session,
'model': Role,
- 'methods': {'before_get_object': before_get_object},
+ 'methods': {
+ 'before_get_object': before_get_object,
+ 'before_delete_object': before_delete_object,
+ },
}
| {"golden_diff": "diff --git a/app/api/roles.py b/app/api/roles.py\n--- a/app/api/roles.py\n+++ b/app/api/roles.py\n@@ -97,5 +97,8 @@\n data_layer = {\n 'session': db.session,\n 'model': Role,\n- 'methods': {'before_get_object': before_get_object},\n+ 'methods': {\n+ 'before_get_object': before_get_object,\n+ 'before_delete_object': before_delete_object,\n+ },\n }\n", "issue": "Preset roles deletion is allowed\n**Describe the bug**\r\nCurrently the preset roles like \"organizer, coorganizer etc\" should not be deleted from the db. But right now it is possible to delete these entries.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Hit the delete endpoint for role \r\n2. Choose any of the ids pointing to any of the 7 preset roles\r\n3. You can find deletion to be successful\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Stacktrace**\r\n<!-- If applicable, add stacktrace to help explain your problem. -->\r\n\r\n**Additional details (please complete the following information):**\r\n - OS: [e.g. MacOS, Ubuntu, CentOS]\r\n - Python Version [e.g. `3.5`, `3.6`]\r\n - `HEAD` Commit hash [e.g. `4629c62`]\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query_kwargs\nfrom app.api.helpers.errors import UnprocessableEntityError\nfrom app.api.schema.roles import RoleSchema\nfrom app.models import db\nfrom app.models.role import Role\nfrom app.models.role_invite import RoleInvite\nfrom app.models.users_events_role import UsersEventsRoles\n\n\nclass RoleList(ResourceList):\n \"\"\"\n List and create role\n \"\"\"\n\n decorators = (api.has_permission('is_admin', methods=\"POST\"),)\n schema = RoleSchema\n data_layer = {'session': db.session, 'model': Role}\n\n\nclass RoleDetail(ResourceDetail):\n \"\"\"\n Role detail by id\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('role_invite_id') is not None:\n role_invite = safe_query_kwargs(RoleInvite, view_kwargs, 'role_invite_id')\n if role_invite.role_id is not None:\n view_kwargs['id'] = role_invite.role_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_roles_id') is not None:\n users_events_role = safe_query_kwargs(\n UsersEventsRoles,\n view_kwargs,\n 'users_events_roles_id',\n )\n\n if users_events_role.role_id is not None:\n view_kwargs['id'] = users_events_role.role_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, role, data, view_kwargs):\n \"\"\"\n Method to edit object\n :param role:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if data.get('name'):\n if data['name'] in [\n 'owner',\n 'organizer',\n 'coorganizer',\n 'registrar',\n 'moderator',\n 'attendee',\n 'track_organizer',\n ]:\n raise UnprocessableEntityError(\n {'data': 'name'}, \"The given name cannot be updated\"\n )\n\n def before_delete_object(self, obj, kwargs):\n \"\"\"\n method to check proper resource name before deleting\n :param obj:\n :param kwargs:\n :return:\n \"\"\"\n if obj.name in [\n 'owner',\n 'organizer',\n 'coorganizer',\n 'registrar',\n 'moderator',\n 'attendee',\n 'track_organizer',\n ]:\n raise UnprocessableEntityError(\n {'data': 'name'}, \"The resource with given name cannot 
be deleted\"\n )\n\n decorators = (api.has_permission('is_admin', methods=\"PATCH,DELETE\"),)\n schema = RoleSchema\n data_layer = {\n 'session': db.session,\n 'model': Role,\n 'methods': {'before_get_object': before_get_object},\n }\n", "path": "app/api/roles.py"}], "after_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query_kwargs\nfrom app.api.helpers.errors import UnprocessableEntityError\nfrom app.api.schema.roles import RoleSchema\nfrom app.models import db\nfrom app.models.role import Role\nfrom app.models.role_invite import RoleInvite\nfrom app.models.users_events_role import UsersEventsRoles\n\n\nclass RoleList(ResourceList):\n \"\"\"\n List and create role\n \"\"\"\n\n decorators = (api.has_permission('is_admin', methods=\"POST\"),)\n schema = RoleSchema\n data_layer = {'session': db.session, 'model': Role}\n\n\nclass RoleDetail(ResourceDetail):\n \"\"\"\n Role detail by id\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('role_invite_id') is not None:\n role_invite = safe_query_kwargs(RoleInvite, view_kwargs, 'role_invite_id')\n if role_invite.role_id is not None:\n view_kwargs['id'] = role_invite.role_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_roles_id') is not None:\n users_events_role = safe_query_kwargs(\n UsersEventsRoles,\n view_kwargs,\n 'users_events_roles_id',\n )\n\n if users_events_role.role_id is not None:\n view_kwargs['id'] = users_events_role.role_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, role, data, view_kwargs):\n \"\"\"\n Method to edit object\n :param role:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if data.get('name'):\n if data['name'] in [\n 'owner',\n 'organizer',\n 'coorganizer',\n 'registrar',\n 'moderator',\n 'attendee',\n 'track_organizer',\n ]:\n raise UnprocessableEntityError(\n {'data': 'name'}, \"The given name cannot be updated\"\n )\n\n def before_delete_object(self, obj, kwargs):\n \"\"\"\n method to check proper resource name before deleting\n :param obj:\n :param kwargs:\n :return:\n \"\"\"\n if obj.name in [\n 'owner',\n 'organizer',\n 'coorganizer',\n 'registrar',\n 'moderator',\n 'attendee',\n 'track_organizer',\n ]:\n raise UnprocessableEntityError(\n {'data': 'name'}, \"The resource with given name cannot be deleted\"\n )\n\n decorators = (api.has_permission('is_admin', methods=\"PATCH,DELETE\"),)\n schema = RoleSchema\n data_layer = {\n 'session': db.session,\n 'model': Role,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_delete_object': before_delete_object,\n },\n }\n", "path": "app/api/roles.py"}]} | 1,299 | 109 |
gh_patches_debug_7221 | rasdani/github-patches | git_diff | StackStorm__st2-3038 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cache filters API response for web UI
As the number of actions, rules, etc. has grown in our StackStorm cluster, the UI is becoming slower. In particular, the filters view (which we rely on heavily with this many executions) is very slow to become visible and usable. It might help to cache this `/api/v1/executions/views/filters` API response and asynchronously reload this value outside of the request thread, since this is slowly-changing data.
Our typical workflow is to
1. load the main execution page
2. wait for the filters to appear
3. apply some set of filters
4. wait for the next page to load
Here's a waterfall showing a 20s load time for the filters response. This is pretty common for us now.

For reference, we have 572 rules, 1200 actions, 143 triggers, 19 trigger types, and 600k+ executions.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2common/st2common/models/db/execution.py`
Content:
```
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import copy
17
18 import mongoengine as me
19
20 from st2common import log as logging
21 from st2common.models.db import stormbase
22 from st2common.fields import ComplexDateTimeField
23 from st2common.util import date as date_utils
24 from st2common.util.secrets import get_secret_parameters
25 from st2common.util.secrets import mask_secret_parameters
26 from st2common.constants.types import ResourceType
27
28 __all__ = [
29 'ActionExecutionDB'
30 ]
31
32
33 LOG = logging.getLogger(__name__)
34
35
36 class ActionExecutionDB(stormbase.StormFoundationDB):
37 RESOURCE_TYPE = ResourceType.EXECUTION
38 UID_FIELDS = ['id']
39
40 trigger = stormbase.EscapedDictField()
41 trigger_type = stormbase.EscapedDictField()
42 trigger_instance = stormbase.EscapedDictField()
43 rule = stormbase.EscapedDictField()
44 action = stormbase.EscapedDictField(required=True)
45 runner = stormbase.EscapedDictField(required=True)
46 # Only the diff between the liveaction type and what is replicated
47 # in the ActionExecutionDB object.
48 liveaction = stormbase.EscapedDictField(required=True)
49 status = me.StringField(
50 required=True,
51 help_text='The current status of the liveaction.')
52 start_timestamp = ComplexDateTimeField(
53 default=date_utils.get_datetime_utc_now,
54 help_text='The timestamp when the liveaction was created.')
55 end_timestamp = ComplexDateTimeField(
56 help_text='The timestamp when the liveaction has finished.')
57 parameters = stormbase.EscapedDynamicField(
58 default={},
59 help_text='The key-value pairs passed as to the action runner & action.')
60 result = stormbase.EscapedDynamicField(
61 default={},
62 help_text='Action defined result.')
63 context = me.DictField(
64 default={},
65 help_text='Contextual information on the action execution.')
66 parent = me.StringField()
67 children = me.ListField(field=me.StringField())
68 log = me.ListField(field=me.DictField())
69 # Do not use URLField for web_url. If host doesn't have FQDN set, URLField validation blows.
70 web_url = me.StringField(required=False)
71
72 meta = {
73 'indexes': [
74 {'fields': ['rule.ref']},
75 {'fields': ['action.ref']},
76 {'fields': ['liveaction.id']},
77 {'fields': ['start_timestamp']},
78 {'fields': ['end_timestamp']},
79 {'fields': ['status']},
80 {'fields': ['parent']},
81 {'fields': ['-start_timestamp', 'action.ref', 'status']}
82 ]
83 }
84
85 def get_uid(self):
86 # TODO Construct od from non id field:
87 uid = [self.RESOURCE_TYPE, str(self.id)]
88 return ':'.join(uid)
89
90 def mask_secrets(self, value):
91 result = copy.deepcopy(value)
92
93 execution_parameters = value['parameters']
94 parameters = {}
95 # pylint: disable=no-member
96 parameters.update(value.get('action', {}).get('parameters', {}))
97 parameters.update(value.get('runner', {}).get('runner_parameters', {}))
98
99 secret_parameters = get_secret_parameters(parameters=parameters)
100 result['parameters'] = mask_secret_parameters(parameters=execution_parameters,
101 secret_parameters=secret_parameters)
102 return result
103
104 def get_masked_parameters(self):
105 """
106 Retrieve parameters with the secrets masked.
107
108 :rtype: ``dict``
109 """
110 serializable_dict = self.to_serializable_dict(mask_secrets=True)
111 return serializable_dict['parameters']
112
113
114 MODELS = [ActionExecutionDB]
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/st2common/st2common/models/db/execution.py b/st2common/st2common/models/db/execution.py
--- a/st2common/st2common/models/db/execution.py
+++ b/st2common/st2common/models/db/execution.py
@@ -78,6 +78,11 @@
{'fields': ['end_timestamp']},
{'fields': ['status']},
{'fields': ['parent']},
+ {'fields': ['rule.name']},
+ {'fields': ['runner.name']},
+ {'fields': ['trigger.name']},
+ {'fields': ['trigger_type.name']},
+ {'fields': ['context.user']},
{'fields': ['-start_timestamp', 'action.ref', 'status']}
]
}
| {"golden_diff": "diff --git a/st2common/st2common/models/db/execution.py b/st2common/st2common/models/db/execution.py\n--- a/st2common/st2common/models/db/execution.py\n+++ b/st2common/st2common/models/db/execution.py\n@@ -78,6 +78,11 @@\n {'fields': ['end_timestamp']},\n {'fields': ['status']},\n {'fields': ['parent']},\n+ {'fields': ['rule.name']},\n+ {'fields': ['runner.name']},\n+ {'fields': ['trigger.name']},\n+ {'fields': ['trigger_type.name']},\n+ {'fields': ['context.user']},\n {'fields': ['-start_timestamp', 'action.ref', 'status']}\n ]\n }\n", "issue": "Cache filters API response for web UI\nAs the number of actions, rules, etc has grown in our StackStorm cluster, the UI is becoming slower. In particular, the filters view (which we rely on heavily with this many executions) is very slow to become visible and usable. It might help to cache this `/api/v1/executions/views/filters` API response and asynchronously reload this value outside of the request thread, since this is slowly-changing data.\n\nOur typical workflow is to\n1. load the main execution page\n2. wait for the filters to appear\n3. apply some set of filters\n4. wait for the next page to load\n\nHere's a waterfall showing a 20s load time for the filters response. This is pretty common for us now.\n\n\nFor reference, we have 572 rules, 1200 actions, 143 triggers, 19 trigger types, and 600k+ executions.\n\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\n\nimport mongoengine as me\n\nfrom st2common import log as logging\nfrom st2common.models.db import stormbase\nfrom st2common.fields import ComplexDateTimeField\nfrom st2common.util import date as date_utils\nfrom st2common.util.secrets import get_secret_parameters\nfrom st2common.util.secrets import mask_secret_parameters\nfrom st2common.constants.types import ResourceType\n\n__all__ = [\n 'ActionExecutionDB'\n]\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass ActionExecutionDB(stormbase.StormFoundationDB):\n RESOURCE_TYPE = ResourceType.EXECUTION\n UID_FIELDS = ['id']\n\n trigger = stormbase.EscapedDictField()\n trigger_type = stormbase.EscapedDictField()\n trigger_instance = stormbase.EscapedDictField()\n rule = stormbase.EscapedDictField()\n action = stormbase.EscapedDictField(required=True)\n runner = stormbase.EscapedDictField(required=True)\n # Only the diff between the liveaction type and what is replicated\n # in the ActionExecutionDB object.\n liveaction = stormbase.EscapedDictField(required=True)\n status = me.StringField(\n required=True,\n help_text='The current status of the liveaction.')\n start_timestamp = ComplexDateTimeField(\n default=date_utils.get_datetime_utc_now,\n help_text='The timestamp when the liveaction was created.')\n end_timestamp = ComplexDateTimeField(\n help_text='The timestamp when the liveaction has finished.')\n parameters = stormbase.EscapedDynamicField(\n default={},\n help_text='The key-value pairs passed as to the action runner & action.')\n result = stormbase.EscapedDynamicField(\n default={},\n help_text='Action defined result.')\n context = me.DictField(\n default={},\n help_text='Contextual information on the action execution.')\n parent = me.StringField()\n children = me.ListField(field=me.StringField())\n log = me.ListField(field=me.DictField())\n # Do not use URLField for web_url. 
If host doesn't have FQDN set, URLField validation blows.\n web_url = me.StringField(required=False)\n\n meta = {\n 'indexes': [\n {'fields': ['rule.ref']},\n {'fields': ['action.ref']},\n {'fields': ['liveaction.id']},\n {'fields': ['start_timestamp']},\n {'fields': ['end_timestamp']},\n {'fields': ['status']},\n {'fields': ['parent']},\n {'fields': ['-start_timestamp', 'action.ref', 'status']}\n ]\n }\n\n def get_uid(self):\n # TODO Construct od from non id field:\n uid = [self.RESOURCE_TYPE, str(self.id)]\n return ':'.join(uid)\n\n def mask_secrets(self, value):\n result = copy.deepcopy(value)\n\n execution_parameters = value['parameters']\n parameters = {}\n # pylint: disable=no-member\n parameters.update(value.get('action', {}).get('parameters', {}))\n parameters.update(value.get('runner', {}).get('runner_parameters', {}))\n\n secret_parameters = get_secret_parameters(parameters=parameters)\n result['parameters'] = mask_secret_parameters(parameters=execution_parameters,\n secret_parameters=secret_parameters)\n return result\n\n def get_masked_parameters(self):\n \"\"\"\n Retrieve parameters with the secrets masked.\n\n :rtype: ``dict``\n \"\"\"\n serializable_dict = self.to_serializable_dict(mask_secrets=True)\n return serializable_dict['parameters']\n\n\nMODELS = [ActionExecutionDB]\n", "path": "st2common/st2common/models/db/execution.py"}], "after_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\n\nimport mongoengine as me\n\nfrom st2common import log as logging\nfrom st2common.models.db import stormbase\nfrom st2common.fields import ComplexDateTimeField\nfrom st2common.util import date as date_utils\nfrom st2common.util.secrets import get_secret_parameters\nfrom st2common.util.secrets import mask_secret_parameters\nfrom st2common.constants.types import ResourceType\n\n__all__ = [\n 'ActionExecutionDB'\n]\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass ActionExecutionDB(stormbase.StormFoundationDB):\n RESOURCE_TYPE = ResourceType.EXECUTION\n UID_FIELDS = ['id']\n\n trigger = stormbase.EscapedDictField()\n trigger_type = stormbase.EscapedDictField()\n trigger_instance = stormbase.EscapedDictField()\n rule = stormbase.EscapedDictField()\n action = stormbase.EscapedDictField(required=True)\n runner = stormbase.EscapedDictField(required=True)\n # Only the diff between the liveaction type and what is replicated\n # in the ActionExecutionDB object.\n liveaction = stormbase.EscapedDictField(required=True)\n status = me.StringField(\n required=True,\n help_text='The current status of the liveaction.')\n start_timestamp = ComplexDateTimeField(\n default=date_utils.get_datetime_utc_now,\n help_text='The timestamp when the liveaction was created.')\n end_timestamp = ComplexDateTimeField(\n help_text='The timestamp when the 
liveaction has finished.')\n parameters = stormbase.EscapedDynamicField(\n default={},\n help_text='The key-value pairs passed as to the action runner & action.')\n result = stormbase.EscapedDynamicField(\n default={},\n help_text='Action defined result.')\n context = me.DictField(\n default={},\n help_text='Contextual information on the action execution.')\n parent = me.StringField()\n children = me.ListField(field=me.StringField())\n log = me.ListField(field=me.DictField())\n # Do not use URLField for web_url. If host doesn't have FQDN set, URLField validation blows.\n web_url = me.StringField(required=False)\n\n meta = {\n 'indexes': [\n {'fields': ['rule.ref']},\n {'fields': ['action.ref']},\n {'fields': ['liveaction.id']},\n {'fields': ['start_timestamp']},\n {'fields': ['end_timestamp']},\n {'fields': ['status']},\n {'fields': ['parent']},\n {'fields': ['rule.name']},\n {'fields': ['runner.name']},\n {'fields': ['trigger.name']},\n {'fields': ['trigger_type.name']},\n {'fields': ['context.user']},\n {'fields': ['-start_timestamp', 'action.ref', 'status']}\n ]\n }\n\n def get_uid(self):\n # TODO Construct od from non id field:\n uid = [self.RESOURCE_TYPE, str(self.id)]\n return ':'.join(uid)\n\n def mask_secrets(self, value):\n result = copy.deepcopy(value)\n\n execution_parameters = value['parameters']\n parameters = {}\n # pylint: disable=no-member\n parameters.update(value.get('action', {}).get('parameters', {}))\n parameters.update(value.get('runner', {}).get('runner_parameters', {}))\n\n secret_parameters = get_secret_parameters(parameters=parameters)\n result['parameters'] = mask_secret_parameters(parameters=execution_parameters,\n secret_parameters=secret_parameters)\n return result\n\n def get_masked_parameters(self):\n \"\"\"\n Retrieve parameters with the secrets masked.\n\n :rtype: ``dict``\n \"\"\"\n serializable_dict = self.to_serializable_dict(mask_secrets=True)\n return serializable_dict['parameters']\n\n\nMODELS = [ActionExecutionDB]\n", "path": "st2common/st2common/models/db/execution.py"}]} | 1,707 | 156 |
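
The fix recorded here adds single-field indexes on the nested fields that the executions filters view aggregates over, so those queries no longer scan the whole collection. The pared-down MongoEngine sketch below shows the dotted-path index declaration; plain `DictField`s stand in for StackStorm's `EscapedDictField` and most fields are omitted, so treat it as illustrative only. MongoEngine builds these indexes the first time the collection is touched, and that index build is the main operational cost on a collection of several hundred thousand executions.

```python
# Simplified document showing dotted-path indexes on embedded dict fields.
import mongoengine as me


class ActionExecutionSketch(me.Document):
    status = me.StringField(required=True)
    action = me.DictField(default={})
    rule = me.DictField(default={})
    runner = me.DictField(default={})
    trigger = me.DictField(default={})
    trigger_type = me.DictField(default={})
    context = me.DictField(default={})

    meta = {
        "indexes": [
            # One index per field that the filters view groups on.
            {"fields": ["rule.name"]},
            {"fields": ["runner.name"]},
            {"fields": ["trigger.name"]},
            {"fields": ["trigger_type.name"]},
            {"fields": ["context.user"]},
        ]
    }
```
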
gh_patches_debug_946 | rasdani/github-patches | git_diff | xonsh__xonsh-2332 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
xoreutils: echo fails with KeyError: 'help'
Any `echo` invocation fails:
```shell
$ $XONSH_SHOW_TRACEBACK = True
$ echo
xonsh: To log full traceback to a file set: $XONSH_TRACEBACK_LOGFILE = <filename>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 13061, in run
r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec)
File "/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 12896, in proxy_four
return f(args, stdin, stdout, stderr)
File "/usr/local/lib/python3.6/site-packages/xonsh/xoreutils/echo.py", line 9, in echo
if opts['help']:
KeyError: 'help'
$ echo foo
xonsh: To log full traceback to a file set: $XONSH_TRACEBACK_LOGFILE = <filename>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 13061, in run
r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec)
File "/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 12896, in proxy_four
return f(args, stdin, stdout, stderr)
File "/usr/local/lib/python3.6/site-packages/xonsh/xoreutils/echo.py", line 9, in echo
if opts['help']:
KeyError: 'help'
$ echo "foo"
xonsh: To log full traceback to a file set: $XONSH_TRACEBACK_LOGFILE = <filename>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 13061, in run
r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec)
File "/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 12896, in proxy_four
return f(args, stdin, stdout, stderr)
File "/usr/local/lib/python3.6/site-packages/xonsh/xoreutils/echo.py", line 9, in echo
if opts['help']:
KeyError: 'help'
```
Obviously, the problem is that `help` is looked up but missing: http://xon.sh/_modules/xonsh/xoreutils/echo.html#echo
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/xoreutils/echo.py`
Content:
```
1 """Implements a simple echo command for xonsh."""
2
3
4 def echo(args, stdin, stdout, stderr):
5 """A simple echo command."""
6 opts = _echo_parse_args(args)
7 if opts is None:
8 return
9 if opts['help']:
10 print(ECHO_HELP, file=stdout)
11 return 0
12 ender = opts['end']
13 args = map(str, args)
14 if opts['escapes']:
15 args = map(lambda x: x.encode().decode('unicode_escape'), args)
16 print(*args, end=ender, file=stdout)
17
18
19 def _echo_parse_args(args):
20 out = {'escapes': False, 'end': '\n'}
21 if '-e' in args:
22 args.remove('-e')
23 out['escapes'] = True
24 if '-E' in args:
25 args.remove('-E')
26 out['escapes'] = False
27 if '-n' in args:
28 args.remove('-n')
29 out['end'] = ''
30 if '-h' in args or '--help' in args:
31 out['help'] = True
32 return out
33
34
35 ECHO_HELP = """Usage: echo [OPTIONS]... [STRING]...
36 Echo the STRING(s) to standard output.
37
38 -n do not include the trailing newline
39 -e enable interpretation of backslash escapes
40 -E disable interpretation of backslash escapes (default)
41 -h --help display this message and exit
42
43 This version of echo was written in Python for the xonsh project: http://xon.sh
44 Based on echo from GNU coreutils: http://www.gnu.org/software/coreutils/"""
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/xoreutils/echo.py b/xonsh/xoreutils/echo.py
--- a/xonsh/xoreutils/echo.py
+++ b/xonsh/xoreutils/echo.py
@@ -17,7 +17,7 @@
def _echo_parse_args(args):
- out = {'escapes': False, 'end': '\n'}
+ out = {'escapes': False, 'end': '\n', 'help': False}
if '-e' in args:
args.remove('-e')
out['escapes'] = True
| {"golden_diff": "diff --git a/xonsh/xoreutils/echo.py b/xonsh/xoreutils/echo.py\n--- a/xonsh/xoreutils/echo.py\n+++ b/xonsh/xoreutils/echo.py\n@@ -17,7 +17,7 @@\n \n \n def _echo_parse_args(args):\n- out = {'escapes': False, 'end': '\\n'}\n+ out = {'escapes': False, 'end': '\\n', 'help': False}\n if '-e' in args:\n args.remove('-e')\n out['escapes'] = True\n", "issue": "xoreutils: echo fails with KeyError: 'help'\nAny `echo` invocation fails:\r\n\r\n```shell\r\n$ $XONSH_SHOW_TRACEBACK = True\r\n$ echo\r\nxonsh: To log full traceback to a file set: $XONSH_TRACEBACK_LOGFILE = <filename>\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py\", line 13061, in run\r\n r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec)\r\n File \"/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py\", line 12896, in proxy_four\r\n return f(args, stdin, stdout, stderr)\r\n File \"/usr/local/lib/python3.6/site-packages/xonsh/xoreutils/echo.py\", line 9, in echo\r\n if opts['help']:\r\nKeyError: 'help'\r\n$ echo foo\r\nxonsh: To log full traceback to a file set: $XONSH_TRACEBACK_LOGFILE = <filename>\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py\", line 13061, in run\r\n r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec)\r\n File \"/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py\", line 12896, in proxy_four\r\n return f(args, stdin, stdout, stderr)\r\n File \"/usr/local/lib/python3.6/site-packages/xonsh/xoreutils/echo.py\", line 9, in echo\r\n if opts['help']:\r\nKeyError: 'help'\r\n$ echo \"foo\"\r\nxonsh: To log full traceback to a file set: $XONSH_TRACEBACK_LOGFILE = <filename>\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py\", line 13061, in run\r\n r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec)\r\n File \"/usr/local/lib/python3.6/site-packages/xonsh/__amalgam__.py\", line 12896, in proxy_four\r\n return f(args, stdin, stdout, stderr)\r\n File \"/usr/local/lib/python3.6/site-packages/xonsh/xoreutils/echo.py\", line 9, in echo\r\n if opts['help']:\r\nKeyError: 'help'\r\n```\r\n\r\nObviously, the problem is that `help` is looked up but missing: http://xon.sh/_modules/xonsh/xoreutils/echo.html#echo\n", "before_files": [{"content": "\"\"\"Implements a simple echo command for xonsh.\"\"\"\n\n\ndef echo(args, stdin, stdout, stderr):\n \"\"\"A simple echo command.\"\"\"\n opts = _echo_parse_args(args)\n if opts is None:\n return\n if opts['help']:\n print(ECHO_HELP, file=stdout)\n return 0\n ender = opts['end']\n args = map(str, args)\n if opts['escapes']:\n args = map(lambda x: x.encode().decode('unicode_escape'), args)\n print(*args, end=ender, file=stdout)\n\n\ndef _echo_parse_args(args):\n out = {'escapes': False, 'end': '\\n'}\n if '-e' in args:\n args.remove('-e')\n out['escapes'] = True\n if '-E' in args:\n args.remove('-E')\n out['escapes'] = False\n if '-n' in args:\n args.remove('-n')\n out['end'] = ''\n if '-h' in args or '--help' in args:\n out['help'] = True\n return out\n\n\nECHO_HELP = \"\"\"Usage: echo [OPTIONS]... 
[STRING]...\nEcho the STRING(s) to standard output.\n\n -n do not include the trailing newline\n -e enable interpretation of backslash escapes\n -E disable interpretation of backslash escapes (default)\n -h --help display this message and exit\n\nThis version of echo was written in Python for the xonsh project: http://xon.sh\nBased on echo from GNU coreutils: http://www.gnu.org/software/coreutils/\"\"\"\n", "path": "xonsh/xoreutils/echo.py"}], "after_files": [{"content": "\"\"\"Implements a simple echo command for xonsh.\"\"\"\n\n\ndef echo(args, stdin, stdout, stderr):\n \"\"\"A simple echo command.\"\"\"\n opts = _echo_parse_args(args)\n if opts is None:\n return\n if opts['help']:\n print(ECHO_HELP, file=stdout)\n return 0\n ender = opts['end']\n args = map(str, args)\n if opts['escapes']:\n args = map(lambda x: x.encode().decode('unicode_escape'), args)\n print(*args, end=ender, file=stdout)\n\n\ndef _echo_parse_args(args):\n out = {'escapes': False, 'end': '\\n', 'help': False}\n if '-e' in args:\n args.remove('-e')\n out['escapes'] = True\n if '-E' in args:\n args.remove('-E')\n out['escapes'] = False\n if '-n' in args:\n args.remove('-n')\n out['end'] = ''\n if '-h' in args or '--help' in args:\n out['help'] = True\n return out\n\n\nECHO_HELP = \"\"\"Usage: echo [OPTIONS]... [STRING]...\nEcho the STRING(s) to standard output.\n\n -n do not include the trailing newline\n -e enable interpretation of backslash escapes\n -E disable interpretation of backslash escapes (default)\n -h --help display this message and exit\n\nThis version of echo was written in Python for the xonsh project: http://xon.sh\nBased on echo from GNU coreutils: http://www.gnu.org/software/coreutils/\"\"\"\n", "path": "xonsh/xoreutils/echo.py"}]} | 1,293 | 128 |
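A side note on the xonsh record above: the accepted fix pre-populates `'help': False` in the options dict. An equivalent defensive variant, sketched below purely for illustration (it is not part of the xonsh source), is to read the flag with `dict.get`, which also removes the `KeyError`:

```python
def _echo_parse_args(args):
    # Same flag handling as the xonsh builtin, reproduced only to show the lookup fix.
    out = {'escapes': False, 'end': '\n'}
    if '-e' in args:
        args.remove('-e')
        out['escapes'] = True
    if '-E' in args:
        args.remove('-E')
        out['escapes'] = False
    if '-n' in args:
        args.remove('-n')
        out['end'] = ''
    if '-h' in args or '--help' in args:
        out['help'] = True
    return out


opts = _echo_parse_args(['foo'])
# .get() falls back to False when '-h'/'--help' was never seen, so no KeyError is raised.
if opts.get('help', False):
    print('help requested')
else:
    print('normal echo path')
```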
gh_patches_debug_19919 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1923 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add checkout details to context
* Cookiecutter version: 2.1.1
* Template project url: N/A
* Python version: 3.9.13
* Operating System: macOS
### Description:
I would like the context output available in `{{cookiecutter | pprint }}` to include the `--checkout` argument passed to the CLI, under a '_checkout' field. This will allow me to perform different actions in my post generation hooks if a non-default branch, tag, or commit was passed.
### What I've run:
```
from collections import OrderedDict
cookiecutter_context = {{cookiecutter | pprint }}
print(cookiecutter_context)
```
Related to https://github.com/cookiecutter/cookiecutter/issues/1422
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/main.py`
Content:
```
1 """
2 Main entry point for the `cookiecutter` command.
3
4 The code in this module is also a good example of how to use Cookiecutter as a
5 library rather than a script.
6 """
7 import logging
8 import os
9 import re
10 import sys
11 from copy import copy
12
13 from cookiecutter.config import get_user_config
14 from cookiecutter.exceptions import InvalidModeException
15 from cookiecutter.generate import generate_context, generate_files
16 from cookiecutter.prompt import prompt_for_config
17 from cookiecutter.replay import dump, load
18 from cookiecutter.repository import determine_repo_dir
19 from cookiecutter.utils import rmtree
20
21 logger = logging.getLogger(__name__)
22
23
24 def cookiecutter(
25 template,
26 checkout=None,
27 no_input=False,
28 extra_context=None,
29 replay=None,
30 overwrite_if_exists=False,
31 output_dir='.',
32 config_file=None,
33 default_config=False,
34 password=None,
35 directory=None,
36 skip_if_file_exists=False,
37 accept_hooks=True,
38 keep_project_on_failure=False,
39 ):
40 """
41 Run Cookiecutter just as if using it from the command line.
42
43 :param template: A directory containing a project template directory,
44 or a URL to a git repository.
45 :param checkout: The branch, tag or commit ID to checkout after clone.
46 :param no_input: Do not prompt for user input.
47 Use default values for template parameters taken from `cookiecutter.json`, user
48 config and `extra_dict`. Force a refresh of cached resources.
49 :param extra_context: A dictionary of context that overrides default
50 and user configuration.
51 :param replay: Do not prompt for input, instead read from saved json. If
52 ``True`` read from the ``replay_dir``.
53 if it exists
54 :param output_dir: Where to output the generated project dir into.
55 :param config_file: User configuration file path.
56 :param default_config: Use default values rather than a config file.
57 :param password: The password to use when extracting the repository.
58 :param directory: Relative path to a cookiecutter template in a repository.
59 :param accept_hooks: Accept pre and post hooks if set to `True`.
60 :param keep_project_on_failure: If `True` keep generated project directory even when
61 generation fails
62 """
63 if replay and ((no_input is not False) or (extra_context is not None)):
64 err_msg = (
65 "You can not use both replay and no_input or extra_context "
66 "at the same time."
67 )
68 raise InvalidModeException(err_msg)
69
70 config_dict = get_user_config(
71 config_file=config_file,
72 default_config=default_config,
73 )
74
75 repo_dir, cleanup = determine_repo_dir(
76 template=template,
77 abbreviations=config_dict['abbreviations'],
78 clone_to_dir=config_dict['cookiecutters_dir'],
79 checkout=checkout,
80 no_input=no_input,
81 password=password,
82 directory=directory,
83 )
84 import_patch = _patch_import_path_for_repo(repo_dir)
85
86 template_name = os.path.basename(os.path.abspath(repo_dir))
87
88 if replay:
89 with import_patch:
90 if isinstance(replay, bool):
91 context_from_replayfile = load(config_dict['replay_dir'], template_name)
92 else:
93 path, template_name = os.path.split(os.path.splitext(replay)[0])
94 context_from_replayfile = load(path, template_name)
95
96 context_file = os.path.join(repo_dir, 'cookiecutter.json')
97 logger.debug('context_file is %s', context_file)
98
99 if replay:
100 context = generate_context(
101 context_file=context_file,
102 default_context=config_dict['default_context'],
103 extra_context=None,
104 )
105 logger.debug('replayfile context: %s', context_from_replayfile)
106 items_for_prompting = {
107 k: v
108 for k, v in context['cookiecutter'].items()
109 if k not in context_from_replayfile['cookiecutter'].keys()
110 }
111 context_for_prompting = {}
112 context_for_prompting['cookiecutter'] = items_for_prompting
113 context = context_from_replayfile
114 logger.debug('prompting context: %s', context_for_prompting)
115 else:
116 context = generate_context(
117 context_file=context_file,
118 default_context=config_dict['default_context'],
119 extra_context=extra_context,
120 )
121 context_for_prompting = context
122 # preserve the original cookiecutter options
123 # print(context['cookiecutter'])
124 context['_cookiecutter'] = {
125 k: v for k, v in context['cookiecutter'].items() if not k.startswith("_")
126 }
127
128 # prompt the user to manually configure at the command line.
129 # except when 'no-input' flag is set
130
131 with import_patch:
132 if context_for_prompting['cookiecutter']:
133 context['cookiecutter'].update(
134 prompt_for_config(context_for_prompting, no_input)
135 )
136 if "template" in context["cookiecutter"]:
137 nested_template = re.search(
138 r'\((.*?)\)', context["cookiecutter"]["template"]
139 ).group(1)
140 return cookiecutter(
141 template=os.path.join(template, nested_template),
142 checkout=checkout,
143 no_input=no_input,
144 extra_context=extra_context,
145 replay=replay,
146 overwrite_if_exists=overwrite_if_exists,
147 output_dir=output_dir,
148 config_file=config_file,
149 default_config=default_config,
150 password=password,
151 directory=directory,
152 skip_if_file_exists=skip_if_file_exists,
153 accept_hooks=accept_hooks,
154 keep_project_on_failure=keep_project_on_failure,
155 )
156
157 logger.debug('contex is %s', context)
158
159 # include template dir or url in the context dict
160 context['cookiecutter']['_template'] = template
161
162 # include output+dir in the context dict
163 context['cookiecutter']['_output_dir'] = os.path.abspath(output_dir)
164 # include repo dir or url in the context dict
165 context['cookiecutter']['_repo_dir'] = repo_dir
166
167 dump(config_dict['replay_dir'], template_name, context)
168
169 # Create project from local context and project template.
170 with import_patch:
171 result = generate_files(
172 repo_dir=repo_dir,
173 context=context,
174 overwrite_if_exists=overwrite_if_exists,
175 skip_if_file_exists=skip_if_file_exists,
176 output_dir=output_dir,
177 accept_hooks=accept_hooks,
178 keep_project_on_failure=keep_project_on_failure,
179 )
180
181 # Cleanup (if required)
182 if cleanup:
183 rmtree(repo_dir)
184
185 return result
186
187
188 class _patch_import_path_for_repo:
189 def __init__(self, repo_dir):
190 self._repo_dir = repo_dir
191 self._path = None
192
193 def __enter__(self):
194 self._path = copy(sys.path)
195 sys.path.append(self._repo_dir)
196
197 def __exit__(self, type, value, traceback):
198 sys.path = self._path
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/main.py b/cookiecutter/main.py
--- a/cookiecutter/main.py
+++ b/cookiecutter/main.py
@@ -154,16 +154,20 @@
keep_project_on_failure=keep_project_on_failure,
)
- logger.debug('contex is %s', context)
+ logger.debug('context is %s', context)
# include template dir or url in the context dict
context['cookiecutter']['_template'] = template
# include output+dir in the context dict
context['cookiecutter']['_output_dir'] = os.path.abspath(output_dir)
+
# include repo dir or url in the context dict
context['cookiecutter']['_repo_dir'] = repo_dir
+ # include checkout details in the context dict
+ context['cookiecutter']['_checkout'] = checkout
+
dump(config_dict['replay_dir'], template_name, context)
# Create project from local context and project template.
| {"golden_diff": "diff --git a/cookiecutter/main.py b/cookiecutter/main.py\n--- a/cookiecutter/main.py\n+++ b/cookiecutter/main.py\n@@ -154,16 +154,20 @@\n keep_project_on_failure=keep_project_on_failure,\n )\n \n- logger.debug('contex is %s', context)\n+ logger.debug('context is %s', context)\n \n # include template dir or url in the context dict\n context['cookiecutter']['_template'] = template\n \n # include output+dir in the context dict\n context['cookiecutter']['_output_dir'] = os.path.abspath(output_dir)\n+\n # include repo dir or url in the context dict\n context['cookiecutter']['_repo_dir'] = repo_dir\n \n+ # include checkout details in the context dict\n+ context['cookiecutter']['_checkout'] = checkout\n+\n dump(config_dict['replay_dir'], template_name, context)\n \n # Create project from local context and project template.\n", "issue": "Add checkout details to context\n* Cookiecutter version: 2.1.1\r\n* Template project url: N/A\r\n* Python version: 3.9.13\r\n* Operating System: macOS\r\n\r\n### Description:\r\n\r\nI would like the context output available in `{{cookiecutter | pprint }}` to include the `--checkout` argument passed to the CLI, under a '_checkout' field. This will allow me to perform different actions in my post generation hooks if a non-default branch, tag, or commit was passed.\r\n\r\n### What I've run:\r\n\r\n```\r\nfrom collections import OrderedDict\r\ncookiecutter_context = {{cookiecutter | pprint }}\r\nprint(cookiecutter_context)\r\n```\r\n\r\nRelated to https://github.com/cookiecutter/cookiecutter/issues/1422\n", "before_files": [{"content": "\"\"\"\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\nimport logging\nimport os\nimport re\nimport sys\nfrom copy import copy\n\nfrom cookiecutter.config import get_user_config\nfrom cookiecutter.exceptions import InvalidModeException\nfrom cookiecutter.generate import generate_context, generate_files\nfrom cookiecutter.prompt import prompt_for_config\nfrom cookiecutter.replay import dump, load\nfrom cookiecutter.repository import determine_repo_dir\nfrom cookiecutter.utils import rmtree\n\nlogger = logging.getLogger(__name__)\n\n\ndef cookiecutter(\n template,\n checkout=None,\n no_input=False,\n extra_context=None,\n replay=None,\n overwrite_if_exists=False,\n output_dir='.',\n config_file=None,\n default_config=False,\n password=None,\n directory=None,\n skip_if_file_exists=False,\n accept_hooks=True,\n keep_project_on_failure=False,\n):\n \"\"\"\n Run Cookiecutter just as if using it from the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Do not prompt for user input.\n Use default values for template parameters taken from `cookiecutter.json`, user\n config and `extra_dict`. Force a refresh of cached resources.\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param replay: Do not prompt for input, instead read from saved json. 
If\n ``True`` read from the ``replay_dir``.\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n :param default_config: Use default values rather than a config file.\n :param password: The password to use when extracting the repository.\n :param directory: Relative path to a cookiecutter template in a repository.\n :param accept_hooks: Accept pre and post hooks if set to `True`.\n :param keep_project_on_failure: If `True` keep generated project directory even when\n generation fails\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n config_dict = get_user_config(\n config_file=config_file,\n default_config=default_config,\n )\n\n repo_dir, cleanup = determine_repo_dir(\n template=template,\n abbreviations=config_dict['abbreviations'],\n clone_to_dir=config_dict['cookiecutters_dir'],\n checkout=checkout,\n no_input=no_input,\n password=password,\n directory=directory,\n )\n import_patch = _patch_import_path_for_repo(repo_dir)\n\n template_name = os.path.basename(os.path.abspath(repo_dir))\n\n if replay:\n with import_patch:\n if isinstance(replay, bool):\n context_from_replayfile = load(config_dict['replay_dir'], template_name)\n else:\n path, template_name = os.path.split(os.path.splitext(replay)[0])\n context_from_replayfile = load(path, template_name)\n\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logger.debug('context_file is %s', context_file)\n\n if replay:\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=None,\n )\n logger.debug('replayfile context: %s', context_from_replayfile)\n items_for_prompting = {\n k: v\n for k, v in context['cookiecutter'].items()\n if k not in context_from_replayfile['cookiecutter'].keys()\n }\n context_for_prompting = {}\n context_for_prompting['cookiecutter'] = items_for_prompting\n context = context_from_replayfile\n logger.debug('prompting context: %s', context_for_prompting)\n else:\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n context_for_prompting = context\n # preserve the original cookiecutter options\n # print(context['cookiecutter'])\n context['_cookiecutter'] = {\n k: v for k, v in context['cookiecutter'].items() if not k.startswith(\"_\")\n }\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n\n with import_patch:\n if context_for_prompting['cookiecutter']:\n context['cookiecutter'].update(\n prompt_for_config(context_for_prompting, no_input)\n )\n if \"template\" in context[\"cookiecutter\"]:\n nested_template = re.search(\n r'\\((.*?)\\)', context[\"cookiecutter\"][\"template\"]\n ).group(1)\n return cookiecutter(\n template=os.path.join(template, nested_template),\n checkout=checkout,\n no_input=no_input,\n extra_context=extra_context,\n replay=replay,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir,\n config_file=config_file,\n default_config=default_config,\n password=password,\n directory=directory,\n skip_if_file_exists=skip_if_file_exists,\n accept_hooks=accept_hooks,\n keep_project_on_failure=keep_project_on_failure,\n )\n\n logger.debug('contex is %s', context)\n\n # include template dir or url in the context 
dict\n context['cookiecutter']['_template'] = template\n\n # include output+dir in the context dict\n context['cookiecutter']['_output_dir'] = os.path.abspath(output_dir)\n # include repo dir or url in the context dict\n context['cookiecutter']['_repo_dir'] = repo_dir\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n with import_patch:\n result = generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n skip_if_file_exists=skip_if_file_exists,\n output_dir=output_dir,\n accept_hooks=accept_hooks,\n keep_project_on_failure=keep_project_on_failure,\n )\n\n # Cleanup (if required)\n if cleanup:\n rmtree(repo_dir)\n\n return result\n\n\nclass _patch_import_path_for_repo:\n def __init__(self, repo_dir):\n self._repo_dir = repo_dir\n self._path = None\n\n def __enter__(self):\n self._path = copy(sys.path)\n sys.path.append(self._repo_dir)\n\n def __exit__(self, type, value, traceback):\n sys.path = self._path\n", "path": "cookiecutter/main.py"}], "after_files": [{"content": "\"\"\"\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\nimport logging\nimport os\nimport re\nimport sys\nfrom copy import copy\n\nfrom cookiecutter.config import get_user_config\nfrom cookiecutter.exceptions import InvalidModeException\nfrom cookiecutter.generate import generate_context, generate_files\nfrom cookiecutter.prompt import prompt_for_config\nfrom cookiecutter.replay import dump, load\nfrom cookiecutter.repository import determine_repo_dir\nfrom cookiecutter.utils import rmtree\n\nlogger = logging.getLogger(__name__)\n\n\ndef cookiecutter(\n template,\n checkout=None,\n no_input=False,\n extra_context=None,\n replay=None,\n overwrite_if_exists=False,\n output_dir='.',\n config_file=None,\n default_config=False,\n password=None,\n directory=None,\n skip_if_file_exists=False,\n accept_hooks=True,\n keep_project_on_failure=False,\n):\n \"\"\"\n Run Cookiecutter just as if using it from the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Do not prompt for user input.\n Use default values for template parameters taken from `cookiecutter.json`, user\n config and `extra_dict`. Force a refresh of cached resources.\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param replay: Do not prompt for input, instead read from saved json. 
If\n ``True`` read from the ``replay_dir``.\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n :param default_config: Use default values rather than a config file.\n :param password: The password to use when extracting the repository.\n :param directory: Relative path to a cookiecutter template in a repository.\n :param accept_hooks: Accept pre and post hooks if set to `True`.\n :param keep_project_on_failure: If `True` keep generated project directory even when\n generation fails\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n config_dict = get_user_config(\n config_file=config_file,\n default_config=default_config,\n )\n\n repo_dir, cleanup = determine_repo_dir(\n template=template,\n abbreviations=config_dict['abbreviations'],\n clone_to_dir=config_dict['cookiecutters_dir'],\n checkout=checkout,\n no_input=no_input,\n password=password,\n directory=directory,\n )\n import_patch = _patch_import_path_for_repo(repo_dir)\n\n template_name = os.path.basename(os.path.abspath(repo_dir))\n\n if replay:\n with import_patch:\n if isinstance(replay, bool):\n context_from_replayfile = load(config_dict['replay_dir'], template_name)\n else:\n path, template_name = os.path.split(os.path.splitext(replay)[0])\n context_from_replayfile = load(path, template_name)\n\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logger.debug('context_file is %s', context_file)\n\n if replay:\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=None,\n )\n logger.debug('replayfile context: %s', context_from_replayfile)\n items_for_prompting = {\n k: v\n for k, v in context['cookiecutter'].items()\n if k not in context_from_replayfile['cookiecutter'].keys()\n }\n context_for_prompting = {}\n context_for_prompting['cookiecutter'] = items_for_prompting\n context = context_from_replayfile\n logger.debug('prompting context: %s', context_for_prompting)\n else:\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n context_for_prompting = context\n # preserve the original cookiecutter options\n # print(context['cookiecutter'])\n context['_cookiecutter'] = {\n k: v for k, v in context['cookiecutter'].items() if not k.startswith(\"_\")\n }\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n\n with import_patch:\n if context_for_prompting['cookiecutter']:\n context['cookiecutter'].update(\n prompt_for_config(context_for_prompting, no_input)\n )\n if \"template\" in context[\"cookiecutter\"]:\n nested_template = re.search(\n r'\\((.*?)\\)', context[\"cookiecutter\"][\"template\"]\n ).group(1)\n return cookiecutter(\n template=os.path.join(template, nested_template),\n checkout=checkout,\n no_input=no_input,\n extra_context=extra_context,\n replay=replay,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir,\n config_file=config_file,\n default_config=default_config,\n password=password,\n directory=directory,\n skip_if_file_exists=skip_if_file_exists,\n accept_hooks=accept_hooks,\n keep_project_on_failure=keep_project_on_failure,\n )\n\n logger.debug('context is %s', context)\n\n # include template dir or url in the context 
dict\n context['cookiecutter']['_template'] = template\n\n # include output+dir in the context dict\n context['cookiecutter']['_output_dir'] = os.path.abspath(output_dir)\n\n # include repo dir or url in the context dict\n context['cookiecutter']['_repo_dir'] = repo_dir\n\n # include checkout details in the context dict\n context['cookiecutter']['_checkout'] = checkout\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n with import_patch:\n result = generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n skip_if_file_exists=skip_if_file_exists,\n output_dir=output_dir,\n accept_hooks=accept_hooks,\n keep_project_on_failure=keep_project_on_failure,\n )\n\n # Cleanup (if required)\n if cleanup:\n rmtree(repo_dir)\n\n return result\n\n\nclass _patch_import_path_for_repo:\n def __init__(self, repo_dir):\n self._repo_dir = repo_dir\n self._path = None\n\n def __enter__(self):\n self._path = copy(sys.path)\n sys.path.append(self._repo_dir)\n\n def __exit__(self, type, value, traceback):\n sys.path = self._path\n", "path": "cookiecutter/main.py"}]} | 2,412 | 226 |
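As a usage illustration for the cookiecutter record above: once `_checkout` is present in the context, a post-generation hook can branch on it, which is exactly the use case the issue author describes. The sketch below assumes cookiecutter's standard `hooks/post_gen_project.py` mechanism; the branch check and messages are hypothetical, not taken from the issue or the patch:

```python
# hooks/post_gen_project.py -- cookiecutter renders this file with the template context.
checkout = "{{ cookiecutter._checkout }}"

# When --checkout is not passed, the value is None and Jinja renders the string "None".
if checkout and checkout not in ("None", ""):
    print(f"Project generated from non-default checkout '{checkout}'.")
    # ...perform branch/tag-specific post-processing here (hypothetical step)...
else:
    print("Project generated from the template's default branch.")
```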
gh_patches_debug_56453 | rasdani/github-patches | git_diff | netket__netket-506 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ExactSampler is not resetting at construction time
`ExactSampler` should call `reset()` at construction time.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netket/sampler/exact_sampler.py`
Content:
```
1 import numpy as _np
2 from .abstract_sampler import AbstractSampler
3 from ..machine.density_matrix import AbstractDensityMatrix
4 from ..hilbert import DoubledHilbert
5 import netket.random
6
7
8 class ExactSampler(AbstractSampler):
9 r"""
10 This sampler generates i.i.d. samples from $$|\Psi(s)|^2$$.
11 In order to perform exact sampling, $$|\Psi(s)|^2$$ is precomputed an all
12 the possible values of the quantum numbers $$s$$. This sampler has thus an
13 exponential cost with the number of degrees of freedom, and cannot be used
14 for large systems, where Metropolis-based sampling are instead a viable
15 option.
16 """
17
18 def __init__(self, machine, sample_size=16):
19 r"""
20 Constructs a new ``ExactSampler`` given a machine.
21
22 Args:
23 machine: A machine $$\Psi(s)$$ used for the sampling.
24 The probability distribution being sampled
25 from is $$F(\Psi(s))$$, where the function
26 $$F(X)$$, is arbitrary, by default $$F(X)=|X|^2$$.
27
28 sample_size: The number of independent samples to be generated at each invocation of __next__.
29 """
30 super().__init__(machine, sample_size)
31 if isinstance(machine, AbstractDensityMatrix):
32 self.hilbert = DoubledHilbert(machine.hilbert)
33 else:
34 self.hilbert = machine.hilbert
35 self._machine_pow = 2.0
36
37 def reset(self, init_random=False):
38 self._prob = _np.absolute(self.machine.to_array()) ** self.machine_pow
39 self._prob /= self._prob.sum()
40
41 def __next__(self):
42 numbers = netket.random.choice(
43 self._prob.size, size=self.sample_shape[0], replace=True, p=self._prob
44 )
45 return self.hilbert.numbers_to_states(numbers)
46
47 def generate_samples(self, n_samples, init_random=False, samples=None):
48
49 if samples is None:
50 samples = _np.zeros((n_samples, self.sample_shape[0], self.sample_shape[1]))
51
52 numbers = netket.random.choice(
53 self._prob.size,
54 size=self.sample_shape[0] * n_samples,
55 replace=True,
56 p=self._prob,
57 )
58 samples[:] = self.hilbert.numbers_to_states(numbers).reshape(samples.shape)
59
60 return samples
61
62 @property
63 def machine_pow(self):
64 return self._machine_pow
65
66 @machine_pow.setter
67 def machine_pow(self, m_power):
68 self._machine_pow = m_power
69 self.reset()
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netket/sampler/exact_sampler.py b/netket/sampler/exact_sampler.py
--- a/netket/sampler/exact_sampler.py
+++ b/netket/sampler/exact_sampler.py
@@ -33,6 +33,7 @@
else:
self.hilbert = machine.hilbert
self._machine_pow = 2.0
+ self.reset()
def reset(self, init_random=False):
self._prob = _np.absolute(self.machine.to_array()) ** self.machine_pow
| {"golden_diff": "diff --git a/netket/sampler/exact_sampler.py b/netket/sampler/exact_sampler.py\n--- a/netket/sampler/exact_sampler.py\n+++ b/netket/sampler/exact_sampler.py\n@@ -33,6 +33,7 @@\n else:\n self.hilbert = machine.hilbert\n self._machine_pow = 2.0\n+ self.reset()\n \n def reset(self, init_random=False):\n self._prob = _np.absolute(self.machine.to_array()) ** self.machine_pow\n", "issue": "ExactSampler is not resetting at construction time\nExact Sampler should call Reset at construction time \r\n\n", "before_files": [{"content": "import numpy as _np\nfrom .abstract_sampler import AbstractSampler\nfrom ..machine.density_matrix import AbstractDensityMatrix\nfrom ..hilbert import DoubledHilbert\nimport netket.random\n\n\nclass ExactSampler(AbstractSampler):\n r\"\"\"\n This sampler generates i.i.d. samples from $$|\\Psi(s)|^2$$.\n In order to perform exact sampling, $$|\\Psi(s)|^2$$ is precomputed an all\n the possible values of the quantum numbers $$s$$. This sampler has thus an\n exponential cost with the number of degrees of freedom, and cannot be used\n for large systems, where Metropolis-based sampling are instead a viable\n option.\n \"\"\"\n\n def __init__(self, machine, sample_size=16):\n r\"\"\"\n Constructs a new ``ExactSampler`` given a machine.\n\n Args:\n machine: A machine $$\\Psi(s)$$ used for the sampling.\n The probability distribution being sampled\n from is $$F(\\Psi(s))$$, where the function\n $$F(X)$$, is arbitrary, by default $$F(X)=|X|^2$$.\n\n sample_size: The number of independent samples to be generated at each invocation of __next__.\n \"\"\"\n super().__init__(machine, sample_size)\n if isinstance(machine, AbstractDensityMatrix):\n self.hilbert = DoubledHilbert(machine.hilbert)\n else:\n self.hilbert = machine.hilbert\n self._machine_pow = 2.0\n\n def reset(self, init_random=False):\n self._prob = _np.absolute(self.machine.to_array()) ** self.machine_pow\n self._prob /= self._prob.sum()\n\n def __next__(self):\n numbers = netket.random.choice(\n self._prob.size, size=self.sample_shape[0], replace=True, p=self._prob\n )\n return self.hilbert.numbers_to_states(numbers)\n\n def generate_samples(self, n_samples, init_random=False, samples=None):\n\n if samples is None:\n samples = _np.zeros((n_samples, self.sample_shape[0], self.sample_shape[1]))\n\n numbers = netket.random.choice(\n self._prob.size,\n size=self.sample_shape[0] * n_samples,\n replace=True,\n p=self._prob,\n )\n samples[:] = self.hilbert.numbers_to_states(numbers).reshape(samples.shape)\n\n return samples\n\n @property\n def machine_pow(self):\n return self._machine_pow\n\n @machine_pow.setter\n def machine_pow(self, m_power):\n self._machine_pow = m_power\n self.reset()\n", "path": "netket/sampler/exact_sampler.py"}], "after_files": [{"content": "import numpy as _np\nfrom .abstract_sampler import AbstractSampler\nfrom ..machine.density_matrix import AbstractDensityMatrix\nfrom ..hilbert import DoubledHilbert\nimport netket.random\n\n\nclass ExactSampler(AbstractSampler):\n r\"\"\"\n This sampler generates i.i.d. samples from $$|\\Psi(s)|^2$$.\n In order to perform exact sampling, $$|\\Psi(s)|^2$$ is precomputed an all\n the possible values of the quantum numbers $$s$$. 
This sampler has thus an\n exponential cost with the number of degrees of freedom, and cannot be used\n for large systems, where Metropolis-based sampling are instead a viable\n option.\n \"\"\"\n\n def __init__(self, machine, sample_size=16):\n r\"\"\"\n Constructs a new ``ExactSampler`` given a machine.\n\n Args:\n machine: A machine $$\\Psi(s)$$ used for the sampling.\n The probability distribution being sampled\n from is $$F(\\Psi(s))$$, where the function\n $$F(X)$$, is arbitrary, by default $$F(X)=|X|^2$$.\n\n sample_size: The number of independent samples to be generated at each invocation of __next__.\n \"\"\"\n super().__init__(machine, sample_size)\n if isinstance(machine, AbstractDensityMatrix):\n self.hilbert = DoubledHilbert(machine.hilbert)\n else:\n self.hilbert = machine.hilbert\n self._machine_pow = 2.0\n self.reset()\n\n def reset(self, init_random=False):\n self._prob = _np.absolute(self.machine.to_array()) ** self.machine_pow\n self._prob /= self._prob.sum()\n\n def __next__(self):\n numbers = netket.random.choice(\n self._prob.size, size=self.sample_shape[0], replace=True, p=self._prob\n )\n return self.hilbert.numbers_to_states(numbers)\n\n def generate_samples(self, n_samples, init_random=False, samples=None):\n\n if samples is None:\n samples = _np.zeros((n_samples, self.sample_shape[0], self.sample_shape[1]))\n\n numbers = netket.random.choice(\n self._prob.size,\n size=self.sample_shape[0] * n_samples,\n replace=True,\n p=self._prob,\n )\n samples[:] = self.hilbert.numbers_to_states(numbers).reshape(samples.shape)\n\n return samples\n\n @property\n def machine_pow(self):\n return self._machine_pow\n\n @machine_pow.setter\n def machine_pow(self, m_power):\n self._machine_pow = m_power\n self.reset()\n", "path": "netket/sampler/exact_sampler.py"}]} | 982 | 114 |
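To make the failure mode in the netket record above concrete: `__next__` and `generate_samples` both read `self._prob`, which is only created inside `reset()`, so a sampler used straight after construction raises `AttributeError` unless `reset()` runs in `__init__`. A stripped-down, self-contained sketch of the same pattern (not the real netket class):

```python
import numpy as np


class TinyExactSampler:
    """Minimal stand-in for ExactSampler; only the reset()/_prob interaction is modelled."""

    def __init__(self, unnormalised_probs):
        self._raw = np.asarray(unnormalised_probs, dtype=float)
        self.reset()  # the one-line fix: without this, self._prob does not exist yet

    def reset(self):
        self._prob = self._raw / self._raw.sum()

    def __next__(self):
        # Reads self._prob, so it must only run after reset().
        return np.random.choice(self._prob.size, p=self._prob)


sampler = TinyExactSampler([1.0, 2.0, 1.0])
print(next(sampler))  # works immediately because __init__ called reset()
```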
gh_patches_debug_41803 | rasdani/github-patches | git_diff | mindsdb__mindsdb-943 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UUID endpoint
We need a UUID endpoint, say, `/api/config/reporting_uuid`, that returns the mindsdb_native telemetry UUID to scout.
This will make less sense when/if mindsdb native starts wrapping over multiple machines running native, but we'll cross that bridge when we get there.
For now the impl is simple:
* When mindsdb starts create a predictor with a placeholder name
* Make the uuid endpoint call `.get_model_data` on that predictor and get the `report_uuid` key
* Send this key to the frontend in a json: `{"report_uuid": "{{value_of_the_report_uuid_key}}" }`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/interfaces/native/predictor_process.py`
Content:
```
1 import torch.multiprocessing as mp
2 from mindsdb.interfaces.database.database import DatabaseWrapper
3
4
5 ctx = mp.get_context('spawn')
6
7
8 class PredictorProcess(ctx.Process):
9 daemon = True
10
11 def __init__(self, *args):
12 super(PredictorProcess, self).__init__(args=args)
13
14 def run(self):
15 '''
16 running at subprocess due to
17 ValueError: signal only works in main thread
18
19 this is work for celery worker here?
20 '''
21 import mindsdb_native
22
23 name, from_data, to_predict, kwargs, config, trx_type = self._args
24
25 mdb = mindsdb_native.Predictor(name=name)
26
27 if trx_type == 'learn':
28 to_predict = to_predict if isinstance(to_predict, list) else [to_predict]
29 data_source = getattr(mindsdb_native, from_data['class'])(*from_data['args'], **from_data['kwargs'])
30 mdb.learn(
31 from_data=data_source,
32 to_predict=to_predict,
33 **kwargs
34 )
35
36 stats = mindsdb_native.F.get_model_data(name)['data_analysis_v2']
37
38 DatabaseWrapper(config).register_predictors([{
39 'name': name,
40 'predict': to_predict,
41 'data_analysis': stats
42 }], setup=False)
43
44 if trx_type == 'predict':
45 if isinstance(from_data, dict):
46 when_data = from_data
47 else:
48 when_data = getattr(mindsdb_native, from_data['class'])(*from_data['args'], **from_data['kwargs'])
49
50 predictions = mdb.predict(
51 when_data=when_data,
52 **kwargs
53 )
54
55 # @TODO Figure out a way to recover this since we are using `spawn` here... simple Queue or instiating a Multiprocessing manager and registering a value in a dict using that. Or using map from a multiprocessing pool with 1x process (though using a custom process there might be it's own bucket of annoying)
56 return predictions
57
```
Path: `mindsdb/interfaces/native/mindsdb.py`
Content:
```
1 # Mindsdb native interface
2 from pathlib import Path
3 import json
4
5 from dateutil.parser import parse as parse_datetime
6
7 import mindsdb_native
8 from mindsdb_native import F
9 from mindsdb.utilities.fs import create_directory
10 from mindsdb_native.libs.constants.mindsdb import DATA_SUBTYPES
11 from mindsdb.interfaces.native.predictor_process import PredictorProcess
12 from mindsdb.interfaces.database.database import DatabaseWrapper
13
14
15 class MindsdbNative():
16 def __init__(self, config):
17 self.config = config
18 self.dbw = DatabaseWrapper(self.config)
19
20 def learn(self, name, from_data, to_predict, kwargs={}):
21 join_learn_process = kwargs.get('join_learn_process', False)
22 if 'join_learn_process' in kwargs:
23 del kwargs['join_learn_process']
24
25 predictor_dir = Path(self.config.paths['predictors']).joinpath(name)
26 create_directory(predictor_dir)
27 versions_file_path = predictor_dir.joinpath('versions.json')
28 with open(str(versions_file_path), 'wt') as f:
29 json.dump(self.config.versions, f, indent=4, sort_keys=True)
30
31 p = PredictorProcess(name, from_data, to_predict, kwargs, self.config.get_all(), 'learn')
32 p.start()
33 if join_learn_process is True:
34 p.join()
35 if p.exitcode != 0:
36 raise Exception('Learning process failed !')
37
38 def predict(self, name, when_data=None, kwargs={}):
39 # @TODO Separate into two paths, one for "normal" predictions and one for "real time" predictions. Use the multiprocessing code commented out bellow for normal (once we figure out how to return the prediction object... else use the inline code but with the "real time" predict functionality of mindsdb_native taht will be implemented later)
40 '''
41 from_data = when if when is not None else when_data
42 p = PredictorProcess(name, from_data, to_predict=None, kwargs=kwargs, config=self.config.get_all(), 'predict')
43 p.start()
44 predictions = p.join()
45 '''
46 mdb = mindsdb_native.Predictor(name=name)
47
48 predictions = mdb.predict(
49 when_data=when_data,
50 **kwargs
51 )
52
53 return predictions
54
55 def analyse_dataset(self, ds):
56 return F.analyse_dataset(ds)
57
58 def get_model_data(self, name, native_view=False):
59 model = F.get_model_data(name)
60 if native_view:
61 return model
62
63 data_analysis = model['data_analysis_v2']
64 for column in data_analysis['columns']:
65 if len(data_analysis[column]) == 0 or data_analysis[column].get('empty', {}).get('is_empty', False):
66 data_analysis[column]['typing'] = {
67 'data_subtype': DATA_SUBTYPES.INT
68 }
69
70 return model
71
72 def get_models(self, status='any'):
73 models = F.get_models()
74 if status != 'any':
75 models = [x for x in models if x['status'] == status]
76
77 for i in range(len(models)):
78 for k in ['train_end_at', 'updated_at', 'created_at']:
79 if k in models[i] and models[i][k] is not None:
80 try:
81 models[i][k] = parse_datetime(str(models[i][k]).split('.')[0])
82 except Exception:
83 models[i][k] = parse_datetime(str(models[i][k]))
84 return models
85
86 def delete_model(self, name):
87 F.delete_model(name)
88 self.dbw.unregister_predictor(name)
89
90 def rename_model(self, name, new_name):
91 self.dbw.unregister_predictor(self.get_model_data(name))
92 F.rename_model(name, new_name)
93 self.dbw.register_predictors(self.get_model_data(new_name), setup=False)
94
95 def load_model(self, fpath):
96 F.import_model(model_archive_path=fpath)
97 # @TODO How do we figure out the name here ?
98 # dbw.register_predictors(...)
99
100 def export_model(self, name):
101 F.export_predictor(model_name=name)
102
```
Path: `mindsdb/api/http/namespaces/util.py`
Content:
```
1 from flask import request
2 from flask_restx import Resource, abort
3
4 from mindsdb.api.http.namespaces.configs.util import ns_conf
5 from mindsdb import __about__
6
7 @ns_conf.route('/ping')
8 class Ping(Resource):
9 @ns_conf.doc('get_ping')
10 def get(self):
11 '''Checks server avaliable'''
12 return {'status': 'ok'}
13
14
15 @ns_conf.route('/shutdown')
16 class Shutdown(Resource):
17 @ns_conf.doc('get_shutdown')
18 def get(self):
19 '''Shutdown server'''
20 if request.host.startswith('127.0.0.1') or request.host.startswith('localhost'):
21 func = request.environ.get('werkzeug.server.shutdown')
22 if func is None:
23 return '', 500
24 func()
25 return '', 200
26 abort(403, "")
27
28
29 @ns_conf.route('/util/version')
30 class Version(Resource):
31 @ns_conf.doc('get_endpoint')
32 def get(self):
33 '''Check endpoint'''
34 return {'mindsdb': "{__about__.__version__}"}
35
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mindsdb/api/http/namespaces/util.py b/mindsdb/api/http/namespaces/util.py
--- a/mindsdb/api/http/namespaces/util.py
+++ b/mindsdb/api/http/namespaces/util.py
@@ -1,5 +1,6 @@
from flask import request
from flask_restx import Resource, abort
+from flask import current_app as ca
from mindsdb.api.http.namespaces.configs.util import ns_conf
from mindsdb import __about__
@@ -11,25 +12,12 @@
'''Checks server avaliable'''
return {'status': 'ok'}
-
-@ns_conf.route('/shutdown')
-class Shutdown(Resource):
- @ns_conf.doc('get_shutdown')
- def get(self):
- '''Shutdown server'''
- if request.host.startswith('127.0.0.1') or request.host.startswith('localhost'):
- func = request.environ.get('werkzeug.server.shutdown')
- if func is None:
- return '', 500
- func()
- return '', 200
- abort(403, "")
-
-
-@ns_conf.route('/util/version')
-class Version(Resource):
- @ns_conf.doc('get_endpoint')
+@ns_conf.route('/report_uuid')
+class ReportUUID(Resource):
+ @ns_conf.doc('get_report_uuid')
def get(self):
- '''Check endpoint'''
- return {'mindsdb': "{__about__.__version__}"}
-
+ metamodel_name = '___monitroing_metamodel___'
+ predictor = ca.mindsdb_native.create(metamodel_name)
+ return {
+ 'report_uuid': predictor.report_uuid
+ }
diff --git a/mindsdb/interfaces/native/mindsdb.py b/mindsdb/interfaces/native/mindsdb.py
--- a/mindsdb/interfaces/native/mindsdb.py
+++ b/mindsdb/interfaces/native/mindsdb.py
@@ -17,16 +17,24 @@
self.config = config
self.dbw = DatabaseWrapper(self.config)
+ def _setup_for_creation(self, name):
+ predictor_dir = Path(self.config.paths['predictors']).joinpath(name)
+ create_directory(predictor_dir)
+ versions_file_path = predictor_dir.joinpath('versions.json')
+ with open(str(versions_file_path), 'wt') as f:
+ json.dump(self.config.versions, f, indent=4, sort_keys=True)
+
+ def create(self, name):
+ self._setup_for_creation(name)
+ predictor = mindsdb_native.Predictor(name=name, run_env={'trigger': 'mindsdb'})
+ return predictor
+
def learn(self, name, from_data, to_predict, kwargs={}):
join_learn_process = kwargs.get('join_learn_process', False)
if 'join_learn_process' in kwargs:
del kwargs['join_learn_process']
- predictor_dir = Path(self.config.paths['predictors']).joinpath(name)
- create_directory(predictor_dir)
- versions_file_path = predictor_dir.joinpath('versions.json')
- with open(str(versions_file_path), 'wt') as f:
- json.dump(self.config.versions, f, indent=4, sort_keys=True)
+ self._setup_for_creation(name)
p = PredictorProcess(name, from_data, to_predict, kwargs, self.config.get_all(), 'learn')
p.start()
@@ -43,7 +51,7 @@
p.start()
predictions = p.join()
'''
- mdb = mindsdb_native.Predictor(name=name)
+ mdb = mindsdb_native.Predictor(name=name, run_env={'trigger': 'mindsdb'})
predictions = mdb.predict(
when_data=when_data,
diff --git a/mindsdb/interfaces/native/predictor_process.py b/mindsdb/interfaces/native/predictor_process.py
--- a/mindsdb/interfaces/native/predictor_process.py
+++ b/mindsdb/interfaces/native/predictor_process.py
@@ -22,7 +22,7 @@
name, from_data, to_predict, kwargs, config, trx_type = self._args
- mdb = mindsdb_native.Predictor(name=name)
+ mdb = mindsdb_native.Predictor(name=name, run_env={'trigger': 'mindsdb'})
if trx_type == 'learn':
to_predict = to_predict if isinstance(to_predict, list) else [to_predict]
| {"golden_diff": "diff --git a/mindsdb/api/http/namespaces/util.py b/mindsdb/api/http/namespaces/util.py\n--- a/mindsdb/api/http/namespaces/util.py\n+++ b/mindsdb/api/http/namespaces/util.py\n@@ -1,5 +1,6 @@\n from flask import request\n from flask_restx import Resource, abort\n+from flask import current_app as ca\n \n from mindsdb.api.http.namespaces.configs.util import ns_conf\n from mindsdb import __about__\n@@ -11,25 +12,12 @@\n '''Checks server avaliable'''\n return {'status': 'ok'}\n \n-\n-@ns_conf.route('/shutdown')\n-class Shutdown(Resource):\n- @ns_conf.doc('get_shutdown')\n- def get(self):\n- '''Shutdown server'''\n- if request.host.startswith('127.0.0.1') or request.host.startswith('localhost'):\n- func = request.environ.get('werkzeug.server.shutdown')\n- if func is None:\n- return '', 500\n- func()\n- return '', 200\n- abort(403, \"\")\n-\n- \n-@ns_conf.route('/util/version')\n-class Version(Resource):\n- @ns_conf.doc('get_endpoint')\n+@ns_conf.route('/report_uuid')\n+class ReportUUID(Resource):\n+ @ns_conf.doc('get_report_uuid')\n def get(self):\n- '''Check endpoint'''\n- return {'mindsdb': \"{__about__.__version__}\"}\n- \n+ metamodel_name = '___monitroing_metamodel___'\n+ predictor = ca.mindsdb_native.create(metamodel_name)\n+ return {\n+ 'report_uuid': predictor.report_uuid\n+ }\ndiff --git a/mindsdb/interfaces/native/mindsdb.py b/mindsdb/interfaces/native/mindsdb.py\n--- a/mindsdb/interfaces/native/mindsdb.py\n+++ b/mindsdb/interfaces/native/mindsdb.py\n@@ -17,16 +17,24 @@\n self.config = config\n self.dbw = DatabaseWrapper(self.config)\n \n+ def _setup_for_creation(self, name):\n+ predictor_dir = Path(self.config.paths['predictors']).joinpath(name)\n+ create_directory(predictor_dir)\n+ versions_file_path = predictor_dir.joinpath('versions.json')\n+ with open(str(versions_file_path), 'wt') as f:\n+ json.dump(self.config.versions, f, indent=4, sort_keys=True)\n+\n+ def create(self, name):\n+ self._setup_for_creation(name)\n+ predictor = mindsdb_native.Predictor(name=name, run_env={'trigger': 'mindsdb'})\n+ return predictor\n+\n def learn(self, name, from_data, to_predict, kwargs={}):\n join_learn_process = kwargs.get('join_learn_process', False)\n if 'join_learn_process' in kwargs:\n del kwargs['join_learn_process']\n \n- predictor_dir = Path(self.config.paths['predictors']).joinpath(name)\n- create_directory(predictor_dir)\n- versions_file_path = predictor_dir.joinpath('versions.json')\n- with open(str(versions_file_path), 'wt') as f:\n- json.dump(self.config.versions, f, indent=4, sort_keys=True)\n+ self._setup_for_creation(name)\n \n p = PredictorProcess(name, from_data, to_predict, kwargs, self.config.get_all(), 'learn')\n p.start()\n@@ -43,7 +51,7 @@\n p.start()\n predictions = p.join()\n '''\n- mdb = mindsdb_native.Predictor(name=name)\n+ mdb = mindsdb_native.Predictor(name=name, run_env={'trigger': 'mindsdb'})\n \n predictions = mdb.predict(\n when_data=when_data,\ndiff --git a/mindsdb/interfaces/native/predictor_process.py b/mindsdb/interfaces/native/predictor_process.py\n--- a/mindsdb/interfaces/native/predictor_process.py\n+++ b/mindsdb/interfaces/native/predictor_process.py\n@@ -22,7 +22,7 @@\n \n name, from_data, to_predict, kwargs, config, trx_type = self._args\n \n- mdb = mindsdb_native.Predictor(name=name)\n+ mdb = mindsdb_native.Predictor(name=name, run_env={'trigger': 'mindsdb'})\n \n if trx_type == 'learn':\n to_predict = to_predict if isinstance(to_predict, list) else [to_predict]\n", "issue": "UUID endpoint\nWe need a UUID endpoint, say, 
`/api/config/reporting_uuid` that returns the mindsdb_native telemtry uuid to scout.\r\n\r\nThis will make less sense when/if mindsdb native starts wrapping over multiple machines running native, but we'll cross that bridge when we get there.\r\n\r\nFor now the impl is simple:\r\n\r\n* When mindsdb starts create a predictor with a placeholder name\r\n* Make the uuid endpoint call `.get_model_data` on that predictor and get the `report_uuid` key\r\n* Send this key to the frontend in a json: `{\"report_uuid\": \"{{value_of_the_report_uuid_key}}\" }\r\n\n", "before_files": [{"content": "import torch.multiprocessing as mp\nfrom mindsdb.interfaces.database.database import DatabaseWrapper\n\n\nctx = mp.get_context('spawn')\n\n\nclass PredictorProcess(ctx.Process):\n daemon = True\n\n def __init__(self, *args):\n super(PredictorProcess, self).__init__(args=args)\n\n def run(self):\n '''\n running at subprocess due to\n ValueError: signal only works in main thread\n\n this is work for celery worker here?\n '''\n import mindsdb_native\n\n name, from_data, to_predict, kwargs, config, trx_type = self._args\n\n mdb = mindsdb_native.Predictor(name=name)\n\n if trx_type == 'learn':\n to_predict = to_predict if isinstance(to_predict, list) else [to_predict]\n data_source = getattr(mindsdb_native, from_data['class'])(*from_data['args'], **from_data['kwargs'])\n mdb.learn(\n from_data=data_source,\n to_predict=to_predict,\n **kwargs\n )\n\n stats = mindsdb_native.F.get_model_data(name)['data_analysis_v2']\n\n DatabaseWrapper(config).register_predictors([{\n 'name': name,\n 'predict': to_predict,\n 'data_analysis': stats\n }], setup=False)\n\n if trx_type == 'predict':\n if isinstance(from_data, dict):\n when_data = from_data\n else:\n when_data = getattr(mindsdb_native, from_data['class'])(*from_data['args'], **from_data['kwargs'])\n\n predictions = mdb.predict(\n when_data=when_data,\n **kwargs\n )\n\n # @TODO Figure out a way to recover this since we are using `spawn` here... simple Queue or instiating a Multiprocessing manager and registering a value in a dict using that. 
Or using map from a multiprocessing pool with 1x process (though using a custom process there might be it's own bucket of annoying)\n return predictions\n", "path": "mindsdb/interfaces/native/predictor_process.py"}, {"content": "# Mindsdb native interface\nfrom pathlib import Path\nimport json\n\nfrom dateutil.parser import parse as parse_datetime\n\nimport mindsdb_native\nfrom mindsdb_native import F\nfrom mindsdb.utilities.fs import create_directory\nfrom mindsdb_native.libs.constants.mindsdb import DATA_SUBTYPES\nfrom mindsdb.interfaces.native.predictor_process import PredictorProcess\nfrom mindsdb.interfaces.database.database import DatabaseWrapper\n\n\nclass MindsdbNative():\n def __init__(self, config):\n self.config = config\n self.dbw = DatabaseWrapper(self.config)\n\n def learn(self, name, from_data, to_predict, kwargs={}):\n join_learn_process = kwargs.get('join_learn_process', False)\n if 'join_learn_process' in kwargs:\n del kwargs['join_learn_process']\n\n predictor_dir = Path(self.config.paths['predictors']).joinpath(name)\n create_directory(predictor_dir)\n versions_file_path = predictor_dir.joinpath('versions.json')\n with open(str(versions_file_path), 'wt') as f:\n json.dump(self.config.versions, f, indent=4, sort_keys=True)\n\n p = PredictorProcess(name, from_data, to_predict, kwargs, self.config.get_all(), 'learn')\n p.start()\n if join_learn_process is True:\n p.join()\n if p.exitcode != 0:\n raise Exception('Learning process failed !')\n\n def predict(self, name, when_data=None, kwargs={}):\n # @TODO Separate into two paths, one for \"normal\" predictions and one for \"real time\" predictions. Use the multiprocessing code commented out bellow for normal (once we figure out how to return the prediction object... else use the inline code but with the \"real time\" predict functionality of mindsdb_native taht will be implemented later)\n '''\n from_data = when if when is not None else when_data\n p = PredictorProcess(name, from_data, to_predict=None, kwargs=kwargs, config=self.config.get_all(), 'predict')\n p.start()\n predictions = p.join()\n '''\n mdb = mindsdb_native.Predictor(name=name)\n\n predictions = mdb.predict(\n when_data=when_data,\n **kwargs\n )\n\n return predictions\n\n def analyse_dataset(self, ds):\n return F.analyse_dataset(ds)\n\n def get_model_data(self, name, native_view=False):\n model = F.get_model_data(name)\n if native_view:\n return model\n\n data_analysis = model['data_analysis_v2']\n for column in data_analysis['columns']:\n if len(data_analysis[column]) == 0 or data_analysis[column].get('empty', {}).get('is_empty', False):\n data_analysis[column]['typing'] = {\n 'data_subtype': DATA_SUBTYPES.INT\n }\n\n return model\n\n def get_models(self, status='any'):\n models = F.get_models()\n if status != 'any':\n models = [x for x in models if x['status'] == status]\n\n for i in range(len(models)):\n for k in ['train_end_at', 'updated_at', 'created_at']:\n if k in models[i] and models[i][k] is not None:\n try:\n models[i][k] = parse_datetime(str(models[i][k]).split('.')[0])\n except Exception:\n models[i][k] = parse_datetime(str(models[i][k]))\n return models\n\n def delete_model(self, name):\n F.delete_model(name)\n self.dbw.unregister_predictor(name)\n\n def rename_model(self, name, new_name):\n self.dbw.unregister_predictor(self.get_model_data(name))\n F.rename_model(name, new_name)\n self.dbw.register_predictors(self.get_model_data(new_name), setup=False)\n\n def load_model(self, fpath):\n F.import_model(model_archive_path=fpath)\n # @TODO How do 
we figure out the name here ?\n # dbw.register_predictors(...)\n\n def export_model(self, name):\n F.export_predictor(model_name=name)\n", "path": "mindsdb/interfaces/native/mindsdb.py"}, {"content": "from flask import request\nfrom flask_restx import Resource, abort\n\nfrom mindsdb.api.http.namespaces.configs.util import ns_conf\nfrom mindsdb import __about__\n\n@ns_conf.route('/ping')\nclass Ping(Resource):\n @ns_conf.doc('get_ping')\n def get(self):\n '''Checks server avaliable'''\n return {'status': 'ok'}\n\n\n@ns_conf.route('/shutdown')\nclass Shutdown(Resource):\n @ns_conf.doc('get_shutdown')\n def get(self):\n '''Shutdown server'''\n if request.host.startswith('127.0.0.1') or request.host.startswith('localhost'):\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n return '', 500\n func()\n return '', 200\n abort(403, \"\")\n\n \n@ns_conf.route('/util/version')\nclass Version(Resource):\n @ns_conf.doc('get_endpoint')\n def get(self):\n '''Check endpoint'''\n return {'mindsdb': \"{__about__.__version__}\"}\n \n", "path": "mindsdb/api/http/namespaces/util.py"}], "after_files": [{"content": "import torch.multiprocessing as mp\nfrom mindsdb.interfaces.database.database import DatabaseWrapper\n\n\nctx = mp.get_context('spawn')\n\n\nclass PredictorProcess(ctx.Process):\n daemon = True\n\n def __init__(self, *args):\n super(PredictorProcess, self).__init__(args=args)\n\n def run(self):\n '''\n running at subprocess due to\n ValueError: signal only works in main thread\n\n this is work for celery worker here?\n '''\n import mindsdb_native\n\n name, from_data, to_predict, kwargs, config, trx_type = self._args\n\n mdb = mindsdb_native.Predictor(name=name, run_env={'trigger': 'mindsdb'})\n\n if trx_type == 'learn':\n to_predict = to_predict if isinstance(to_predict, list) else [to_predict]\n data_source = getattr(mindsdb_native, from_data['class'])(*from_data['args'], **from_data['kwargs'])\n mdb.learn(\n from_data=data_source,\n to_predict=to_predict,\n **kwargs\n )\n\n stats = mindsdb_native.F.get_model_data(name)['data_analysis_v2']\n\n DatabaseWrapper(config).register_predictors([{\n 'name': name,\n 'predict': to_predict,\n 'data_analysis': stats\n }], setup=False)\n\n if trx_type == 'predict':\n if isinstance(from_data, dict):\n when_data = from_data\n else:\n when_data = getattr(mindsdb_native, from_data['class'])(*from_data['args'], **from_data['kwargs'])\n\n predictions = mdb.predict(\n when_data=when_data,\n **kwargs\n )\n\n # @TODO Figure out a way to recover this since we are using `spawn` here... simple Queue or instiating a Multiprocessing manager and registering a value in a dict using that. 
Or using map from a multiprocessing pool with 1x process (though using a custom process there might be it's own bucket of annoying)\n return predictions\n", "path": "mindsdb/interfaces/native/predictor_process.py"}, {"content": "# Mindsdb native interface\nfrom pathlib import Path\nimport json\n\nfrom dateutil.parser import parse as parse_datetime\n\nimport mindsdb_native\nfrom mindsdb_native import F\nfrom mindsdb.utilities.fs import create_directory\nfrom mindsdb_native.libs.constants.mindsdb import DATA_SUBTYPES\nfrom mindsdb.interfaces.native.predictor_process import PredictorProcess\nfrom mindsdb.interfaces.database.database import DatabaseWrapper\n\n\nclass MindsdbNative():\n def __init__(self, config):\n self.config = config\n self.dbw = DatabaseWrapper(self.config)\n\n def _setup_for_creation(self, name):\n predictor_dir = Path(self.config.paths['predictors']).joinpath(name)\n create_directory(predictor_dir)\n versions_file_path = predictor_dir.joinpath('versions.json')\n with open(str(versions_file_path), 'wt') as f:\n json.dump(self.config.versions, f, indent=4, sort_keys=True)\n\n def create(self, name):\n self._setup_for_creation(name)\n predictor = mindsdb_native.Predictor(name=name, run_env={'trigger': 'mindsdb'})\n return predictor\n\n def learn(self, name, from_data, to_predict, kwargs={}):\n join_learn_process = kwargs.get('join_learn_process', False)\n if 'join_learn_process' in kwargs:\n del kwargs['join_learn_process']\n\n self._setup_for_creation(name)\n\n p = PredictorProcess(name, from_data, to_predict, kwargs, self.config.get_all(), 'learn')\n p.start()\n if join_learn_process is True:\n p.join()\n if p.exitcode != 0:\n raise Exception('Learning process failed !')\n\n def predict(self, name, when_data=None, kwargs={}):\n # @TODO Separate into two paths, one for \"normal\" predictions and one for \"real time\" predictions. Use the multiprocessing code commented out bellow for normal (once we figure out how to return the prediction object... 
else use the inline code but with the \"real time\" predict functionality of mindsdb_native taht will be implemented later)\n '''\n from_data = when if when is not None else when_data\n p = PredictorProcess(name, from_data, to_predict=None, kwargs=kwargs, config=self.config.get_all(), 'predict')\n p.start()\n predictions = p.join()\n '''\n mdb = mindsdb_native.Predictor(name=name, run_env={'trigger': 'mindsdb'})\n\n predictions = mdb.predict(\n when_data=when_data,\n **kwargs\n )\n\n return predictions\n\n def analyse_dataset(self, ds):\n return F.analyse_dataset(ds)\n\n def get_model_data(self, name, native_view=False):\n model = F.get_model_data(name)\n if native_view:\n return model\n\n data_analysis = model['data_analysis_v2']\n for column in data_analysis['columns']:\n if len(data_analysis[column]) == 0 or data_analysis[column].get('empty', {}).get('is_empty', False):\n data_analysis[column]['typing'] = {\n 'data_subtype': DATA_SUBTYPES.INT\n }\n\n return model\n\n def get_models(self, status='any'):\n models = F.get_models()\n if status != 'any':\n models = [x for x in models if x['status'] == status]\n\n for i in range(len(models)):\n for k in ['train_end_at', 'updated_at', 'created_at']:\n if k in models[i] and models[i][k] is not None:\n try:\n models[i][k] = parse_datetime(str(models[i][k]).split('.')[0])\n except Exception:\n models[i][k] = parse_datetime(str(models[i][k]))\n return models\n\n def delete_model(self, name):\n F.delete_model(name)\n self.dbw.unregister_predictor(name)\n\n def rename_model(self, name, new_name):\n self.dbw.unregister_predictor(self.get_model_data(name))\n F.rename_model(name, new_name)\n self.dbw.register_predictors(self.get_model_data(new_name), setup=False)\n\n def load_model(self, fpath):\n F.import_model(model_archive_path=fpath)\n # @TODO How do we figure out the name here ?\n # dbw.register_predictors(...)\n\n def export_model(self, name):\n F.export_predictor(model_name=name)\n", "path": "mindsdb/interfaces/native/mindsdb.py"}, {"content": "from flask import request\nfrom flask_restx import Resource, abort\nfrom flask import current_app as ca\n\nfrom mindsdb.api.http.namespaces.configs.util import ns_conf\nfrom mindsdb import __about__\n\n@ns_conf.route('/ping')\nclass Ping(Resource):\n @ns_conf.doc('get_ping')\n def get(self):\n '''Checks server avaliable'''\n return {'status': 'ok'}\n\n@ns_conf.route('/report_uuid')\nclass ReportUUID(Resource):\n @ns_conf.doc('get_report_uuid')\n def get(self):\n metamodel_name = '___monitroing_metamodel___'\n predictor = ca.mindsdb_native.create(metamodel_name)\n return {\n 'report_uuid': predictor.report_uuid\n }\n", "path": "mindsdb/api/http/namespaces/util.py"}]} | 2,306 | 968 |
gh_patches_debug_17210 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-4524 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Match color does not show up in completion
**Version info (see `:version`)**:
Broken:
- https://paste.the-compiler.org/view/13d147a2
- https://paste.the-compiler.org/view/afa000e3
Working:
- https://paste.the-compiler.org/view/fcd00119
**Does the bug happen if you start with `--temp-basedir`?** (if applicable):
Yes
**Description**
I don't have enough time to debug this issue myself anymore, so I'm filing an issue.
On master, since switching to `QSyntaxHighlighter` in #4220, matches no longer show up for me on Qt 5.11.3. Qt 5.12 and 5.11.1 seem fine.
Breaking the SyntaxHighlighter so that it highlights everything does not help, so I think this is some issue we're triggering in qt...
Broken:

Working:

**How to reproduce**
type `:version` and expect a match.
--- END ISSUE ---
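A plausible reading of the symptoms, consistent with the patch shown further down, is that on the affected Qt builds a `QSyntaxHighlighter` only reliably highlights text that is set *after* the highlighter has been attached to the document; the initial rehighlight of pre-existing text appears not to fire. The sketch below is a toy example, not qutebrowser code — `EverythingRed` is an invented class and PyQt5 is assumed to be installed — and it shows the ordering that avoids the problem: attach the highlighter first, then call `setPlainText()`.

```python
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QColor, QSyntaxHighlighter, QTextCharFormat, QTextDocument


class EverythingRed(QSyntaxHighlighter):
    """Toy highlighter that formats every block, to make ordering effects visible."""

    def highlightBlock(self, text):
        fmt = QTextCharFormat()
        fmt.setForeground(QColor("red"))
        self.setFormat(0, len(text), fmt)


app = QApplication([])
doc = QTextDocument()
# Attach the highlighter before setting the text: highlightBlock() is then
# driven by the document's change notification for the new contents instead
# of relying on an initial rehighlight of pre-existing text.
EverythingRed(doc)
doc.setPlainText("hello world")
```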
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/completion/completiondelegate.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Completion item delegate for CompletionView.
21
22 We use this to be able to highlight parts of the text.
23 """
24
25 import re
26 import html
27
28 from PyQt5.QtWidgets import QStyle, QStyleOptionViewItem, QStyledItemDelegate
29 from PyQt5.QtCore import QRectF, QSize, Qt
30 from PyQt5.QtGui import (QIcon, QPalette, QTextDocument, QTextOption,
31 QAbstractTextDocumentLayout, QSyntaxHighlighter,
32 QTextCharFormat)
33
34 from qutebrowser.config import config
35 from qutebrowser.utils import qtutils
36
37
38 class _Highlighter(QSyntaxHighlighter):
39
40 def __init__(self, doc, pattern, color):
41 super().__init__(doc)
42 self._format = QTextCharFormat()
43 self._format.setForeground(color)
44 self._pattern = pattern
45
46 def highlightBlock(self, text):
47 """Override highlightBlock for custom highlighting."""
48 for match in re.finditer(self._pattern, text, re.IGNORECASE):
49 start, end = match.span()
50 length = end - start
51 self.setFormat(start, length, self._format)
52
53
54 class CompletionItemDelegate(QStyledItemDelegate):
55
56 """Delegate used by CompletionView to draw individual items.
57
58 Mainly a cleaned up port of Qt's way to draw a TreeView item, except it
59 uses a QTextDocument to draw the text and add marking.
60
61 Original implementation:
62 qt/src/gui/styles/qcommonstyle.cpp:drawControl:2153
63
64 Attributes:
65 _opt: The QStyleOptionViewItem which is used.
66 _style: The style to be used.
67 _painter: The QPainter to be used.
68 _doc: The QTextDocument to be used.
69 """
70
71 # FIXME this is horribly slow when resizing.
72 # We should probably cache something in _get_textdoc or so, but as soon as
73 # we implement eliding that cache probably isn't worth much anymore...
74 # https://github.com/qutebrowser/qutebrowser/issues/121
75
76 def __init__(self, parent=None):
77 self._painter = None
78 self._opt = None
79 self._doc = None
80 self._style = None
81 super().__init__(parent)
82
83 def _draw_background(self):
84 """Draw the background of an ItemViewItem."""
85 self._style.drawPrimitive(self._style.PE_PanelItemViewItem, self._opt,
86 self._painter, self._opt.widget)
87
88 def _draw_icon(self):
89 """Draw the icon of an ItemViewItem."""
90 icon_rect = self._style.subElementRect(
91 self._style.SE_ItemViewItemDecoration, self._opt, self._opt.widget)
92 if not icon_rect.isValid():
93 # The rect seems to be wrong in all kind of ways if no icon should
94 # be displayed.
95 return
96
97 mode = QIcon.Normal
98 if not self._opt.state & QStyle.State_Enabled:
99 mode = QIcon.Disabled
100 elif self._opt.state & QStyle.State_Selected:
101 mode = QIcon.Selected
102 state = QIcon.On if self._opt.state & QStyle.State_Open else QIcon.Off
103 self._opt.icon.paint(self._painter, icon_rect,
104 self._opt.decorationAlignment, mode, state)
105
106 def _draw_text(self, index):
107 """Draw the text of an ItemViewItem.
108
109 This is the main part where we differ from the original implementation
110 in Qt: We use a QTextDocument to draw text.
111
112 Args:
113 index: The QModelIndex of the item to draw.
114 """
115 if not self._opt.text:
116 return
117
118 text_rect_ = self._style.subElementRect(
119 self._style.SE_ItemViewItemText, self._opt, self._opt.widget)
120 qtutils.ensure_valid(text_rect_)
121 margin = self._style.pixelMetric(QStyle.PM_FocusFrameHMargin,
122 self._opt, self._opt.widget) + 1
123 # remove width padding
124 text_rect = text_rect_.adjusted(margin, 0, -margin, 0)
125 qtutils.ensure_valid(text_rect)
126 # move text upwards a bit
127 if index.parent().isValid():
128 text_rect.adjust(0, -1, 0, -1)
129 else:
130 text_rect.adjust(0, -2, 0, -2)
131 self._painter.save()
132 state = self._opt.state
133 if state & QStyle.State_Enabled and state & QStyle.State_Active:
134 cg = QPalette.Normal
135 elif state & QStyle.State_Enabled:
136 cg = QPalette.Inactive
137 else:
138 cg = QPalette.Disabled
139
140 if state & QStyle.State_Selected:
141 self._painter.setPen(self._opt.palette.color(
142 cg, QPalette.HighlightedText))
143 # This is a dirty fix for the text jumping by one pixel for
144 # whatever reason.
145 text_rect.adjust(0, -1, 0, 0)
146 else:
147 self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))
148
149 if state & QStyle.State_Editing:
150 self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))
151 self._painter.drawRect(text_rect_.adjusted(0, 0, -1, -1))
152
153 self._painter.translate(text_rect.left(), text_rect.top())
154 self._get_textdoc(index)
155 self._draw_textdoc(text_rect, index.column())
156 self._painter.restore()
157
158 def _draw_textdoc(self, rect, col):
159 """Draw the QTextDocument of an item.
160
161 Args:
162 rect: The QRect to clip the drawing to.
163 """
164 # We can't use drawContents because then the color would be ignored.
165 clip = QRectF(0, 0, rect.width(), rect.height())
166 self._painter.save()
167
168 if self._opt.state & QStyle.State_Selected:
169 color = config.cache['colors.completion.item.selected.fg']
170 elif not self._opt.state & QStyle.State_Enabled:
171 color = config.cache['colors.completion.category.fg']
172 else:
173 colors = config.cache['colors.completion.fg']
174 # if multiple colors are set, use different colors per column
175 color = colors[col % len(colors)]
176 self._painter.setPen(color)
177
178 ctx = QAbstractTextDocumentLayout.PaintContext()
179 ctx.palette.setColor(QPalette.Text, self._painter.pen().color())
180 if clip.isValid():
181 self._painter.setClipRect(clip)
182 ctx.clip = clip
183 self._doc.documentLayout().draw(self._painter, ctx)
184 self._painter.restore()
185
186 def _get_textdoc(self, index):
187 """Create the QTextDocument of an item.
188
189 Args:
190 index: The QModelIndex of the item to draw.
191 """
192 # FIXME we probably should do eliding here. See
193 # qcommonstyle.cpp:viewItemDrawText
194 # https://github.com/qutebrowser/qutebrowser/issues/118
195 text_option = QTextOption()
196 if self._opt.features & QStyleOptionViewItem.WrapText:
197 text_option.setWrapMode(QTextOption.WordWrap)
198 else:
199 text_option.setWrapMode(QTextOption.ManualWrap)
200 text_option.setTextDirection(self._opt.direction)
201 text_option.setAlignment(QStyle.visualAlignment(
202 self._opt.direction, self._opt.displayAlignment))
203
204 if self._doc is not None:
205 self._doc.deleteLater()
206 self._doc = QTextDocument(self)
207 self._doc.setDefaultFont(self._opt.font)
208 self._doc.setDefaultTextOption(text_option)
209 self._doc.setDocumentMargin(2)
210
211 if index.parent().isValid():
212 view = self.parent()
213 pattern = view.pattern
214 columns_to_filter = index.model().columns_to_filter(index)
215 self._doc.setPlainText(self._opt.text)
216 if index.column() in columns_to_filter and pattern:
217 pat = re.escape(pattern).replace(r'\ ', r'|')
218 _Highlighter(self._doc, pat,
219 config.val.colors.completion.match.fg)
220 else:
221 self._doc.setHtml(
222 '<span style="font: {};">{}</span>'.format(
223 html.escape(config.val.fonts.completion.category),
224 html.escape(self._opt.text)))
225
226 def _draw_focus_rect(self):
227 """Draw the focus rectangle of an ItemViewItem."""
228 state = self._opt.state
229 if not state & QStyle.State_HasFocus:
230 return
231 o = self._opt
232 o.rect = self._style.subElementRect(
233 self._style.SE_ItemViewItemFocusRect, self._opt, self._opt.widget)
234 o.state |= QStyle.State_KeyboardFocusChange | QStyle.State_Item
235 qtutils.ensure_valid(o.rect)
236 if state & QStyle.State_Enabled:
237 cg = QPalette.Normal
238 else:
239 cg = QPalette.Disabled
240 if state & QStyle.State_Selected:
241 role = QPalette.Highlight
242 else:
243 role = QPalette.Window
244 o.backgroundColor = self._opt.palette.color(cg, role)
245 self._style.drawPrimitive(QStyle.PE_FrameFocusRect, o, self._painter,
246 self._opt.widget)
247
248 def sizeHint(self, option, index):
249 """Override sizeHint of QStyledItemDelegate.
250
251 Return the cell size based on the QTextDocument size, but might not
252 work correctly yet.
253
254 Args:
255 option: const QStyleOptionViewItem & option
256 index: const QModelIndex & index
257
258 Return:
259 A QSize with the recommended size.
260 """
261 value = index.data(Qt.SizeHintRole)
262 if value is not None:
263 return value
264 self._opt = QStyleOptionViewItem(option)
265 self.initStyleOption(self._opt, index)
266 self._style = self._opt.widget.style()
267 self._get_textdoc(index)
268 docsize = self._doc.size().toSize()
269 size = self._style.sizeFromContents(QStyle.CT_ItemViewItem, self._opt,
270 docsize, self._opt.widget)
271 qtutils.ensure_valid(size)
272 return size + QSize(10, 3)
273
274 def paint(self, painter, option, index):
275 """Override the QStyledItemDelegate paint function.
276
277 Args:
278 painter: QPainter * painter
279 option: const QStyleOptionViewItem & option
280 index: const QModelIndex & index
281 """
282 self._painter = painter
283 self._painter.save()
284 self._opt = QStyleOptionViewItem(option)
285 self.initStyleOption(self._opt, index)
286 self._style = self._opt.widget.style()
287
288 self._draw_background()
289 self._draw_icon()
290 self._draw_text(index)
291 self._draw_focus_rect()
292
293 self._painter.restore()
294
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutebrowser/completion/completiondelegate.py b/qutebrowser/completion/completiondelegate.py
--- a/qutebrowser/completion/completiondelegate.py
+++ b/qutebrowser/completion/completiondelegate.py
@@ -212,11 +212,11 @@
view = self.parent()
pattern = view.pattern
columns_to_filter = index.model().columns_to_filter(index)
- self._doc.setPlainText(self._opt.text)
if index.column() in columns_to_filter and pattern:
pat = re.escape(pattern).replace(r'\ ', r'|')
_Highlighter(self._doc, pat,
config.val.colors.completion.match.fg)
+ self._doc.setPlainText(self._opt.text)
else:
self._doc.setHtml(
'<span style="font: {};">{}</span>'.format(
| {"golden_diff": "diff --git a/qutebrowser/completion/completiondelegate.py b/qutebrowser/completion/completiondelegate.py\n--- a/qutebrowser/completion/completiondelegate.py\n+++ b/qutebrowser/completion/completiondelegate.py\n@@ -212,11 +212,11 @@\n view = self.parent()\n pattern = view.pattern\n columns_to_filter = index.model().columns_to_filter(index)\n- self._doc.setPlainText(self._opt.text)\n if index.column() in columns_to_filter and pattern:\n pat = re.escape(pattern).replace(r'\\ ', r'|')\n _Highlighter(self._doc, pat,\n config.val.colors.completion.match.fg)\n+ self._doc.setPlainText(self._opt.text)\n else:\n self._doc.setHtml(\n '<span style=\"font: {};\">{}</span>'.format(\n", "issue": "Match color does not show up in completion\n**Version info (see `:version`)**:\r\nBroken:\r\n- https://paste.the-compiler.org/view/13d147a2\r\n- https://paste.the-compiler.org/view/afa000e3\r\n\r\nWorking:\r\n- https://paste.the-compiler.org/view/fcd00119\r\n\r\n**Does the bug happen if you start with `--temp-basedir`?** (if applicable):\r\nYes\r\n\r\n**Description**\r\nI don't have enough time to debug this issue myself anymore so I'm filing an issue\r\n\r\nOn master, since switching to `QSyntaxHighlighter` on #4220, matches no longer show up for me on qt 5.11.3. qt 5.12 and 5.11.1 seems fine.\r\n\r\nBreaking the SyntaxHighlighter so that it highlights everything does not help, so I think this is some issue we're triggering in qt...\r\n\r\nBroken:\r\n\r\n\r\nWorking:\r\n\r\n\r\n\r\n\r\n**How to reproduce**\r\n<!-- Link to the affected site, or steps to reproduce the issue\r\n(if possible/applicable). -->\r\n\r\ntype `:version` and expect a match.\r\n\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completion item delegate for CompletionView.\n\nWe use this to be able to highlight parts of the text.\n\"\"\"\n\nimport re\nimport html\n\nfrom PyQt5.QtWidgets import QStyle, QStyleOptionViewItem, QStyledItemDelegate\nfrom PyQt5.QtCore import QRectF, QSize, Qt\nfrom PyQt5.QtGui import (QIcon, QPalette, QTextDocument, QTextOption,\n QAbstractTextDocumentLayout, QSyntaxHighlighter,\n QTextCharFormat)\n\nfrom qutebrowser.config import config\nfrom qutebrowser.utils import qtutils\n\n\nclass _Highlighter(QSyntaxHighlighter):\n\n def __init__(self, doc, pattern, color):\n super().__init__(doc)\n self._format = QTextCharFormat()\n self._format.setForeground(color)\n self._pattern = pattern\n\n def highlightBlock(self, text):\n \"\"\"Override highlightBlock for custom highlighting.\"\"\"\n for match in re.finditer(self._pattern, text, re.IGNORECASE):\n start, end = match.span()\n length = end - start\n self.setFormat(start, length, self._format)\n\n\nclass CompletionItemDelegate(QStyledItemDelegate):\n\n \"\"\"Delegate used by CompletionView to draw individual items.\n\n Mainly a cleaned up port of Qt's way to draw a TreeView item, except it\n uses a QTextDocument to draw the text and add marking.\n\n Original implementation:\n qt/src/gui/styles/qcommonstyle.cpp:drawControl:2153\n\n Attributes:\n _opt: The QStyleOptionViewItem which is used.\n _style: The style to be used.\n _painter: The QPainter to be used.\n _doc: The QTextDocument to be used.\n \"\"\"\n\n # FIXME this is horribly slow when resizing.\n # We should probably cache something in _get_textdoc or so, but as soon as\n # we implement eliding that cache probably isn't worth much anymore...\n # https://github.com/qutebrowser/qutebrowser/issues/121\n\n def __init__(self, parent=None):\n self._painter = None\n self._opt = None\n self._doc = None\n self._style = None\n super().__init__(parent)\n\n def _draw_background(self):\n \"\"\"Draw the background of an ItemViewItem.\"\"\"\n self._style.drawPrimitive(self._style.PE_PanelItemViewItem, self._opt,\n self._painter, self._opt.widget)\n\n def _draw_icon(self):\n \"\"\"Draw the icon of an ItemViewItem.\"\"\"\n icon_rect = self._style.subElementRect(\n self._style.SE_ItemViewItemDecoration, self._opt, self._opt.widget)\n if not icon_rect.isValid():\n # The rect seems to be wrong in all kind of ways if no icon should\n # be displayed.\n return\n\n mode = QIcon.Normal\n if not self._opt.state & QStyle.State_Enabled:\n mode = QIcon.Disabled\n elif self._opt.state & QStyle.State_Selected:\n mode = QIcon.Selected\n state = QIcon.On if self._opt.state & QStyle.State_Open else QIcon.Off\n self._opt.icon.paint(self._painter, icon_rect,\n self._opt.decorationAlignment, mode, state)\n\n def _draw_text(self, index):\n \"\"\"Draw the text of an ItemViewItem.\n\n This is the main part where we differ from the original implementation\n in Qt: We use a QTextDocument to draw text.\n\n Args:\n index: The QModelIndex of the item to draw.\n \"\"\"\n if not self._opt.text:\n return\n\n text_rect_ = self._style.subElementRect(\n self._style.SE_ItemViewItemText, self._opt, self._opt.widget)\n qtutils.ensure_valid(text_rect_)\n margin = self._style.pixelMetric(QStyle.PM_FocusFrameHMargin,\n self._opt, self._opt.widget) + 1\n # remove width padding\n text_rect = text_rect_.adjusted(margin, 0, -margin, 0)\n qtutils.ensure_valid(text_rect)\n # move text upwards a bit\n if index.parent().isValid():\n text_rect.adjust(0, -1, 0, -1)\n else:\n text_rect.adjust(0, -2, 
0, -2)\n self._painter.save()\n state = self._opt.state\n if state & QStyle.State_Enabled and state & QStyle.State_Active:\n cg = QPalette.Normal\n elif state & QStyle.State_Enabled:\n cg = QPalette.Inactive\n else:\n cg = QPalette.Disabled\n\n if state & QStyle.State_Selected:\n self._painter.setPen(self._opt.palette.color(\n cg, QPalette.HighlightedText))\n # This is a dirty fix for the text jumping by one pixel for\n # whatever reason.\n text_rect.adjust(0, -1, 0, 0)\n else:\n self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))\n\n if state & QStyle.State_Editing:\n self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))\n self._painter.drawRect(text_rect_.adjusted(0, 0, -1, -1))\n\n self._painter.translate(text_rect.left(), text_rect.top())\n self._get_textdoc(index)\n self._draw_textdoc(text_rect, index.column())\n self._painter.restore()\n\n def _draw_textdoc(self, rect, col):\n \"\"\"Draw the QTextDocument of an item.\n\n Args:\n rect: The QRect to clip the drawing to.\n \"\"\"\n # We can't use drawContents because then the color would be ignored.\n clip = QRectF(0, 0, rect.width(), rect.height())\n self._painter.save()\n\n if self._opt.state & QStyle.State_Selected:\n color = config.cache['colors.completion.item.selected.fg']\n elif not self._opt.state & QStyle.State_Enabled:\n color = config.cache['colors.completion.category.fg']\n else:\n colors = config.cache['colors.completion.fg']\n # if multiple colors are set, use different colors per column\n color = colors[col % len(colors)]\n self._painter.setPen(color)\n\n ctx = QAbstractTextDocumentLayout.PaintContext()\n ctx.palette.setColor(QPalette.Text, self._painter.pen().color())\n if clip.isValid():\n self._painter.setClipRect(clip)\n ctx.clip = clip\n self._doc.documentLayout().draw(self._painter, ctx)\n self._painter.restore()\n\n def _get_textdoc(self, index):\n \"\"\"Create the QTextDocument of an item.\n\n Args:\n index: The QModelIndex of the item to draw.\n \"\"\"\n # FIXME we probably should do eliding here. 
See\n # qcommonstyle.cpp:viewItemDrawText\n # https://github.com/qutebrowser/qutebrowser/issues/118\n text_option = QTextOption()\n if self._opt.features & QStyleOptionViewItem.WrapText:\n text_option.setWrapMode(QTextOption.WordWrap)\n else:\n text_option.setWrapMode(QTextOption.ManualWrap)\n text_option.setTextDirection(self._opt.direction)\n text_option.setAlignment(QStyle.visualAlignment(\n self._opt.direction, self._opt.displayAlignment))\n\n if self._doc is not None:\n self._doc.deleteLater()\n self._doc = QTextDocument(self)\n self._doc.setDefaultFont(self._opt.font)\n self._doc.setDefaultTextOption(text_option)\n self._doc.setDocumentMargin(2)\n\n if index.parent().isValid():\n view = self.parent()\n pattern = view.pattern\n columns_to_filter = index.model().columns_to_filter(index)\n self._doc.setPlainText(self._opt.text)\n if index.column() in columns_to_filter and pattern:\n pat = re.escape(pattern).replace(r'\\ ', r'|')\n _Highlighter(self._doc, pat,\n config.val.colors.completion.match.fg)\n else:\n self._doc.setHtml(\n '<span style=\"font: {};\">{}</span>'.format(\n html.escape(config.val.fonts.completion.category),\n html.escape(self._opt.text)))\n\n def _draw_focus_rect(self):\n \"\"\"Draw the focus rectangle of an ItemViewItem.\"\"\"\n state = self._opt.state\n if not state & QStyle.State_HasFocus:\n return\n o = self._opt\n o.rect = self._style.subElementRect(\n self._style.SE_ItemViewItemFocusRect, self._opt, self._opt.widget)\n o.state |= QStyle.State_KeyboardFocusChange | QStyle.State_Item\n qtutils.ensure_valid(o.rect)\n if state & QStyle.State_Enabled:\n cg = QPalette.Normal\n else:\n cg = QPalette.Disabled\n if state & QStyle.State_Selected:\n role = QPalette.Highlight\n else:\n role = QPalette.Window\n o.backgroundColor = self._opt.palette.color(cg, role)\n self._style.drawPrimitive(QStyle.PE_FrameFocusRect, o, self._painter,\n self._opt.widget)\n\n def sizeHint(self, option, index):\n \"\"\"Override sizeHint of QStyledItemDelegate.\n\n Return the cell size based on the QTextDocument size, but might not\n work correctly yet.\n\n Args:\n option: const QStyleOptionViewItem & option\n index: const QModelIndex & index\n\n Return:\n A QSize with the recommended size.\n \"\"\"\n value = index.data(Qt.SizeHintRole)\n if value is not None:\n return value\n self._opt = QStyleOptionViewItem(option)\n self.initStyleOption(self._opt, index)\n self._style = self._opt.widget.style()\n self._get_textdoc(index)\n docsize = self._doc.size().toSize()\n size = self._style.sizeFromContents(QStyle.CT_ItemViewItem, self._opt,\n docsize, self._opt.widget)\n qtutils.ensure_valid(size)\n return size + QSize(10, 3)\n\n def paint(self, painter, option, index):\n \"\"\"Override the QStyledItemDelegate paint function.\n\n Args:\n painter: QPainter * painter\n option: const QStyleOptionViewItem & option\n index: const QModelIndex & index\n \"\"\"\n self._painter = painter\n self._painter.save()\n self._opt = QStyleOptionViewItem(option)\n self.initStyleOption(self._opt, index)\n self._style = self._opt.widget.style()\n\n self._draw_background()\n self._draw_icon()\n self._draw_text(index)\n self._draw_focus_rect()\n\n self._painter.restore()\n", "path": "qutebrowser/completion/completiondelegate.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the 
GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completion item delegate for CompletionView.\n\nWe use this to be able to highlight parts of the text.\n\"\"\"\n\nimport re\nimport html\n\nfrom PyQt5.QtWidgets import QStyle, QStyleOptionViewItem, QStyledItemDelegate\nfrom PyQt5.QtCore import QRectF, QSize, Qt\nfrom PyQt5.QtGui import (QIcon, QPalette, QTextDocument, QTextOption,\n QAbstractTextDocumentLayout, QSyntaxHighlighter,\n QTextCharFormat)\n\nfrom qutebrowser.config import config\nfrom qutebrowser.utils import qtutils\n\n\nclass _Highlighter(QSyntaxHighlighter):\n\n def __init__(self, doc, pattern, color):\n super().__init__(doc)\n self._format = QTextCharFormat()\n self._format.setForeground(color)\n self._pattern = pattern\n\n def highlightBlock(self, text):\n \"\"\"Override highlightBlock for custom highlighting.\"\"\"\n for match in re.finditer(self._pattern, text, re.IGNORECASE):\n start, end = match.span()\n length = end - start\n self.setFormat(start, length, self._format)\n\n\nclass CompletionItemDelegate(QStyledItemDelegate):\n\n \"\"\"Delegate used by CompletionView to draw individual items.\n\n Mainly a cleaned up port of Qt's way to draw a TreeView item, except it\n uses a QTextDocument to draw the text and add marking.\n\n Original implementation:\n qt/src/gui/styles/qcommonstyle.cpp:drawControl:2153\n\n Attributes:\n _opt: The QStyleOptionViewItem which is used.\n _style: The style to be used.\n _painter: The QPainter to be used.\n _doc: The QTextDocument to be used.\n \"\"\"\n\n # FIXME this is horribly slow when resizing.\n # We should probably cache something in _get_textdoc or so, but as soon as\n # we implement eliding that cache probably isn't worth much anymore...\n # https://github.com/qutebrowser/qutebrowser/issues/121\n\n def __init__(self, parent=None):\n self._painter = None\n self._opt = None\n self._doc = None\n self._style = None\n super().__init__(parent)\n\n def _draw_background(self):\n \"\"\"Draw the background of an ItemViewItem.\"\"\"\n self._style.drawPrimitive(self._style.PE_PanelItemViewItem, self._opt,\n self._painter, self._opt.widget)\n\n def _draw_icon(self):\n \"\"\"Draw the icon of an ItemViewItem.\"\"\"\n icon_rect = self._style.subElementRect(\n self._style.SE_ItemViewItemDecoration, self._opt, self._opt.widget)\n if not icon_rect.isValid():\n # The rect seems to be wrong in all kind of ways if no icon should\n # be displayed.\n return\n\n mode = QIcon.Normal\n if not self._opt.state & QStyle.State_Enabled:\n mode = QIcon.Disabled\n elif self._opt.state & QStyle.State_Selected:\n mode = QIcon.Selected\n state = QIcon.On if self._opt.state & QStyle.State_Open else QIcon.Off\n self._opt.icon.paint(self._painter, icon_rect,\n self._opt.decorationAlignment, mode, state)\n\n def _draw_text(self, index):\n \"\"\"Draw the text of an ItemViewItem.\n\n This is the main part where we differ from the original implementation\n in Qt: We use a QTextDocument to draw text.\n\n Args:\n index: The QModelIndex of the item to draw.\n \"\"\"\n if not 
self._opt.text:\n return\n\n text_rect_ = self._style.subElementRect(\n self._style.SE_ItemViewItemText, self._opt, self._opt.widget)\n qtutils.ensure_valid(text_rect_)\n margin = self._style.pixelMetric(QStyle.PM_FocusFrameHMargin,\n self._opt, self._opt.widget) + 1\n # remove width padding\n text_rect = text_rect_.adjusted(margin, 0, -margin, 0)\n qtutils.ensure_valid(text_rect)\n # move text upwards a bit\n if index.parent().isValid():\n text_rect.adjust(0, -1, 0, -1)\n else:\n text_rect.adjust(0, -2, 0, -2)\n self._painter.save()\n state = self._opt.state\n if state & QStyle.State_Enabled and state & QStyle.State_Active:\n cg = QPalette.Normal\n elif state & QStyle.State_Enabled:\n cg = QPalette.Inactive\n else:\n cg = QPalette.Disabled\n\n if state & QStyle.State_Selected:\n self._painter.setPen(self._opt.palette.color(\n cg, QPalette.HighlightedText))\n # This is a dirty fix for the text jumping by one pixel for\n # whatever reason.\n text_rect.adjust(0, -1, 0, 0)\n else:\n self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))\n\n if state & QStyle.State_Editing:\n self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))\n self._painter.drawRect(text_rect_.adjusted(0, 0, -1, -1))\n\n self._painter.translate(text_rect.left(), text_rect.top())\n self._get_textdoc(index)\n self._draw_textdoc(text_rect, index.column())\n self._painter.restore()\n\n def _draw_textdoc(self, rect, col):\n \"\"\"Draw the QTextDocument of an item.\n\n Args:\n rect: The QRect to clip the drawing to.\n \"\"\"\n # We can't use drawContents because then the color would be ignored.\n clip = QRectF(0, 0, rect.width(), rect.height())\n self._painter.save()\n\n if self._opt.state & QStyle.State_Selected:\n color = config.cache['colors.completion.item.selected.fg']\n elif not self._opt.state & QStyle.State_Enabled:\n color = config.cache['colors.completion.category.fg']\n else:\n colors = config.cache['colors.completion.fg']\n # if multiple colors are set, use different colors per column\n color = colors[col % len(colors)]\n self._painter.setPen(color)\n\n ctx = QAbstractTextDocumentLayout.PaintContext()\n ctx.palette.setColor(QPalette.Text, self._painter.pen().color())\n if clip.isValid():\n self._painter.setClipRect(clip)\n ctx.clip = clip\n self._doc.documentLayout().draw(self._painter, ctx)\n self._painter.restore()\n\n def _get_textdoc(self, index):\n \"\"\"Create the QTextDocument of an item.\n\n Args:\n index: The QModelIndex of the item to draw.\n \"\"\"\n # FIXME we probably should do eliding here. 
See\n # qcommonstyle.cpp:viewItemDrawText\n # https://github.com/qutebrowser/qutebrowser/issues/118\n text_option = QTextOption()\n if self._opt.features & QStyleOptionViewItem.WrapText:\n text_option.setWrapMode(QTextOption.WordWrap)\n else:\n text_option.setWrapMode(QTextOption.ManualWrap)\n text_option.setTextDirection(self._opt.direction)\n text_option.setAlignment(QStyle.visualAlignment(\n self._opt.direction, self._opt.displayAlignment))\n\n if self._doc is not None:\n self._doc.deleteLater()\n self._doc = QTextDocument(self)\n self._doc.setDefaultFont(self._opt.font)\n self._doc.setDefaultTextOption(text_option)\n self._doc.setDocumentMargin(2)\n\n if index.parent().isValid():\n view = self.parent()\n pattern = view.pattern\n columns_to_filter = index.model().columns_to_filter(index)\n if index.column() in columns_to_filter and pattern:\n pat = re.escape(pattern).replace(r'\\ ', r'|')\n _Highlighter(self._doc, pat,\n config.val.colors.completion.match.fg)\n self._doc.setPlainText(self._opt.text)\n else:\n self._doc.setHtml(\n '<span style=\"font: {};\">{}</span>'.format(\n html.escape(config.val.fonts.completion.category),\n html.escape(self._opt.text)))\n\n def _draw_focus_rect(self):\n \"\"\"Draw the focus rectangle of an ItemViewItem.\"\"\"\n state = self._opt.state\n if not state & QStyle.State_HasFocus:\n return\n o = self._opt\n o.rect = self._style.subElementRect(\n self._style.SE_ItemViewItemFocusRect, self._opt, self._opt.widget)\n o.state |= QStyle.State_KeyboardFocusChange | QStyle.State_Item\n qtutils.ensure_valid(o.rect)\n if state & QStyle.State_Enabled:\n cg = QPalette.Normal\n else:\n cg = QPalette.Disabled\n if state & QStyle.State_Selected:\n role = QPalette.Highlight\n else:\n role = QPalette.Window\n o.backgroundColor = self._opt.palette.color(cg, role)\n self._style.drawPrimitive(QStyle.PE_FrameFocusRect, o, self._painter,\n self._opt.widget)\n\n def sizeHint(self, option, index):\n \"\"\"Override sizeHint of QStyledItemDelegate.\n\n Return the cell size based on the QTextDocument size, but might not\n work correctly yet.\n\n Args:\n option: const QStyleOptionViewItem & option\n index: const QModelIndex & index\n\n Return:\n A QSize with the recommended size.\n \"\"\"\n value = index.data(Qt.SizeHintRole)\n if value is not None:\n return value\n self._opt = QStyleOptionViewItem(option)\n self.initStyleOption(self._opt, index)\n self._style = self._opt.widget.style()\n self._get_textdoc(index)\n docsize = self._doc.size().toSize()\n size = self._style.sizeFromContents(QStyle.CT_ItemViewItem, self._opt,\n docsize, self._opt.widget)\n qtutils.ensure_valid(size)\n return size + QSize(10, 3)\n\n def paint(self, painter, option, index):\n \"\"\"Override the QStyledItemDelegate paint function.\n\n Args:\n painter: QPainter * painter\n option: const QStyleOptionViewItem & option\n index: const QModelIndex & index\n \"\"\"\n self._painter = painter\n self._painter.save()\n self._opt = QStyleOptionViewItem(option)\n self.initStyleOption(self._opt, index)\n self._style = self._opt.widget.style()\n\n self._draw_background()\n self._draw_icon()\n self._draw_text(index)\n self._draw_focus_rect()\n\n self._painter.restore()\n", "path": "qutebrowser/completion/completiondelegate.py"}]} | 3,985 | 184 |
gh_patches_debug_13882 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-6032 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken command argument type parsing
#### Problem Description
It seems like our command argument type parsing does not like `from __future__ import annotations`.
#### Steps to reproduce the behavior:
1. `mitmproxy`
2. `[n] [enter] [r]`
--- END ISSUE ---
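For background: with `from __future__ import annotations` (PEP 563), function annotations are stored as strings rather than evaluated objects, so a plain `inspect.signature()` call hands back string annotations that no longer match the registered command types. Since Python 3.10, `inspect.signature(..., eval_str=True)` evaluates those strings back into real types. A minimal standalone sketch of the difference (assuming Python 3.10+; `greet` is just an illustrative function, not mitmproxy code):

```python
from __future__ import annotations

import inspect


def greet(name: str) -> str:
    return f"hello {name}"


# With postponed evaluation, the raw annotation is the string 'str'.
print(inspect.signature(greet).parameters["name"].annotation)

# eval_str=True (Python 3.10+) resolves it back to the actual type object.
print(inspect.signature(greet, eval_str=True).parameters["name"].annotation)
```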
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/command.py`
Content:
```
1 """
2 This module manages and invokes typed commands.
3 """
4 import functools
5 import inspect
6 import logging
7 import sys
8 import textwrap
9 import types
10 from collections.abc import Callable
11 from collections.abc import Iterable
12 from collections.abc import Sequence
13 from typing import Any
14 from typing import NamedTuple
15
16 import pyparsing
17
18 import mitmproxy.types
19 from mitmproxy import command_lexer
20 from mitmproxy import exceptions
21 from mitmproxy.command_lexer import unquote
22
23
24 def verify_arg_signature(f: Callable, args: Iterable[Any], kwargs: dict) -> None:
25 sig = inspect.signature(f)
26 try:
27 sig.bind(*args, **kwargs)
28 except TypeError as v:
29 raise exceptions.CommandError("command argument mismatch: %s" % v.args[0])
30
31
32 def typename(t: type) -> str:
33 """
34 Translates a type to an explanatory string.
35 """
36 if t == inspect._empty: # type: ignore
37 raise exceptions.CommandError("missing type annotation")
38 to = mitmproxy.types.CommandTypes.get(t, None)
39 if not to:
40 raise exceptions.CommandError(
41 "unsupported type: %s" % getattr(t, "__name__", t)
42 )
43 return to.display
44
45
46 def _empty_as_none(x: Any) -> Any:
47 if x == inspect.Signature.empty:
48 return None
49 return x
50
51
52 class CommandParameter(NamedTuple):
53 name: str
54 type: type
55 kind: inspect._ParameterKind = inspect.Parameter.POSITIONAL_OR_KEYWORD
56
57 def __str__(self):
58 if self.kind is inspect.Parameter.VAR_POSITIONAL:
59 return f"*{self.name}"
60 else:
61 return self.name
62
63
64 class Command:
65 name: str
66 manager: "CommandManager"
67 signature: inspect.Signature
68 help: str | None
69
70 def __init__(self, manager: "CommandManager", name: str, func: Callable) -> None:
71 self.name = name
72 self.manager = manager
73 self.func = func
74 self.signature = inspect.signature(self.func)
75
76 if func.__doc__:
77 txt = func.__doc__.strip()
78 self.help = "\n".join(textwrap.wrap(txt))
79 else:
80 self.help = None
81
82 # This fails with a CommandException if types are invalid
83 for name, parameter in self.signature.parameters.items():
84 t = parameter.annotation
85 if not mitmproxy.types.CommandTypes.get(parameter.annotation, None):
86 raise exceptions.CommandError(
87 f"Argument {name} has an unknown type {t} in {func}."
88 )
89 if self.return_type and not mitmproxy.types.CommandTypes.get(
90 self.return_type, None
91 ):
92 raise exceptions.CommandError(
93 f"Return type has an unknown type ({self.return_type}) in {func}."
94 )
95
96 @property
97 def return_type(self) -> type | None:
98 return _empty_as_none(self.signature.return_annotation)
99
100 @property
101 def parameters(self) -> list[CommandParameter]:
102 """Returns a list of CommandParameters."""
103 ret = []
104 for name, param in self.signature.parameters.items():
105 ret.append(CommandParameter(name, param.annotation, param.kind))
106 return ret
107
108 def signature_help(self) -> str:
109 params = " ".join(str(param) for param in self.parameters)
110 if self.return_type:
111 ret = f" -> {typename(self.return_type)}"
112 else:
113 ret = ""
114 return f"{self.name} {params}{ret}"
115
116 def prepare_args(self, args: Sequence[str]) -> inspect.BoundArguments:
117 try:
118 bound_arguments = self.signature.bind(*args)
119 except TypeError:
120 expected = f"Expected: {str(self.signature.parameters)}"
121 received = f"Received: {str(args)}"
122 raise exceptions.CommandError(
123 f"Command argument mismatch: \n {expected}\n {received}"
124 )
125
126 for name, value in bound_arguments.arguments.items():
127 param = self.signature.parameters[name]
128 convert_to = param.annotation
129 if param.kind == param.VAR_POSITIONAL:
130 bound_arguments.arguments[name] = tuple(
131 parsearg(self.manager, x, convert_to) for x in value
132 )
133 else:
134 bound_arguments.arguments[name] = parsearg(
135 self.manager, value, convert_to
136 )
137
138 bound_arguments.apply_defaults()
139
140 return bound_arguments
141
142 def call(self, args: Sequence[str]) -> Any:
143 """
144 Call the command with a list of arguments. At this point, all
145 arguments are strings.
146 """
147 bound_args = self.prepare_args(args)
148 ret = self.func(*bound_args.args, **bound_args.kwargs)
149 if ret is None and self.return_type is None:
150 return
151 typ = mitmproxy.types.CommandTypes.get(self.return_type)
152 assert typ
153 if not typ.is_valid(self.manager, typ, ret):
154 raise exceptions.CommandError(
155 f"{self.name} returned unexpected data - expected {typ.display}"
156 )
157 return ret
158
159
160 class ParseResult(NamedTuple):
161 value: str
162 type: type
163 valid: bool
164
165
166 class CommandManager:
167 commands: dict[str, Command]
168
169 def __init__(self, master):
170 self.master = master
171 self.commands = {}
172
173 def collect_commands(self, addon):
174 for i in dir(addon):
175 if not i.startswith("__"):
176 o = getattr(addon, i)
177 try:
178 # hasattr is not enough, see https://github.com/mitmproxy/mitmproxy/issues/3794
179 is_command = isinstance(getattr(o, "command_name", None), str)
180 except Exception:
181 pass # getattr may raise if o implements __getattr__.
182 else:
183 if is_command:
184 try:
185 self.add(o.command_name, o)
186 except exceptions.CommandError as e:
187 logging.warning(
188 f"Could not load command {o.command_name}: {e}"
189 )
190
191 def add(self, path: str, func: Callable):
192 self.commands[path] = Command(self, path, func)
193
194 @functools.lru_cache(maxsize=128)
195 def parse_partial(
196 self, cmdstr: str
197 ) -> tuple[Sequence[ParseResult], Sequence[CommandParameter]]:
198 """
199 Parse a possibly partial command. Return a sequence of ParseResults and a sequence of remainder type help items.
200 """
201
202 parts: pyparsing.ParseResults = command_lexer.expr.parseString(
203 cmdstr, parseAll=True
204 )
205
206 parsed: list[ParseResult] = []
207 next_params: list[CommandParameter] = [
208 CommandParameter("", mitmproxy.types.Cmd),
209 CommandParameter("", mitmproxy.types.CmdArgs),
210 ]
211 expected: CommandParameter | None = None
212 for part in parts:
213 if part.isspace():
214 parsed.append(
215 ParseResult(
216 value=part,
217 type=mitmproxy.types.Space,
218 valid=True,
219 )
220 )
221 continue
222
223 if expected and expected.kind is inspect.Parameter.VAR_POSITIONAL:
224 assert not next_params
225 elif next_params:
226 expected = next_params.pop(0)
227 else:
228 expected = CommandParameter("", mitmproxy.types.Unknown)
229
230 arg_is_known_command = (
231 expected.type == mitmproxy.types.Cmd and part in self.commands
232 )
233 arg_is_unknown_command = (
234 expected.type == mitmproxy.types.Cmd and part not in self.commands
235 )
236 command_args_following = (
237 next_params and next_params[0].type == mitmproxy.types.CmdArgs
238 )
239 if arg_is_known_command and command_args_following:
240 next_params = self.commands[part].parameters + next_params[1:]
241 if arg_is_unknown_command and command_args_following:
242 next_params.pop(0)
243
244 to = mitmproxy.types.CommandTypes.get(expected.type, None)
245 valid = False
246 if to:
247 try:
248 to.parse(self, expected.type, part)
249 except ValueError:
250 valid = False
251 else:
252 valid = True
253
254 parsed.append(
255 ParseResult(
256 value=part,
257 type=expected.type,
258 valid=valid,
259 )
260 )
261
262 return parsed, next_params
263
264 def call(self, command_name: str, *args: Any) -> Any:
265 """
266 Call a command with native arguments. May raise CommandError.
267 """
268 if command_name not in self.commands:
269 raise exceptions.CommandError("Unknown command: %s" % command_name)
270 return self.commands[command_name].func(*args)
271
272 def call_strings(self, command_name: str, args: Sequence[str]) -> Any:
273 """
274 Call a command using a list of string arguments. May raise CommandError.
275 """
276 if command_name not in self.commands:
277 raise exceptions.CommandError("Unknown command: %s" % command_name)
278
279 return self.commands[command_name].call(args)
280
281 def execute(self, cmdstr: str) -> Any:
282 """
283 Execute a command string. May raise CommandError.
284 """
285 parts, _ = self.parse_partial(cmdstr)
286 if not parts:
287 raise exceptions.CommandError(f"Invalid command: {cmdstr!r}")
288 command_name, *args = (
289 unquote(part.value) for part in parts if part.type != mitmproxy.types.Space
290 )
291 return self.call_strings(command_name, args)
292
293 def dump(self, out=sys.stdout) -> None:
294 cmds = list(self.commands.values())
295 cmds.sort(key=lambda x: x.signature_help())
296 for c in cmds:
297 for hl in (c.help or "").splitlines():
298 print("# " + hl, file=out)
299 print(c.signature_help(), file=out)
300 print(file=out)
301
302
303 def parsearg(manager: CommandManager, spec: str, argtype: type) -> Any:
304 """
305 Convert a string to a argument to the appropriate type.
306 """
307 t = mitmproxy.types.CommandTypes.get(argtype, None)
308 if not t:
309 raise exceptions.CommandError(f"Unsupported argument type: {argtype}")
310 try:
311 return t.parse(manager, argtype, spec)
312 except ValueError as e:
313 raise exceptions.CommandError(str(e)) from e
314
315
316 def command(name: str | None = None):
317 def decorator(function):
318 @functools.wraps(function)
319 def wrapper(*args, **kwargs):
320 verify_arg_signature(function, args, kwargs)
321 return function(*args, **kwargs)
322
323 wrapper.__dict__["command_name"] = name or function.__name__.replace("_", ".")
324 return wrapper
325
326 return decorator
327
328
329 def argument(name, type):
330 """
331 Set the type of a command argument at runtime. This is useful for more
332 specific types such as mitmproxy.types.Choice, which we cannot annotate
333 directly as mypy does not like that.
334 """
335
336 def decorator(f: types.FunctionType) -> types.FunctionType:
337 assert name in f.__annotations__
338 f.__annotations__[name] = type
339 return f
340
341 return decorator
342
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/command.py b/mitmproxy/command.py
--- a/mitmproxy/command.py
+++ b/mitmproxy/command.py
@@ -22,7 +22,7 @@
def verify_arg_signature(f: Callable, args: Iterable[Any], kwargs: dict) -> None:
- sig = inspect.signature(f)
+ sig = inspect.signature(f, eval_str=True)
try:
sig.bind(*args, **kwargs)
except TypeError as v:
@@ -71,7 +71,7 @@
self.name = name
self.manager = manager
self.func = func
- self.signature = inspect.signature(self.func)
+ self.signature = inspect.signature(self.func, eval_str=True)
if func.__doc__:
txt = func.__doc__.strip()
| {"golden_diff": "diff --git a/mitmproxy/command.py b/mitmproxy/command.py\n--- a/mitmproxy/command.py\n+++ b/mitmproxy/command.py\n@@ -22,7 +22,7 @@\n \n \n def verify_arg_signature(f: Callable, args: Iterable[Any], kwargs: dict) -> None:\n- sig = inspect.signature(f)\n+ sig = inspect.signature(f, eval_str=True)\n try:\n sig.bind(*args, **kwargs)\n except TypeError as v:\n@@ -71,7 +71,7 @@\n self.name = name\n self.manager = manager\n self.func = func\n- self.signature = inspect.signature(self.func)\n+ self.signature = inspect.signature(self.func, eval_str=True)\n \n if func.__doc__:\n txt = func.__doc__.strip()\n", "issue": "Broken command argument type parsing\n#### Problem Description\r\n\r\nIt seems like our command argument type parsing does not like `from __future__ import annotations`.\r\n\r\n#### Steps to reproduce the behavior:\r\n\r\n1. `mitmproxy`\r\n2. `[n] [enter] [r]`\n", "before_files": [{"content": "\"\"\"\n This module manages and invokes typed commands.\n\"\"\"\nimport functools\nimport inspect\nimport logging\nimport sys\nimport textwrap\nimport types\nfrom collections.abc import Callable\nfrom collections.abc import Iterable\nfrom collections.abc import Sequence\nfrom typing import Any\nfrom typing import NamedTuple\n\nimport pyparsing\n\nimport mitmproxy.types\nfrom mitmproxy import command_lexer\nfrom mitmproxy import exceptions\nfrom mitmproxy.command_lexer import unquote\n\n\ndef verify_arg_signature(f: Callable, args: Iterable[Any], kwargs: dict) -> None:\n sig = inspect.signature(f)\n try:\n sig.bind(*args, **kwargs)\n except TypeError as v:\n raise exceptions.CommandError(\"command argument mismatch: %s\" % v.args[0])\n\n\ndef typename(t: type) -> str:\n \"\"\"\n Translates a type to an explanatory string.\n \"\"\"\n if t == inspect._empty: # type: ignore\n raise exceptions.CommandError(\"missing type annotation\")\n to = mitmproxy.types.CommandTypes.get(t, None)\n if not to:\n raise exceptions.CommandError(\n \"unsupported type: %s\" % getattr(t, \"__name__\", t)\n )\n return to.display\n\n\ndef _empty_as_none(x: Any) -> Any:\n if x == inspect.Signature.empty:\n return None\n return x\n\n\nclass CommandParameter(NamedTuple):\n name: str\n type: type\n kind: inspect._ParameterKind = inspect.Parameter.POSITIONAL_OR_KEYWORD\n\n def __str__(self):\n if self.kind is inspect.Parameter.VAR_POSITIONAL:\n return f\"*{self.name}\"\n else:\n return self.name\n\n\nclass Command:\n name: str\n manager: \"CommandManager\"\n signature: inspect.Signature\n help: str | None\n\n def __init__(self, manager: \"CommandManager\", name: str, func: Callable) -> None:\n self.name = name\n self.manager = manager\n self.func = func\n self.signature = inspect.signature(self.func)\n\n if func.__doc__:\n txt = func.__doc__.strip()\n self.help = \"\\n\".join(textwrap.wrap(txt))\n else:\n self.help = None\n\n # This fails with a CommandException if types are invalid\n for name, parameter in self.signature.parameters.items():\n t = parameter.annotation\n if not mitmproxy.types.CommandTypes.get(parameter.annotation, None):\n raise exceptions.CommandError(\n f\"Argument {name} has an unknown type {t} in {func}.\"\n )\n if self.return_type and not mitmproxy.types.CommandTypes.get(\n self.return_type, None\n ):\n raise exceptions.CommandError(\n f\"Return type has an unknown type ({self.return_type}) in {func}.\"\n )\n\n @property\n def return_type(self) -> type | None:\n return _empty_as_none(self.signature.return_annotation)\n\n @property\n def parameters(self) -> list[CommandParameter]:\n 
\"\"\"Returns a list of CommandParameters.\"\"\"\n ret = []\n for name, param in self.signature.parameters.items():\n ret.append(CommandParameter(name, param.annotation, param.kind))\n return ret\n\n def signature_help(self) -> str:\n params = \" \".join(str(param) for param in self.parameters)\n if self.return_type:\n ret = f\" -> {typename(self.return_type)}\"\n else:\n ret = \"\"\n return f\"{self.name} {params}{ret}\"\n\n def prepare_args(self, args: Sequence[str]) -> inspect.BoundArguments:\n try:\n bound_arguments = self.signature.bind(*args)\n except TypeError:\n expected = f\"Expected: {str(self.signature.parameters)}\"\n received = f\"Received: {str(args)}\"\n raise exceptions.CommandError(\n f\"Command argument mismatch: \\n {expected}\\n {received}\"\n )\n\n for name, value in bound_arguments.arguments.items():\n param = self.signature.parameters[name]\n convert_to = param.annotation\n if param.kind == param.VAR_POSITIONAL:\n bound_arguments.arguments[name] = tuple(\n parsearg(self.manager, x, convert_to) for x in value\n )\n else:\n bound_arguments.arguments[name] = parsearg(\n self.manager, value, convert_to\n )\n\n bound_arguments.apply_defaults()\n\n return bound_arguments\n\n def call(self, args: Sequence[str]) -> Any:\n \"\"\"\n Call the command with a list of arguments. At this point, all\n arguments are strings.\n \"\"\"\n bound_args = self.prepare_args(args)\n ret = self.func(*bound_args.args, **bound_args.kwargs)\n if ret is None and self.return_type is None:\n return\n typ = mitmproxy.types.CommandTypes.get(self.return_type)\n assert typ\n if not typ.is_valid(self.manager, typ, ret):\n raise exceptions.CommandError(\n f\"{self.name} returned unexpected data - expected {typ.display}\"\n )\n return ret\n\n\nclass ParseResult(NamedTuple):\n value: str\n type: type\n valid: bool\n\n\nclass CommandManager:\n commands: dict[str, Command]\n\n def __init__(self, master):\n self.master = master\n self.commands = {}\n\n def collect_commands(self, addon):\n for i in dir(addon):\n if not i.startswith(\"__\"):\n o = getattr(addon, i)\n try:\n # hasattr is not enough, see https://github.com/mitmproxy/mitmproxy/issues/3794\n is_command = isinstance(getattr(o, \"command_name\", None), str)\n except Exception:\n pass # getattr may raise if o implements __getattr__.\n else:\n if is_command:\n try:\n self.add(o.command_name, o)\n except exceptions.CommandError as e:\n logging.warning(\n f\"Could not load command {o.command_name}: {e}\"\n )\n\n def add(self, path: str, func: Callable):\n self.commands[path] = Command(self, path, func)\n\n @functools.lru_cache(maxsize=128)\n def parse_partial(\n self, cmdstr: str\n ) -> tuple[Sequence[ParseResult], Sequence[CommandParameter]]:\n \"\"\"\n Parse a possibly partial command. 
Return a sequence of ParseResults and a sequence of remainder type help items.\n \"\"\"\n\n parts: pyparsing.ParseResults = command_lexer.expr.parseString(\n cmdstr, parseAll=True\n )\n\n parsed: list[ParseResult] = []\n next_params: list[CommandParameter] = [\n CommandParameter(\"\", mitmproxy.types.Cmd),\n CommandParameter(\"\", mitmproxy.types.CmdArgs),\n ]\n expected: CommandParameter | None = None\n for part in parts:\n if part.isspace():\n parsed.append(\n ParseResult(\n value=part,\n type=mitmproxy.types.Space,\n valid=True,\n )\n )\n continue\n\n if expected and expected.kind is inspect.Parameter.VAR_POSITIONAL:\n assert not next_params\n elif next_params:\n expected = next_params.pop(0)\n else:\n expected = CommandParameter(\"\", mitmproxy.types.Unknown)\n\n arg_is_known_command = (\n expected.type == mitmproxy.types.Cmd and part in self.commands\n )\n arg_is_unknown_command = (\n expected.type == mitmproxy.types.Cmd and part not in self.commands\n )\n command_args_following = (\n next_params and next_params[0].type == mitmproxy.types.CmdArgs\n )\n if arg_is_known_command and command_args_following:\n next_params = self.commands[part].parameters + next_params[1:]\n if arg_is_unknown_command and command_args_following:\n next_params.pop(0)\n\n to = mitmproxy.types.CommandTypes.get(expected.type, None)\n valid = False\n if to:\n try:\n to.parse(self, expected.type, part)\n except ValueError:\n valid = False\n else:\n valid = True\n\n parsed.append(\n ParseResult(\n value=part,\n type=expected.type,\n valid=valid,\n )\n )\n\n return parsed, next_params\n\n def call(self, command_name: str, *args: Any) -> Any:\n \"\"\"\n Call a command with native arguments. May raise CommandError.\n \"\"\"\n if command_name not in self.commands:\n raise exceptions.CommandError(\"Unknown command: %s\" % command_name)\n return self.commands[command_name].func(*args)\n\n def call_strings(self, command_name: str, args: Sequence[str]) -> Any:\n \"\"\"\n Call a command using a list of string arguments. May raise CommandError.\n \"\"\"\n if command_name not in self.commands:\n raise exceptions.CommandError(\"Unknown command: %s\" % command_name)\n\n return self.commands[command_name].call(args)\n\n def execute(self, cmdstr: str) -> Any:\n \"\"\"\n Execute a command string. 
May raise CommandError.\n \"\"\"\n parts, _ = self.parse_partial(cmdstr)\n if not parts:\n raise exceptions.CommandError(f\"Invalid command: {cmdstr!r}\")\n command_name, *args = (\n unquote(part.value) for part in parts if part.type != mitmproxy.types.Space\n )\n return self.call_strings(command_name, args)\n\n def dump(self, out=sys.stdout) -> None:\n cmds = list(self.commands.values())\n cmds.sort(key=lambda x: x.signature_help())\n for c in cmds:\n for hl in (c.help or \"\").splitlines():\n print(\"# \" + hl, file=out)\n print(c.signature_help(), file=out)\n print(file=out)\n\n\ndef parsearg(manager: CommandManager, spec: str, argtype: type) -> Any:\n \"\"\"\n Convert a string to a argument to the appropriate type.\n \"\"\"\n t = mitmproxy.types.CommandTypes.get(argtype, None)\n if not t:\n raise exceptions.CommandError(f\"Unsupported argument type: {argtype}\")\n try:\n return t.parse(manager, argtype, spec)\n except ValueError as e:\n raise exceptions.CommandError(str(e)) from e\n\n\ndef command(name: str | None = None):\n def decorator(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n verify_arg_signature(function, args, kwargs)\n return function(*args, **kwargs)\n\n wrapper.__dict__[\"command_name\"] = name or function.__name__.replace(\"_\", \".\")\n return wrapper\n\n return decorator\n\n\ndef argument(name, type):\n \"\"\"\n Set the type of a command argument at runtime. This is useful for more\n specific types such as mitmproxy.types.Choice, which we cannot annotate\n directly as mypy does not like that.\n \"\"\"\n\n def decorator(f: types.FunctionType) -> types.FunctionType:\n assert name in f.__annotations__\n f.__annotations__[name] = type\n return f\n\n return decorator\n", "path": "mitmproxy/command.py"}], "after_files": [{"content": "\"\"\"\n This module manages and invokes typed commands.\n\"\"\"\nimport functools\nimport inspect\nimport logging\nimport sys\nimport textwrap\nimport types\nfrom collections.abc import Callable\nfrom collections.abc import Iterable\nfrom collections.abc import Sequence\nfrom typing import Any\nfrom typing import NamedTuple\n\nimport pyparsing\n\nimport mitmproxy.types\nfrom mitmproxy import command_lexer\nfrom mitmproxy import exceptions\nfrom mitmproxy.command_lexer import unquote\n\n\ndef verify_arg_signature(f: Callable, args: Iterable[Any], kwargs: dict) -> None:\n sig = inspect.signature(f, eval_str=True)\n try:\n sig.bind(*args, **kwargs)\n except TypeError as v:\n raise exceptions.CommandError(\"command argument mismatch: %s\" % v.args[0])\n\n\ndef typename(t: type) -> str:\n \"\"\"\n Translates a type to an explanatory string.\n \"\"\"\n if t == inspect._empty: # type: ignore\n raise exceptions.CommandError(\"missing type annotation\")\n to = mitmproxy.types.CommandTypes.get(t, None)\n if not to:\n raise exceptions.CommandError(\n \"unsupported type: %s\" % getattr(t, \"__name__\", t)\n )\n return to.display\n\n\ndef _empty_as_none(x: Any) -> Any:\n if x == inspect.Signature.empty:\n return None\n return x\n\n\nclass CommandParameter(NamedTuple):\n name: str\n type: type\n kind: inspect._ParameterKind = inspect.Parameter.POSITIONAL_OR_KEYWORD\n\n def __str__(self):\n if self.kind is inspect.Parameter.VAR_POSITIONAL:\n return f\"*{self.name}\"\n else:\n return self.name\n\n\nclass Command:\n name: str\n manager: \"CommandManager\"\n signature: inspect.Signature\n help: str | None\n\n def __init__(self, manager: \"CommandManager\", name: str, func: Callable) -> None:\n self.name = name\n self.manager = 
manager\n self.func = func\n self.signature = inspect.signature(self.func, eval_str=True)\n\n if func.__doc__:\n txt = func.__doc__.strip()\n self.help = \"\\n\".join(textwrap.wrap(txt))\n else:\n self.help = None\n\n # This fails with a CommandException if types are invalid\n for name, parameter in self.signature.parameters.items():\n t = parameter.annotation\n if not mitmproxy.types.CommandTypes.get(parameter.annotation, None):\n raise exceptions.CommandError(\n f\"Argument {name} has an unknown type {t} in {func}.\"\n )\n if self.return_type and not mitmproxy.types.CommandTypes.get(\n self.return_type, None\n ):\n raise exceptions.CommandError(\n f\"Return type has an unknown type ({self.return_type}) in {func}.\"\n )\n\n @property\n def return_type(self) -> type | None:\n return _empty_as_none(self.signature.return_annotation)\n\n @property\n def parameters(self) -> list[CommandParameter]:\n \"\"\"Returns a list of CommandParameters.\"\"\"\n ret = []\n for name, param in self.signature.parameters.items():\n ret.append(CommandParameter(name, param.annotation, param.kind))\n return ret\n\n def signature_help(self) -> str:\n params = \" \".join(str(param) for param in self.parameters)\n if self.return_type:\n ret = f\" -> {typename(self.return_type)}\"\n else:\n ret = \"\"\n return f\"{self.name} {params}{ret}\"\n\n def prepare_args(self, args: Sequence[str]) -> inspect.BoundArguments:\n try:\n bound_arguments = self.signature.bind(*args)\n except TypeError:\n expected = f\"Expected: {str(self.signature.parameters)}\"\n received = f\"Received: {str(args)}\"\n raise exceptions.CommandError(\n f\"Command argument mismatch: \\n {expected}\\n {received}\"\n )\n\n for name, value in bound_arguments.arguments.items():\n param = self.signature.parameters[name]\n convert_to = param.annotation\n if param.kind == param.VAR_POSITIONAL:\n bound_arguments.arguments[name] = tuple(\n parsearg(self.manager, x, convert_to) for x in value\n )\n else:\n bound_arguments.arguments[name] = parsearg(\n self.manager, value, convert_to\n )\n\n bound_arguments.apply_defaults()\n\n return bound_arguments\n\n def call(self, args: Sequence[str]) -> Any:\n \"\"\"\n Call the command with a list of arguments. 
At this point, all\n arguments are strings.\n \"\"\"\n bound_args = self.prepare_args(args)\n ret = self.func(*bound_args.args, **bound_args.kwargs)\n if ret is None and self.return_type is None:\n return\n typ = mitmproxy.types.CommandTypes.get(self.return_type)\n assert typ\n if not typ.is_valid(self.manager, typ, ret):\n raise exceptions.CommandError(\n f\"{self.name} returned unexpected data - expected {typ.display}\"\n )\n return ret\n\n\nclass ParseResult(NamedTuple):\n value: str\n type: type\n valid: bool\n\n\nclass CommandManager:\n commands: dict[str, Command]\n\n def __init__(self, master):\n self.master = master\n self.commands = {}\n\n def collect_commands(self, addon):\n for i in dir(addon):\n if not i.startswith(\"__\"):\n o = getattr(addon, i)\n try:\n # hasattr is not enough, see https://github.com/mitmproxy/mitmproxy/issues/3794\n is_command = isinstance(getattr(o, \"command_name\", None), str)\n except Exception:\n pass # getattr may raise if o implements __getattr__.\n else:\n if is_command:\n try:\n self.add(o.command_name, o)\n except exceptions.CommandError as e:\n logging.warning(\n f\"Could not load command {o.command_name}: {e}\"\n )\n\n def add(self, path: str, func: Callable):\n self.commands[path] = Command(self, path, func)\n\n @functools.lru_cache(maxsize=128)\n def parse_partial(\n self, cmdstr: str\n ) -> tuple[Sequence[ParseResult], Sequence[CommandParameter]]:\n \"\"\"\n Parse a possibly partial command. Return a sequence of ParseResults and a sequence of remainder type help items.\n \"\"\"\n\n parts: pyparsing.ParseResults = command_lexer.expr.parseString(\n cmdstr, parseAll=True\n )\n\n parsed: list[ParseResult] = []\n next_params: list[CommandParameter] = [\n CommandParameter(\"\", mitmproxy.types.Cmd),\n CommandParameter(\"\", mitmproxy.types.CmdArgs),\n ]\n expected: CommandParameter | None = None\n for part in parts:\n if part.isspace():\n parsed.append(\n ParseResult(\n value=part,\n type=mitmproxy.types.Space,\n valid=True,\n )\n )\n continue\n\n if expected and expected.kind is inspect.Parameter.VAR_POSITIONAL:\n assert not next_params\n elif next_params:\n expected = next_params.pop(0)\n else:\n expected = CommandParameter(\"\", mitmproxy.types.Unknown)\n\n arg_is_known_command = (\n expected.type == mitmproxy.types.Cmd and part in self.commands\n )\n arg_is_unknown_command = (\n expected.type == mitmproxy.types.Cmd and part not in self.commands\n )\n command_args_following = (\n next_params and next_params[0].type == mitmproxy.types.CmdArgs\n )\n if arg_is_known_command and command_args_following:\n next_params = self.commands[part].parameters + next_params[1:]\n if arg_is_unknown_command and command_args_following:\n next_params.pop(0)\n\n to = mitmproxy.types.CommandTypes.get(expected.type, None)\n valid = False\n if to:\n try:\n to.parse(self, expected.type, part)\n except ValueError:\n valid = False\n else:\n valid = True\n\n parsed.append(\n ParseResult(\n value=part,\n type=expected.type,\n valid=valid,\n )\n )\n\n return parsed, next_params\n\n def call(self, command_name: str, *args: Any) -> Any:\n \"\"\"\n Call a command with native arguments. May raise CommandError.\n \"\"\"\n if command_name not in self.commands:\n raise exceptions.CommandError(\"Unknown command: %s\" % command_name)\n return self.commands[command_name].func(*args)\n\n def call_strings(self, command_name: str, args: Sequence[str]) -> Any:\n \"\"\"\n Call a command using a list of string arguments. 
May raise CommandError.\n \"\"\"\n if command_name not in self.commands:\n raise exceptions.CommandError(\"Unknown command: %s\" % command_name)\n\n return self.commands[command_name].call(args)\n\n def execute(self, cmdstr: str) -> Any:\n \"\"\"\n Execute a command string. May raise CommandError.\n \"\"\"\n parts, _ = self.parse_partial(cmdstr)\n if not parts:\n raise exceptions.CommandError(f\"Invalid command: {cmdstr!r}\")\n command_name, *args = (\n unquote(part.value) for part in parts if part.type != mitmproxy.types.Space\n )\n return self.call_strings(command_name, args)\n\n def dump(self, out=sys.stdout) -> None:\n cmds = list(self.commands.values())\n cmds.sort(key=lambda x: x.signature_help())\n for c in cmds:\n for hl in (c.help or \"\").splitlines():\n print(\"# \" + hl, file=out)\n print(c.signature_help(), file=out)\n print(file=out)\n\n\ndef parsearg(manager: CommandManager, spec: str, argtype: type) -> Any:\n \"\"\"\n Convert a string to a argument to the appropriate type.\n \"\"\"\n t = mitmproxy.types.CommandTypes.get(argtype, None)\n if not t:\n raise exceptions.CommandError(f\"Unsupported argument type: {argtype}\")\n try:\n return t.parse(manager, argtype, spec)\n except ValueError as e:\n raise exceptions.CommandError(str(e)) from e\n\n\ndef command(name: str | None = None):\n def decorator(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n verify_arg_signature(function, args, kwargs)\n return function(*args, **kwargs)\n\n wrapper.__dict__[\"command_name\"] = name or function.__name__.replace(\"_\", \".\")\n return wrapper\n\n return decorator\n\n\ndef argument(name, type):\n \"\"\"\n Set the type of a command argument at runtime. This is useful for more\n specific types such as mitmproxy.types.Choice, which we cannot annotate\n directly as mypy does not like that.\n \"\"\"\n\n def decorator(f: types.FunctionType) -> types.FunctionType:\n assert name in f.__annotations__\n f.__annotations__[name] = type\n return f\n\n return decorator\n", "path": "mitmproxy/command.py"}]} | 3,621 | 174 |
gh_patches_debug_1487 | rasdani/github-patches | git_diff | huggingface__diffusers-1149 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Flax] 🚨 0.7.0 not working 🚨
### Describe the bug

### Reproduction
_No response_
### Logs
_No response_
### System Info
TPU v3-8
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/diffusers/models/embeddings_flax.py`
Content:
```
1 # Copyright 2022 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import math
15
16 import flax.linen as nn
17 import jax.numpy as jnp
18
19
20 def get_sinusoidal_embeddings(
21 timesteps: jnp.ndarray,
22 embedding_dim: int,
23 freq_shift: float = 1,
24 min_timescale: float = 1,
25 max_timescale: float = 1.0e4,
26 flip_sin_to_cos: bool = False,
27 scale: float = 1.0,
28 ) -> jnp.ndarray:
29 """Returns the positional encoding (same as Tensor2Tensor).
30 Args:
31 timesteps: a 1-D Tensor of N indices, one per batch element.
32 These may be fractional.
33 embedding_dim: The number of output channels.
34 min_timescale: The smallest time unit (should probably be 0.0).
35 max_timescale: The largest time unit.
36 Returns:
37 a Tensor of timing signals [N, num_channels]
38 """
39 assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
40 assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
41 num_timescales = float(embedding_dim // 2)
42 log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
43 inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
44 emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
45
46 # scale embeddings
47 scaled_time = scale * emb
48
49 if flip_sin_to_cos:
50 signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
51 else:
52 signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
53 signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
54 return signal
55
56
57 class FlaxTimestepEmbedding(nn.Module):
58 r"""
59 Time step Embedding Module. Learns embeddings for input time steps.
60
61 Args:
62 time_embed_dim (`int`, *optional*, defaults to `32`):
63 Time step embedding dimension
64 dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
65 Parameters `dtype`
66 """
67 time_embed_dim: int = 32
68 dtype: jnp.dtype = jnp.float32
69
70 @nn.compact
71 def __call__(self, temb):
72 temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
73 temb = nn.silu(temb)
74 temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
75 return temb
76
77
78 class FlaxTimesteps(nn.Module):
79 r"""
80 Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239
81
82 Args:
83 dim (`int`, *optional*, defaults to `32`):
84 Time step embedding dimension
85 """
86 dim: int = 32
87 freq_shift: float = 1
88
89 @nn.compact
90 def __call__(self, timesteps):
91 return get_sinusoidal_embeddings(timesteps, embedding_dim=self.dim, freq_shift=self.freq_shift)
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/diffusers/models/embeddings_flax.py b/src/diffusers/models/embeddings_flax.py
--- a/src/diffusers/models/embeddings_flax.py
+++ b/src/diffusers/models/embeddings_flax.py
@@ -88,4 +88,6 @@
@nn.compact
def __call__(self, timesteps):
- return get_sinusoidal_embeddings(timesteps, embedding_dim=self.dim, freq_shift=self.freq_shift)
+ return get_sinusoidal_embeddings(
+ timesteps, embedding_dim=self.dim, freq_shift=self.freq_shift, flip_sin_to_cos=True
+ )
| {"golden_diff": "diff --git a/src/diffusers/models/embeddings_flax.py b/src/diffusers/models/embeddings_flax.py\n--- a/src/diffusers/models/embeddings_flax.py\n+++ b/src/diffusers/models/embeddings_flax.py\n@@ -88,4 +88,6 @@\n \n @nn.compact\n def __call__(self, timesteps):\n- return get_sinusoidal_embeddings(timesteps, embedding_dim=self.dim, freq_shift=self.freq_shift)\n+ return get_sinusoidal_embeddings(\n+ timesteps, embedding_dim=self.dim, freq_shift=self.freq_shift, flip_sin_to_cos=True\n+ )\n", "issue": "[Flax] \ud83d\udea8 0.7.0 not working \ud83d\udea8\n### Describe the bug\n\n\r\n\n\n### Reproduction\n\n_No response_\n\n### Logs\n\n_No response_\n\n### System Info\n\nTPU v3-8\n", "before_files": [{"content": "# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\n\nimport flax.linen as nn\nimport jax.numpy as jnp\n\n\ndef get_sinusoidal_embeddings(\n timesteps: jnp.ndarray,\n embedding_dim: int,\n freq_shift: float = 1,\n min_timescale: float = 1,\n max_timescale: float = 1.0e4,\n flip_sin_to_cos: bool = False,\n scale: float = 1.0,\n) -> jnp.ndarray:\n \"\"\"Returns the positional encoding (same as Tensor2Tensor).\n Args:\n timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n embedding_dim: The number of output channels.\n min_timescale: The smallest time unit (should probably be 0.0).\n max_timescale: The largest time unit.\n Returns:\n a Tensor of timing signals [N, num_channels]\n \"\"\"\n assert timesteps.ndim == 1, \"Timesteps should be a 1d-array\"\n assert embedding_dim % 2 == 0, f\"Embedding dimension {embedding_dim} should be even\"\n num_timescales = float(embedding_dim // 2)\n log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)\n inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)\n emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)\n\n # scale embeddings\n scaled_time = scale * emb\n\n if flip_sin_to_cos:\n signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)\n else:\n signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)\n signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])\n return signal\n\n\nclass FlaxTimestepEmbedding(nn.Module):\n r\"\"\"\n Time step Embedding Module. 
Learns embeddings for input time steps.\n\n Args:\n time_embed_dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n time_embed_dim: int = 32\n dtype: jnp.dtype = jnp.float32\n\n @nn.compact\n def __call__(self, temb):\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_1\")(temb)\n temb = nn.silu(temb)\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_2\")(temb)\n return temb\n\n\nclass FlaxTimesteps(nn.Module):\n r\"\"\"\n Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239\n\n Args:\n dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n \"\"\"\n dim: int = 32\n freq_shift: float = 1\n\n @nn.compact\n def __call__(self, timesteps):\n return get_sinusoidal_embeddings(timesteps, embedding_dim=self.dim, freq_shift=self.freq_shift)\n", "path": "src/diffusers/models/embeddings_flax.py"}], "after_files": [{"content": "# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\n\nimport flax.linen as nn\nimport jax.numpy as jnp\n\n\ndef get_sinusoidal_embeddings(\n timesteps: jnp.ndarray,\n embedding_dim: int,\n freq_shift: float = 1,\n min_timescale: float = 1,\n max_timescale: float = 1.0e4,\n flip_sin_to_cos: bool = False,\n scale: float = 1.0,\n) -> jnp.ndarray:\n \"\"\"Returns the positional encoding (same as Tensor2Tensor).\n Args:\n timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n embedding_dim: The number of output channels.\n min_timescale: The smallest time unit (should probably be 0.0).\n max_timescale: The largest time unit.\n Returns:\n a Tensor of timing signals [N, num_channels]\n \"\"\"\n assert timesteps.ndim == 1, \"Timesteps should be a 1d-array\"\n assert embedding_dim % 2 == 0, f\"Embedding dimension {embedding_dim} should be even\"\n num_timescales = float(embedding_dim // 2)\n log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)\n inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)\n emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)\n\n # scale embeddings\n scaled_time = scale * emb\n\n if flip_sin_to_cos:\n signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)\n else:\n signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)\n signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])\n return signal\n\n\nclass FlaxTimestepEmbedding(nn.Module):\n r\"\"\"\n Time step Embedding Module. 
Learns embeddings for input time steps.\n\n Args:\n time_embed_dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n time_embed_dim: int = 32\n dtype: jnp.dtype = jnp.float32\n\n @nn.compact\n def __call__(self, temb):\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_1\")(temb)\n temb = nn.silu(temb)\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_2\")(temb)\n return temb\n\n\nclass FlaxTimesteps(nn.Module):\n r\"\"\"\n Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239\n\n Args:\n dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n \"\"\"\n dim: int = 32\n freq_shift: float = 1\n\n @nn.compact\n def __call__(self, timesteps):\n return get_sinusoidal_embeddings(\n timesteps, embedding_dim=self.dim, freq_shift=self.freq_shift, flip_sin_to_cos=True\n )\n", "path": "src/diffusers/models/embeddings_flax.py"}]} | 1,458 | 137 |
gh_patches_debug_48383 | rasdani/github-patches | git_diff | DDMAL__CantusDB-900 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
we need to re-add a restart policy to docker-compose.yml
A recent change to docker-compose.yml removed the `restart: always` policy we added to our containers a couple of weeks ago. We should re-instate this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/widgets.py`
Content:
```
1 from django.forms.widgets import TextInput, Select, Textarea, CheckboxInput
2 from django.utils.safestring import mark_safe
3
4 class TextInputWidget(TextInput):
5 def __init__(self):
6 self.attrs = {"class": "form-control form-control-sm"}
7
8
9 class SelectWidget(Select):
10 """
11 not used, this widget does work, but we cannot order the choices by name
12 """
13
14 def __init__(self):
15 attrs = {"class": "form-control custom-select custom-select-sm"}
16 super().__init__(attrs=attrs)
17 # super().choices = choices
18 # self.choices = super().choices
19
20
21 class TextAreaWidget(Textarea):
22 def __init__(self):
23 self.attrs = {"class": "form-control", "rows": "3"}
24
25
26 class VolpianoAreaWidget(Textarea):
27 def __init__(self):
28 self.attrs = {
29 "class": "form-control",
30 "rows": "1.5",
31 "style": "font-family: Volpiano; font-size: xx-large",
32 }
33
34
35 class VolpianoInputWidget(TextInput):
36 def __init__(self):
37 self.attrs = {
38 "class": "form-control form-control-sm",
39 "style": "font-family: Volpiano; font-size: xx-large",
40 }
41
42
43 class CheckboxWidget(CheckboxInput):
44 pass
45
46
47 class AdminTextAreaWidget(Textarea):
48 def __init__(self):
49 self.attrs = {"class": "form-control", "rows": 10, "cols": 75}
50
51 def render(self, name, value, attrs=None, renderer=None):
52 return super().render(name, value, attrs=self.attrs) + mark_safe(
53 '<span style="color: red; font-weight: bold;"> * </span>'
54 )
55
56
57 class AdminTextInputWidget(TextInputWidget):
58 def render(self, name, value, attrs=None, renderer=None):
59 return super().render(name, value) + mark_safe(
60 '<span style="color: red; font-weight: bold;"> * </span>'
61 )
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/main_app/widgets.py b/django/cantusdb_project/main_app/widgets.py
--- a/django/cantusdb_project/main_app/widgets.py
+++ b/django/cantusdb_project/main_app/widgets.py
@@ -1,6 +1,7 @@
from django.forms.widgets import TextInput, Select, Textarea, CheckboxInput
from django.utils.safestring import mark_safe
+
class TextInputWidget(TextInput):
def __init__(self):
self.attrs = {"class": "form-control form-control-sm"}
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/widgets.py b/django/cantusdb_project/main_app/widgets.py\n--- a/django/cantusdb_project/main_app/widgets.py\n+++ b/django/cantusdb_project/main_app/widgets.py\n@@ -1,6 +1,7 @@\n from django.forms.widgets import TextInput, Select, Textarea, CheckboxInput\n from django.utils.safestring import mark_safe\n \n+\n class TextInputWidget(TextInput):\n def __init__(self):\n self.attrs = {\"class\": \"form-control form-control-sm\"}\n", "issue": "we need to re-add a restart policy to docker-compose.yml\nA recent change to docker-compose.yml removed the `restart: always` policy we added to our containers a couple of weeks ago. We should re-instate this.\n", "before_files": [{"content": "from django.forms.widgets import TextInput, Select, Textarea, CheckboxInput\nfrom django.utils.safestring import mark_safe\n\nclass TextInputWidget(TextInput):\n def __init__(self):\n self.attrs = {\"class\": \"form-control form-control-sm\"}\n\n\nclass SelectWidget(Select):\n \"\"\"\n not used, this widget does work, but we cannot order the choices by name\n \"\"\"\n\n def __init__(self):\n attrs = {\"class\": \"form-control custom-select custom-select-sm\"}\n super().__init__(attrs=attrs)\n # super().choices = choices\n # self.choices = super().choices\n\n\nclass TextAreaWidget(Textarea):\n def __init__(self):\n self.attrs = {\"class\": \"form-control\", \"rows\": \"3\"}\n\n\nclass VolpianoAreaWidget(Textarea):\n def __init__(self):\n self.attrs = {\n \"class\": \"form-control\",\n \"rows\": \"1.5\",\n \"style\": \"font-family: Volpiano; font-size: xx-large\",\n }\n\n\nclass VolpianoInputWidget(TextInput):\n def __init__(self):\n self.attrs = {\n \"class\": \"form-control form-control-sm\",\n \"style\": \"font-family: Volpiano; font-size: xx-large\",\n }\n\n\nclass CheckboxWidget(CheckboxInput):\n pass\n\n\nclass AdminTextAreaWidget(Textarea):\n def __init__(self):\n self.attrs = {\"class\": \"form-control\", \"rows\": 10, \"cols\": 75}\n\n def render(self, name, value, attrs=None, renderer=None):\n return super().render(name, value, attrs=self.attrs) + mark_safe(\n '<span style=\"color: red; font-weight: bold;\"> * </span>'\n )\n\n\nclass AdminTextInputWidget(TextInputWidget):\n def render(self, name, value, attrs=None, renderer=None):\n return super().render(name, value) + mark_safe(\n '<span style=\"color: red; font-weight: bold;\"> * </span>'\n )\n", "path": "django/cantusdb_project/main_app/widgets.py"}], "after_files": [{"content": "from django.forms.widgets import TextInput, Select, Textarea, CheckboxInput\nfrom django.utils.safestring import mark_safe\n\n\nclass TextInputWidget(TextInput):\n def __init__(self):\n self.attrs = {\"class\": \"form-control form-control-sm\"}\n\n\nclass SelectWidget(Select):\n \"\"\"\n not used, this widget does work, but we cannot order the choices by name\n \"\"\"\n\n def __init__(self):\n attrs = {\"class\": \"form-control custom-select custom-select-sm\"}\n super().__init__(attrs=attrs)\n # super().choices = choices\n # self.choices = super().choices\n\n\nclass TextAreaWidget(Textarea):\n def __init__(self):\n self.attrs = {\"class\": \"form-control\", \"rows\": \"3\"}\n\n\nclass VolpianoAreaWidget(Textarea):\n def __init__(self):\n self.attrs = {\n \"class\": \"form-control\",\n \"rows\": \"1.5\",\n \"style\": \"font-family: Volpiano; font-size: xx-large\",\n }\n\n\nclass VolpianoInputWidget(TextInput):\n def __init__(self):\n self.attrs = {\n \"class\": \"form-control form-control-sm\",\n \"style\": 
\"font-family: Volpiano; font-size: xx-large\",\n }\n\n\nclass CheckboxWidget(CheckboxInput):\n pass\n\n\nclass AdminTextAreaWidget(Textarea):\n def __init__(self):\n self.attrs = {\"class\": \"form-control\", \"rows\": 10, \"cols\": 75}\n\n def render(self, name, value, attrs=None, renderer=None):\n return super().render(name, value, attrs=self.attrs) + mark_safe(\n '<span style=\"color: red; font-weight: bold;\"> * </span>'\n )\n\n\nclass AdminTextInputWidget(TextInputWidget):\n def render(self, name, value, attrs=None, renderer=None):\n return super().render(name, value) + mark_safe(\n '<span style=\"color: red; font-weight: bold;\"> * </span>'\n )\n", "path": "django/cantusdb_project/main_app/widgets.py"}]} | 874 | 119 |
gh_patches_debug_64424 | rasdani/github-patches | git_diff | pwndbg__pwndbg-80 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
source code section unavailable if current line is in the first chunk of the list command
when debugging through code and the current function is indeed one that it at the top of a .c file (that does not use tons of includes) the section is lost because of a too aggressive check:
https://github.com/pwndbg/pwndbg/blob/master/pwndbg/commands/context.py#L160
Which edge-case does that try to solve? As I'm hitting that in several cases, I think it does harm more then it maybe solves something? Would be cool to know which case it does try to fix so we could adjust that check to be less aggressive and preserve the section of the mentioned scenario.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/commands/context.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from __future__ import print_function
4 import gdb
5 import sys
6
7 import pwndbg.arguments
8 import pwndbg.chain
9 import pwndbg.color
10 import pwndbg.commands
11 import pwndbg.commands.nearpc
12 import pwndbg.commands.telescope
13 import pwndbg.config
14 import pwndbg.disasm
15 import pwndbg.events
16 import pwndbg.ida
17 import pwndbg.regs
18 import pwndbg.symbol
19 import pwndbg.ui
20 import pwndbg.vmmap
21
22 # @pwndbg.events.stop
23 @pwndbg.commands.Command
24 @pwndbg.commands.OnlyWhenRunning
25 def context(*args):
26 """
27 Print out the current register, instruction, and stack context.
28
29 Accepts subcommands 'reg', 'code', 'stack', 'backtrace', and 'args'.
30 """
31 if len(args) == 0:
32 args = ['reg','code','stack','backtrace','args']
33
34 args = [a[0] for a in args]
35
36 result = []
37
38 result.append(pwndbg.color.legend())
39 if 'r' in args: result.extend(context_regs())
40 if 'c' in args: result.extend(context_code())
41 if 'c' in args: result.extend(context_source())
42 if 'a' in args: result.extend(context_args())
43 if 's' in args: result.extend(context_stack())
44 if 'b' in args: result.extend(context_backtrace())
45 result.extend(context_signal())
46
47 for line in result:
48 sys.stdout.write(line + '\n')
49 sys.stdout.flush()
50
51 def context_regs():
52 result = []
53 result.append(pwndbg.color.blue(pwndbg.ui.banner("registers")))
54 result.extend(get_regs())
55 return result
56
57 @pwndbg.commands.Command
58 @pwndbg.commands.OnlyWhenRunning
59 def regs(*regs):
60 '''Print out all registers and enhance the information.'''
61 print('\n'.join(get_regs(*regs)))
62
63 pwndbg.config.Parameter('show-flags', False, 'whether to show flags registers')
64 pwndbg.config.Parameter('colored-flags', False, 'whether to colorize flags registers')
65
66 def get_regs(*regs):
67 result = []
68
69 if not regs:
70 regs = pwndbg.regs.gpr + (pwndbg.regs.frame, pwndbg.regs.current.stack, pwndbg.regs.current.pc)
71
72 if pwndbg.config.show_flags:
73 regs += tuple(pwndbg.regs.flags)
74
75 changed = pwndbg.regs.changed
76
77 for reg in regs:
78 if reg is None:
79 continue
80
81 if reg not in pwndbg.regs:
82 print("Unknown register: %r" % reg)
83 continue
84
85 value = pwndbg.regs[reg]
86
87 # Make the register stand out
88 regname = pwndbg.color.bold(reg.ljust(4).upper())
89
90 # Show a dot next to the register if it changed
91 m = ' ' if reg not in changed else '*'
92
93 if reg not in pwndbg.regs.flags:
94 desc = pwndbg.chain.format(value)
95
96 else:
97 names = []
98 desc = '%#x' % value
99 last = pwndbg.regs.last.get(reg, 0) or 0
100 flags = pwndbg.regs.flags[reg]
101
102 for name, bit in sorted(flags.items()):
103 bit = 1<<bit
104 if value & bit:
105 name = name.upper()
106 name = pwndbg.color.bold(name)
107 if pwndbg.config.colored_flags:
108 name = pwndbg.color.green(name)
109 else:
110 name = name.lower()
111 if pwndbg.config.colored_flags:
112 name = pwndbg.color.red(name)
113
114 if value & bit != last & bit:
115 name = pwndbg.color.underline(name)
116 names.append(name)
117
118 if names:
119 desc = '%s [ %s ]' % (desc, ' '.join(names))
120
121 result.append("%s%s %s" % (m, regname, desc))
122
123 return result
124
125 pwndbg.config.Parameter('emulate', True, '''
126 Unicorn emulation of code near the current instruction
127 ''')
128
129 def context_code():
130 banner = [pwndbg.color.blue(pwndbg.ui.banner("code"))]
131 emulate = bool(pwndbg.config.emulate)
132 result = pwndbg.commands.nearpc.nearpc(to_string=True, emulate=emulate)
133
134 # If we didn't disassemble backward, try to make sure
135 # that the amount of screen space taken is roughly constant.
136 while len(result) < 11:
137 result.append('')
138
139 return banner + result
140
141 def context_source():
142 try:
143 symtab = gdb.selected_frame().find_sal().symtab
144 linetable = symtab.linetable()
145
146 closest_pc = -1
147 closest_line = -1
148 for line in linetable:
149 if line.pc <= pwndbg.regs.pc and line.pc > closest_pc:
150 closest_line = line.line
151 closest_pc = line.pc
152
153 if closest_line < 0:
154 return []
155
156 source = gdb.execute('list %i' % closest_line, from_tty=False, to_string=True)
157
158 # If it starts on line 1, it's not really using the
159 # correct source code.
160 if not source or source.startswith('1\t'):
161 return []
162
163 banner = [pwndbg.color.blue(pwndbg.ui.banner("code"))]
164 banner.extend(source.splitlines())
165 return banner
166 except:
167 pass
168
169 if not pwndbg.ida.available():
170 return []
171
172 try:
173 name = pwndbg.ida.GetFunctionName(pwndbg.regs.pc)
174 addr = pwndbg.ida.LocByName(name)
175 lines = pwndbg.ida.decompile(addr)
176 return lines.splitlines()
177 except:
178 pass
179
180 return []
181
182 def context_stack():
183 result = []
184 result.append(pwndbg.color.blue(pwndbg.ui.banner("stack")))
185 telescope = pwndbg.commands.telescope.telescope(pwndbg.regs.sp, to_string=True)
186 if telescope:
187 result.extend(telescope)
188 return result
189
190 def context_backtrace(frame_count=10, with_banner=True):
191 result = []
192
193 if with_banner:
194 result.append(pwndbg.color.blue(pwndbg.ui.banner("backtrace")))
195
196 this_frame = gdb.selected_frame()
197 newest_frame = this_frame
198 oldest_frame = this_frame
199
200 for i in range(frame_count):
201 try:
202 candidate = oldest_frame.older()
203 except gdb.MemoryError:
204 break
205
206 if not candidate:
207 break
208 oldest_frame = candidate
209
210 for i in range(frame_count):
211 candidate = newest_frame.newer()
212 if not candidate:
213 break
214 newest_frame = candidate
215
216 frame = newest_frame
217 i = 0
218 while True:
219 prefix = '> ' if frame == this_frame else ' '
220 addrsz = pwndbg.ui.addrsz(frame.pc())
221 symbol = pwndbg.symbol.get(frame.pc())
222 if symbol:
223 addrsz = addrsz + ' ' + symbol
224 line = map(str, (prefix, 'f', i, addrsz))
225 line = ' '.join(line)
226 result.append(line)
227
228 if frame == oldest_frame:
229 break
230
231 frame = frame.older()
232 i += 1
233 return result
234
235 def context_args():
236 result = []
237
238 ##################################################
239 # DISABLED FOR NOW, I LIKE INLINE DISPLAY BETTER
240 ##################################################
241 # # For call instructions, attempt to resolve the target and
242 # # determine the number of arguments.
243 # for arg, value in pwndbg.arguments.arguments(pwndbg.disasm.one()):
244 # code = False if arg.type == 'char' else True
245 # pretty = pwndbg.chain.format(value, code=code)
246 # result.append('%-10s %s' % (arg.name+':', pretty))
247 # if not result:
248 # return []
249 # result.insert(0, pwndbg.color.blue(pwndbg.ui.banner("arguments")))
250 return result
251
252 last_signal = []
253
254 def save_signal(signal):
255 global last_signal
256 last_signal = result = []
257
258 if isinstance(signal, gdb.ExitedEvent):
259 # Booooo old gdb
260 if hasattr(signal, 'exit_code'):
261 result.append(pwndbg.color.red('Exited: %r' % signal.exit_code))
262
263 elif isinstance(signal, gdb.SignalEvent):
264 msg = 'Program received signal %s' % signal.stop_signal
265 if signal.stop_signal == 'SIGSEGV':
266 try:
267 si_addr = gdb.parse_and_eval("$_siginfo._sifields._sigfault.si_addr")
268 msg += ' (fault address %#x)' % int(si_addr or 0)
269 except gdb.error:
270 pass
271 msg = pwndbg.color.red(msg)
272 msg = pwndbg.color.bold(msg)
273 result.append(msg)
274
275 elif isinstance(signal, gdb.BreakpointEvent):
276 for bkpt in signal.breakpoints:
277 result.append(pwndbg.color.yellow('Breakpoint %s' % (bkpt.location)))
278
279 gdb.events.cont.connect(save_signal)
280 gdb.events.stop.connect(save_signal)
281 gdb.events.exited.connect(save_signal)
282
283 def context_signal():
284 return last_signal
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/commands/context.py b/pwndbg/commands/context.py
--- a/pwndbg/commands/context.py
+++ b/pwndbg/commands/context.py
@@ -157,7 +157,7 @@
# If it starts on line 1, it's not really using the
# correct source code.
- if not source or source.startswith('1\t'):
+ if not source or closest_line <= 1:
return []
banner = [pwndbg.color.blue(pwndbg.ui.banner("code"))]
| {"golden_diff": "diff --git a/pwndbg/commands/context.py b/pwndbg/commands/context.py\n--- a/pwndbg/commands/context.py\n+++ b/pwndbg/commands/context.py\n@@ -157,7 +157,7 @@\n \n # If it starts on line 1, it's not really using the\n # correct source code.\n- if not source or source.startswith('1\\t'):\n+ if not source or closest_line <= 1:\n return []\n \n banner = [pwndbg.color.blue(pwndbg.ui.banner(\"code\"))]\n", "issue": "source code section unavailable if current line is in the first chunk of the list command\nwhen debugging through code and the current function is indeed one that it at the top of a .c file (that does not use tons of includes) the section is lost because of a too aggressive check:\n\nhttps://github.com/pwndbg/pwndbg/blob/master/pwndbg/commands/context.py#L160\n\nWhich edge-case does that try to solve? As I'm hitting that in several cases, I think it does harm more then it maybe solves something? Would be cool to know which case it does try to fix so we could adjust that check to be less aggressive and preserve the section of the mentioned scenario.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport gdb\nimport sys\n\nimport pwndbg.arguments\nimport pwndbg.chain\nimport pwndbg.color\nimport pwndbg.commands\nimport pwndbg.commands.nearpc\nimport pwndbg.commands.telescope\nimport pwndbg.config\nimport pwndbg.disasm\nimport pwndbg.events\nimport pwndbg.ida\nimport pwndbg.regs\nimport pwndbg.symbol\nimport pwndbg.ui\nimport pwndbg.vmmap\n\n# @pwndbg.events.stop\[email protected]\[email protected]\ndef context(*args):\n \"\"\"\n Print out the current register, instruction, and stack context.\n\n Accepts subcommands 'reg', 'code', 'stack', 'backtrace', and 'args'.\n \"\"\"\n if len(args) == 0:\n args = ['reg','code','stack','backtrace','args']\n\n args = [a[0] for a in args]\n\n result = []\n\n result.append(pwndbg.color.legend())\n if 'r' in args: result.extend(context_regs())\n if 'c' in args: result.extend(context_code())\n if 'c' in args: result.extend(context_source())\n if 'a' in args: result.extend(context_args())\n if 's' in args: result.extend(context_stack())\n if 'b' in args: result.extend(context_backtrace())\n result.extend(context_signal())\n\n for line in result:\n sys.stdout.write(line + '\\n')\n sys.stdout.flush()\n\ndef context_regs():\n result = []\n result.append(pwndbg.color.blue(pwndbg.ui.banner(\"registers\")))\n result.extend(get_regs())\n return result\n\[email protected]\[email protected]\ndef regs(*regs):\n '''Print out all registers and enhance the information.'''\n print('\\n'.join(get_regs(*regs)))\n\npwndbg.config.Parameter('show-flags', False, 'whether to show flags registers')\npwndbg.config.Parameter('colored-flags', False, 'whether to colorize flags registers')\n\ndef get_regs(*regs):\n result = []\n\n if not regs:\n regs = pwndbg.regs.gpr + (pwndbg.regs.frame, pwndbg.regs.current.stack, pwndbg.regs.current.pc)\n\n if pwndbg.config.show_flags:\n regs += tuple(pwndbg.regs.flags)\n\n changed = pwndbg.regs.changed\n\n for reg in regs:\n if reg is None:\n continue\n\n if reg not in pwndbg.regs:\n print(\"Unknown register: %r\" % reg)\n continue\n\n value = pwndbg.regs[reg]\n\n # Make the register stand out\n regname = pwndbg.color.bold(reg.ljust(4).upper())\n\n # Show a dot next to the register if it changed\n m = ' ' if reg not in changed else '*'\n\n if reg not in pwndbg.regs.flags:\n desc = pwndbg.chain.format(value)\n\n else:\n names = []\n desc = '%#x' % 
value\n last = pwndbg.regs.last.get(reg, 0) or 0\n flags = pwndbg.regs.flags[reg]\n\n for name, bit in sorted(flags.items()):\n bit = 1<<bit\n if value & bit:\n name = name.upper()\n name = pwndbg.color.bold(name)\n if pwndbg.config.colored_flags:\n name = pwndbg.color.green(name)\n else:\n name = name.lower()\n if pwndbg.config.colored_flags:\n name = pwndbg.color.red(name)\n\n if value & bit != last & bit:\n name = pwndbg.color.underline(name)\n names.append(name)\n\n if names:\n desc = '%s [ %s ]' % (desc, ' '.join(names))\n\n result.append(\"%s%s %s\" % (m, regname, desc))\n\n return result\n\npwndbg.config.Parameter('emulate', True, '''\nUnicorn emulation of code near the current instruction\n''')\n\ndef context_code():\n banner = [pwndbg.color.blue(pwndbg.ui.banner(\"code\"))]\n emulate = bool(pwndbg.config.emulate)\n result = pwndbg.commands.nearpc.nearpc(to_string=True, emulate=emulate)\n\n # If we didn't disassemble backward, try to make sure\n # that the amount of screen space taken is roughly constant.\n while len(result) < 11:\n result.append('')\n\n return banner + result\n\ndef context_source():\n try:\n symtab = gdb.selected_frame().find_sal().symtab\n linetable = symtab.linetable()\n\n closest_pc = -1\n closest_line = -1\n for line in linetable:\n if line.pc <= pwndbg.regs.pc and line.pc > closest_pc:\n closest_line = line.line\n closest_pc = line.pc\n\n if closest_line < 0:\n return []\n\n source = gdb.execute('list %i' % closest_line, from_tty=False, to_string=True)\n\n # If it starts on line 1, it's not really using the\n # correct source code.\n if not source or source.startswith('1\\t'):\n return []\n\n banner = [pwndbg.color.blue(pwndbg.ui.banner(\"code\"))]\n banner.extend(source.splitlines())\n return banner\n except:\n pass\n\n if not pwndbg.ida.available():\n return []\n\n try:\n name = pwndbg.ida.GetFunctionName(pwndbg.regs.pc)\n addr = pwndbg.ida.LocByName(name)\n lines = pwndbg.ida.decompile(addr)\n return lines.splitlines()\n except:\n pass\n\n return []\n\ndef context_stack():\n result = []\n result.append(pwndbg.color.blue(pwndbg.ui.banner(\"stack\")))\n telescope = pwndbg.commands.telescope.telescope(pwndbg.regs.sp, to_string=True)\n if telescope:\n result.extend(telescope)\n return result\n\ndef context_backtrace(frame_count=10, with_banner=True):\n result = []\n\n if with_banner:\n result.append(pwndbg.color.blue(pwndbg.ui.banner(\"backtrace\")))\n\n this_frame = gdb.selected_frame()\n newest_frame = this_frame\n oldest_frame = this_frame\n\n for i in range(frame_count):\n try:\n candidate = oldest_frame.older()\n except gdb.MemoryError:\n break\n\n if not candidate:\n break\n oldest_frame = candidate\n\n for i in range(frame_count):\n candidate = newest_frame.newer()\n if not candidate:\n break\n newest_frame = candidate\n\n frame = newest_frame\n i = 0\n while True:\n prefix = '> ' if frame == this_frame else ' '\n addrsz = pwndbg.ui.addrsz(frame.pc())\n symbol = pwndbg.symbol.get(frame.pc())\n if symbol:\n addrsz = addrsz + ' ' + symbol\n line = map(str, (prefix, 'f', i, addrsz))\n line = ' '.join(line)\n result.append(line)\n\n if frame == oldest_frame:\n break\n\n frame = frame.older()\n i += 1\n return result\n\ndef context_args():\n result = []\n\n ##################################################\n # DISABLED FOR NOW, I LIKE INLINE DISPLAY BETTER\n ##################################################\n # # For call instructions, attempt to resolve the target and\n # # determine the number of arguments.\n # for arg, value in 
pwndbg.arguments.arguments(pwndbg.disasm.one()):\n # code = False if arg.type == 'char' else True\n # pretty = pwndbg.chain.format(value, code=code)\n # result.append('%-10s %s' % (arg.name+':', pretty))\n # if not result:\n # return []\n # result.insert(0, pwndbg.color.blue(pwndbg.ui.banner(\"arguments\")))\n return result\n\nlast_signal = []\n\ndef save_signal(signal):\n global last_signal\n last_signal = result = []\n\n if isinstance(signal, gdb.ExitedEvent):\n # Booooo old gdb\n if hasattr(signal, 'exit_code'):\n result.append(pwndbg.color.red('Exited: %r' % signal.exit_code))\n\n elif isinstance(signal, gdb.SignalEvent):\n msg = 'Program received signal %s' % signal.stop_signal\n if signal.stop_signal == 'SIGSEGV':\n try:\n si_addr = gdb.parse_and_eval(\"$_siginfo._sifields._sigfault.si_addr\")\n msg += ' (fault address %#x)' % int(si_addr or 0)\n except gdb.error:\n pass\n msg = pwndbg.color.red(msg)\n msg = pwndbg.color.bold(msg)\n result.append(msg)\n\n elif isinstance(signal, gdb.BreakpointEvent):\n for bkpt in signal.breakpoints:\n result.append(pwndbg.color.yellow('Breakpoint %s' % (bkpt.location)))\n\ngdb.events.cont.connect(save_signal)\ngdb.events.stop.connect(save_signal)\ngdb.events.exited.connect(save_signal)\n\ndef context_signal():\n return last_signal\n", "path": "pwndbg/commands/context.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport gdb\nimport sys\n\nimport pwndbg.arguments\nimport pwndbg.chain\nimport pwndbg.color\nimport pwndbg.commands\nimport pwndbg.commands.nearpc\nimport pwndbg.commands.telescope\nimport pwndbg.config\nimport pwndbg.disasm\nimport pwndbg.events\nimport pwndbg.ida\nimport pwndbg.regs\nimport pwndbg.symbol\nimport pwndbg.ui\nimport pwndbg.vmmap\n\n# @pwndbg.events.stop\[email protected]\[email protected]\ndef context(*args):\n \"\"\"\n Print out the current register, instruction, and stack context.\n\n Accepts subcommands 'reg', 'code', 'stack', 'backtrace', and 'args'.\n \"\"\"\n if len(args) == 0:\n args = ['reg','code','stack','backtrace','args']\n\n args = [a[0] for a in args]\n\n result = []\n\n result.append(pwndbg.color.legend())\n if 'r' in args: result.extend(context_regs())\n if 'c' in args: result.extend(context_code())\n if 'c' in args: result.extend(context_source())\n if 'a' in args: result.extend(context_args())\n if 's' in args: result.extend(context_stack())\n if 'b' in args: result.extend(context_backtrace())\n result.extend(context_signal())\n\n for line in result:\n sys.stdout.write(line + '\\n')\n sys.stdout.flush()\n\ndef context_regs():\n result = []\n result.append(pwndbg.color.blue(pwndbg.ui.banner(\"registers\")))\n result.extend(get_regs())\n return result\n\[email protected]\[email protected]\ndef regs(*regs):\n '''Print out all registers and enhance the information.'''\n print('\\n'.join(get_regs(*regs)))\n\npwndbg.config.Parameter('show-flags', False, 'whether to show flags registers')\npwndbg.config.Parameter('colored-flags', False, 'whether to colorize flags registers')\n\ndef get_regs(*regs):\n result = []\n\n if not regs:\n regs = pwndbg.regs.gpr + (pwndbg.regs.frame, pwndbg.regs.current.stack, pwndbg.regs.current.pc)\n\n if pwndbg.config.show_flags:\n regs += tuple(pwndbg.regs.flags)\n\n changed = pwndbg.regs.changed\n\n for reg in regs:\n if reg is None:\n continue\n\n if reg not in pwndbg.regs:\n print(\"Unknown register: %r\" % reg)\n continue\n\n value = pwndbg.regs[reg]\n\n # Make the register stand out\n regname = 
pwndbg.color.bold(reg.ljust(4).upper())\n\n # Show a dot next to the register if it changed\n m = ' ' if reg not in changed else '*'\n\n if reg not in pwndbg.regs.flags:\n desc = pwndbg.chain.format(value)\n\n else:\n names = []\n desc = '%#x' % value\n last = pwndbg.regs.last.get(reg, 0) or 0\n flags = pwndbg.regs.flags[reg]\n\n for name, bit in sorted(flags.items()):\n bit = 1<<bit\n if value & bit:\n name = name.upper()\n name = pwndbg.color.bold(name)\n if pwndbg.config.colored_flags:\n name = pwndbg.color.green(name)\n else:\n name = name.lower()\n if pwndbg.config.colored_flags:\n name = pwndbg.color.red(name)\n\n if value & bit != last & bit:\n name = pwndbg.color.underline(name)\n names.append(name)\n\n if names:\n desc = '%s [ %s ]' % (desc, ' '.join(names))\n\n result.append(\"%s%s %s\" % (m, regname, desc))\n\n return result\n\npwndbg.config.Parameter('emulate', True, '''\nUnicorn emulation of code near the current instruction\n''')\n\ndef context_code():\n banner = [pwndbg.color.blue(pwndbg.ui.banner(\"code\"))]\n emulate = bool(pwndbg.config.emulate)\n result = pwndbg.commands.nearpc.nearpc(to_string=True, emulate=emulate)\n\n # If we didn't disassemble backward, try to make sure\n # that the amount of screen space taken is roughly constant.\n while len(result) < 11:\n result.append('')\n\n return banner + result\n\ndef context_source():\n try:\n symtab = gdb.selected_frame().find_sal().symtab\n linetable = symtab.linetable()\n\n closest_pc = -1\n closest_line = -1\n for line in linetable:\n if line.pc <= pwndbg.regs.pc and line.pc > closest_pc:\n closest_line = line.line\n closest_pc = line.pc\n\n if closest_line < 0:\n return []\n\n source = gdb.execute('list %i' % closest_line, from_tty=False, to_string=True)\n\n # If it starts on line 1, it's not really using the\n # correct source code.\n if not source or closest_line <= 1:\n return []\n\n banner = [pwndbg.color.blue(pwndbg.ui.banner(\"code\"))]\n banner.extend(source.splitlines())\n return banner\n except:\n pass\n\n if not pwndbg.ida.available():\n return []\n\n try:\n name = pwndbg.ida.GetFunctionName(pwndbg.regs.pc)\n addr = pwndbg.ida.LocByName(name)\n lines = pwndbg.ida.decompile(addr)\n return lines.splitlines()\n except:\n pass\n\n return []\n\ndef context_stack():\n result = []\n result.append(pwndbg.color.blue(pwndbg.ui.banner(\"stack\")))\n telescope = pwndbg.commands.telescope.telescope(pwndbg.regs.sp, to_string=True)\n if telescope:\n result.extend(telescope)\n return result\n\ndef context_backtrace(frame_count=10, with_banner=True):\n result = []\n\n if with_banner:\n result.append(pwndbg.color.blue(pwndbg.ui.banner(\"backtrace\")))\n\n this_frame = gdb.selected_frame()\n newest_frame = this_frame\n oldest_frame = this_frame\n\n for i in range(frame_count):\n try:\n candidate = oldest_frame.older()\n except gdb.MemoryError:\n break\n\n if not candidate:\n break\n oldest_frame = candidate\n\n for i in range(frame_count):\n candidate = newest_frame.newer()\n if not candidate:\n break\n newest_frame = candidate\n\n frame = newest_frame\n i = 0\n while True:\n prefix = '> ' if frame == this_frame else ' '\n addrsz = pwndbg.ui.addrsz(frame.pc())\n symbol = pwndbg.symbol.get(frame.pc())\n if symbol:\n addrsz = addrsz + ' ' + symbol\n line = map(str, (prefix, 'f', i, addrsz))\n line = ' '.join(line)\n result.append(line)\n\n if frame == oldest_frame:\n break\n\n frame = frame.older()\n i += 1\n return result\n\ndef context_args():\n result = []\n\n ##################################################\n # DISABLED FOR 
NOW, I LIKE INLINE DISPLAY BETTER\n ##################################################\n # # For call instructions, attempt to resolve the target and\n # # determine the number of arguments.\n # for arg, value in pwndbg.arguments.arguments(pwndbg.disasm.one()):\n # code = False if arg.type == 'char' else True\n # pretty = pwndbg.chain.format(value, code=code)\n # result.append('%-10s %s' % (arg.name+':', pretty))\n # if not result:\n # return []\n # result.insert(0, pwndbg.color.blue(pwndbg.ui.banner(\"arguments\")))\n return result\n\nlast_signal = []\n\ndef save_signal(signal):\n global last_signal\n last_signal = result = []\n\n if isinstance(signal, gdb.ExitedEvent):\n # Booooo old gdb\n if hasattr(signal, 'exit_code'):\n result.append(pwndbg.color.red('Exited: %r' % signal.exit_code))\n\n elif isinstance(signal, gdb.SignalEvent):\n msg = 'Program received signal %s' % signal.stop_signal\n if signal.stop_signal == 'SIGSEGV':\n try:\n si_addr = gdb.parse_and_eval(\"$_siginfo._sifields._sigfault.si_addr\")\n msg += ' (fault address %#x)' % int(si_addr or 0)\n except gdb.error:\n pass\n msg = pwndbg.color.red(msg)\n msg = pwndbg.color.bold(msg)\n result.append(msg)\n\n elif isinstance(signal, gdb.BreakpointEvent):\n for bkpt in signal.breakpoints:\n result.append(pwndbg.color.yellow('Breakpoint %s' % (bkpt.location)))\n\ngdb.events.cont.connect(save_signal)\ngdb.events.stop.connect(save_signal)\ngdb.events.exited.connect(save_signal)\n\ndef context_signal():\n return last_signal\n", "path": "pwndbg/commands/context.py"}]} | 3,220 | 124 |
gh_patches_debug_3244 | rasdani/github-patches | git_diff | ansible__molecule-135 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Verbose in molecule.yml should handle bool True
https://github.com/metacloud/molecule/blob/master/molecule/ansible_playbook.py#L104
Clearly string + bool isn't going to work, and it will produce an error like:

```
Traceback (most recent call last):
File "/Users/zochao/.virtualenvs/metamole/bin/molecule", line 10, in <module>
sys.exit(main())
File "/Users/zochao/metamole/molecule/molecule/cli.py", line 66, in main
CLI().main()
File "/Users/zochao/metamole/molecule/molecule/cli.py", line 62, in main
sys.exit(c.execute())
File "/Users/zochao/metamole/molecule/molecule/commands.py", line 393, in execute
c.execute()
File "/Users/zochao/metamole/molecule/molecule/commands.py", line 224, in execute
ansible = AnsiblePlaybook(self.molecule._config.config['ansible'])
File "/Users/zochao/metamole/molecule/molecule/ansible_playbook.py", line 51, in _init_
self.parse_arg(k, v)
File "/Users/zochao/metamole/molecule/molecule/ansible_playbook.py", line 104, in parse_arg
self.cli_pos.append('-' + value)
TypeError: cannot concatenate 'str' and 'bool' objects
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `molecule/ansible_playbook.py`
Content:
```
1 # Copyright (c) 2015 Cisco Systems
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 # THE SOFTWARE.
20
21 from __future__ import print_function
22
23 import os
24 import sys
25
26 import sh
27
28 from utilities import print_stderr
29 from utilities import print_stdout
30
31
32 class AnsiblePlaybook:
33 def __init__(self, args, _env=None, _out=print_stdout, _err=print_stderr):
34 """
35 Sets up requirements for ansible-playbook
36
37 :param args: Dictionary arguments to pass to ansible-playbook
38 :param _env: Environment dictionary to use. os.environ.copy() is used by default
39 :param _out: Function passed to sh for STDOUT
40 :param _err: Function passed to sh for STDERR
41 :return: None
42 """
43 self.cli = {}
44 self.cli_pos = []
45 self.env = _env if _env else os.environ.copy()
46 self.playbook = None
47 self.ansible = None
48
49 # process arguments passed in (typically from molecule.yml's ansible block)
50 for k, v in args.iteritems():
51 self.parse_arg(k, v)
52
53 # defaults can be redefined with call to add_env_arg() before baking
54 self.add_env_arg('PYTHONUNBUFFERED', '1')
55 self.add_env_arg('ANSIBLE_FORCE_COLOR', 'true')
56
57 # passed through to sh, not ansible-playbook
58 self.add_cli_arg('_out', _out)
59 self.add_cli_arg('_err', _err)
60
61 def bake(self):
62 """
63 Bake ansible-playbook command so it's ready to execute.
64
65 :return: None
66 """
67 self.ansible = sh.ansible_playbook.bake(self.playbook, *self.cli_pos, _env=self.env, **self.cli)
68
69 def parse_arg(self, name, value):
70 """
71 Parses argument and adds to CLI or environment
72
73 :param name: Name of argument to be added
74 :param value: Value of argument to be added
75 :return: None
76 """
77 # skip `requirements_file` since it used by ansible-galaxy only
78 if name == 'requirements_file':
79 return
80
81 if name == 'raw_env_vars':
82 for k, v in value.iteritems():
83 self.add_env_arg(k, v)
84 return
85
86 if name == 'host_key_checking':
87 self.add_env_arg('ANSIBLE_HOST_KEY_CHECKING', str(value).lower())
88 return
89
90 if name == 'raw_ssh_args':
91 self.add_env_arg('ANSIBLE_SSH_ARGS', ' '.join(value))
92 return
93
94 if name == 'config_file':
95 self.add_env_arg('ANSIBLE_CONFIG', value)
96 return
97
98 if name == 'playbook':
99 self.playbook = value
100 return
101
102 # verbose is weird, must be -vvvv not verbose=vvvv
103 if name == 'verbose' and value:
104 self.cli_pos.append('-' + value)
105 return
106
107 self.add_cli_arg(name, value)
108
109 def add_cli_arg(self, name, value):
110 """
111 Adds argument to CLI passed to ansible-playbook
112
113 :param name: Name of argument to be added
114 :param value: Value of argument to be added
115 :return: None
116 """
117 if value:
118 self.cli[name] = value
119
120 def remove_cli_arg(self, name):
121 """
122 Removes CLI argument
123
124 :param name: Key name of CLI argument to remove
125 :return: None
126 """
127 self.cli.pop(name, None)
128
129 def add_env_arg(self, name, value):
130 """
131 Adds argument to environment passed to ansible-playbook
132
133 :param name: Name of argument to be added
134 :param value: Value of argument to be added
135 :return: None
136 """
137 self.env[name] = value
138
139 def remove_env_arg(self, name):
140 """
141 Removes environment argument
142
143 :param name: Key name of environment argument to remove
144 :return: None
145 """
146 self.env.pop(name, None)
147
148 def execute(self):
149 """
150 Executes ansible-playbook
151
152 :return: sh.stdout on success, else None
153 :return: None
154 """
155 if self.ansible is None:
156 self.bake()
157
158 try:
159 return self.ansible().stdout
160 except sh.ErrorReturnCode as e:
161 print('ERROR: {}'.format(e))
162 sys.exit(e.exit_code)
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/molecule/ansible_playbook.py b/molecule/ansible_playbook.py
--- a/molecule/ansible_playbook.py
+++ b/molecule/ansible_playbook.py
@@ -101,6 +101,9 @@
# verbose is weird, must be -vvvv not verbose=vvvv
if name == 'verbose' and value:
+ # for cases where someone passes in verbose: True
+ if value is True:
+ value = 'vvvv'
self.cli_pos.append('-' + value)
return
| {"golden_diff": "diff --git a/molecule/ansible_playbook.py b/molecule/ansible_playbook.py\n--- a/molecule/ansible_playbook.py\n+++ b/molecule/ansible_playbook.py\n@@ -101,6 +101,9 @@\n \n # verbose is weird, must be -vvvv not verbose=vvvv\n if name == 'verbose' and value:\n+ # for cases where someone passes in verbose: True\n+ if value is True:\n+ value = 'vvvv'\n self.cli_pos.append('-' + value)\n return\n", "issue": "Verbose in molecule.yml should handle bool True\nhttps://github.com/metacloud/molecule/blob/master/molecule/ansible_playbook.py#L104\n\n```\nclearly string +bool isn't gonna work. And it will have error like\nTraceback (most recent call last):\nFile \"/Users/zochao/.virtualenvs/metamole/bin/molecule\", line 10, in <module>\nsys.exit(main())\nFile \"/Users/zochao/metamole/molecule/molecule/cli.py\", line 66, in main\nCLI().main()\nFile \"/Users/zochao/metamole/molecule/molecule/cli.py\", line 62, in main\nsys.exit(c.execute())\nFile \"/Users/zochao/metamole/molecule/molecule/commands.py\", line 393, in execute\nc.execute()\nFile \"/Users/zochao/metamole/molecule/molecule/commands.py\", line 224, in execute\nansible = AnsiblePlaybook(self.molecule._config.config['ansible'])\nFile \"/Users/zochao/metamole/molecule/molecule/ansible_playbook.py\", line 51, in _init_\nself.parse_arg(k, v)\nFile \"/Users/zochao/metamole/molecule/molecule/ansible_playbook.py\", line 104, in parse_arg\nself.cli_pos.append('-' + value)\nTypeError: cannot concatenate 'str' and 'bool' objects\n```\n\n", "before_files": [{"content": "# Copyright (c) 2015 Cisco Systems\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nimport sh\n\nfrom utilities import print_stderr\nfrom utilities import print_stdout\n\n\nclass AnsiblePlaybook:\n def __init__(self, args, _env=None, _out=print_stdout, _err=print_stderr):\n \"\"\"\n Sets up requirements for ansible-playbook\n\n :param args: Dictionary arguments to pass to ansible-playbook\n :param _env: Environment dictionary to use. 
os.environ.copy() is used by default\n :param _out: Function passed to sh for STDOUT\n :param _err: Function passed to sh for STDERR\n :return: None\n \"\"\"\n self.cli = {}\n self.cli_pos = []\n self.env = _env if _env else os.environ.copy()\n self.playbook = None\n self.ansible = None\n\n # process arguments passed in (typically from molecule.yml's ansible block)\n for k, v in args.iteritems():\n self.parse_arg(k, v)\n\n # defaults can be redefined with call to add_env_arg() before baking\n self.add_env_arg('PYTHONUNBUFFERED', '1')\n self.add_env_arg('ANSIBLE_FORCE_COLOR', 'true')\n\n # passed through to sh, not ansible-playbook\n self.add_cli_arg('_out', _out)\n self.add_cli_arg('_err', _err)\n\n def bake(self):\n \"\"\"\n Bake ansible-playbook command so it's ready to execute.\n\n :return: None\n \"\"\"\n self.ansible = sh.ansible_playbook.bake(self.playbook, *self.cli_pos, _env=self.env, **self.cli)\n\n def parse_arg(self, name, value):\n \"\"\"\n Parses argument and adds to CLI or environment\n\n :param name: Name of argument to be added\n :param value: Value of argument to be added\n :return: None\n \"\"\"\n # skip `requirements_file` since it used by ansible-galaxy only\n if name == 'requirements_file':\n return\n\n if name == 'raw_env_vars':\n for k, v in value.iteritems():\n self.add_env_arg(k, v)\n return\n\n if name == 'host_key_checking':\n self.add_env_arg('ANSIBLE_HOST_KEY_CHECKING', str(value).lower())\n return\n\n if name == 'raw_ssh_args':\n self.add_env_arg('ANSIBLE_SSH_ARGS', ' '.join(value))\n return\n\n if name == 'config_file':\n self.add_env_arg('ANSIBLE_CONFIG', value)\n return\n\n if name == 'playbook':\n self.playbook = value\n return\n\n # verbose is weird, must be -vvvv not verbose=vvvv\n if name == 'verbose' and value:\n self.cli_pos.append('-' + value)\n return\n\n self.add_cli_arg(name, value)\n\n def add_cli_arg(self, name, value):\n \"\"\"\n Adds argument to CLI passed to ansible-playbook\n\n :param name: Name of argument to be added\n :param value: Value of argument to be added\n :return: None\n \"\"\"\n if value:\n self.cli[name] = value\n\n def remove_cli_arg(self, name):\n \"\"\"\n Removes CLI argument\n\n :param name: Key name of CLI argument to remove\n :return: None\n \"\"\"\n self.cli.pop(name, None)\n\n def add_env_arg(self, name, value):\n \"\"\"\n Adds argument to environment passed to ansible-playbook\n\n :param name: Name of argument to be added\n :param value: Value of argument to be added\n :return: None\n \"\"\"\n self.env[name] = value\n\n def remove_env_arg(self, name):\n \"\"\"\n Removes environment argument\n\n :param name: Key name of environment argument to remove\n :return: None\n \"\"\"\n self.env.pop(name, None)\n\n def execute(self):\n \"\"\"\n Executes ansible-playbook\n\n :return: sh.stdout on success, else None\n :return: None\n \"\"\"\n if self.ansible is None:\n self.bake()\n\n try:\n return self.ansible().stdout\n except sh.ErrorReturnCode as e:\n print('ERROR: {}'.format(e))\n sys.exit(e.exit_code)\n", "path": "molecule/ansible_playbook.py"}], "after_files": [{"content": "# Copyright (c) 2015 Cisco Systems\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, 
subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nimport sh\n\nfrom utilities import print_stderr\nfrom utilities import print_stdout\n\n\nclass AnsiblePlaybook:\n def __init__(self, args, _env=None, _out=print_stdout, _err=print_stderr):\n \"\"\"\n Sets up requirements for ansible-playbook\n\n :param args: Dictionary arguments to pass to ansible-playbook\n :param _env: Environment dictionary to use. os.environ.copy() is used by default\n :param _out: Function passed to sh for STDOUT\n :param _err: Function passed to sh for STDERR\n :return: None\n \"\"\"\n self.cli = {}\n self.cli_pos = []\n self.env = _env if _env else os.environ.copy()\n self.playbook = None\n self.ansible = None\n\n # process arguments passed in (typically from molecule.yml's ansible block)\n for k, v in args.iteritems():\n self.parse_arg(k, v)\n\n # defaults can be redefined with call to add_env_arg() before baking\n self.add_env_arg('PYTHONUNBUFFERED', '1')\n self.add_env_arg('ANSIBLE_FORCE_COLOR', 'true')\n\n # passed through to sh, not ansible-playbook\n self.add_cli_arg('_out', _out)\n self.add_cli_arg('_err', _err)\n\n def bake(self):\n \"\"\"\n Bake ansible-playbook command so it's ready to execute.\n\n :return: None\n \"\"\"\n self.ansible = sh.ansible_playbook.bake(self.playbook, *self.cli_pos, _env=self.env, **self.cli)\n\n def parse_arg(self, name, value):\n \"\"\"\n Parses argument and adds to CLI or environment\n\n :param name: Name of argument to be added\n :param value: Value of argument to be added\n :return: None\n \"\"\"\n # skip `requirements_file` since it used by ansible-galaxy only\n if name == 'requirements_file':\n return\n\n if name == 'raw_env_vars':\n for k, v in value.iteritems():\n self.add_env_arg(k, v)\n return\n\n if name == 'host_key_checking':\n self.add_env_arg('ANSIBLE_HOST_KEY_CHECKING', str(value).lower())\n return\n\n if name == 'raw_ssh_args':\n self.add_env_arg('ANSIBLE_SSH_ARGS', ' '.join(value))\n return\n\n if name == 'config_file':\n self.add_env_arg('ANSIBLE_CONFIG', value)\n return\n\n if name == 'playbook':\n self.playbook = value\n return\n\n # verbose is weird, must be -vvvv not verbose=vvvv\n if name == 'verbose' and value:\n # for cases where someone passes in verbose: True\n if value is True:\n value = 'vvvv'\n self.cli_pos.append('-' + value)\n return\n\n self.add_cli_arg(name, value)\n\n def add_cli_arg(self, name, value):\n \"\"\"\n Adds argument to CLI passed to ansible-playbook\n\n :param name: Name of argument to be added\n :param value: Value of argument to be added\n :return: None\n \"\"\"\n if value:\n self.cli[name] = value\n\n def remove_cli_arg(self, name):\n \"\"\"\n Removes CLI argument\n\n :param name: Key name of CLI argument to remove\n :return: None\n \"\"\"\n self.cli.pop(name, None)\n\n def add_env_arg(self, name, value):\n \"\"\"\n Adds argument to environment passed to 
ansible-playbook\n\n :param name: Name of argument to be added\n :param value: Value of argument to be added\n :return: None\n \"\"\"\n self.env[name] = value\n\n def remove_env_arg(self, name):\n \"\"\"\n Removes environment argument\n\n :param name: Key name of environment argument to remove\n :return: None\n \"\"\"\n self.env.pop(name, None)\n\n def execute(self):\n \"\"\"\n Executes ansible-playbook\n\n :return: sh.stdout on success, else None\n :return: None\n \"\"\"\n if self.ansible is None:\n self.bake()\n\n try:\n return self.ansible().stdout\n except sh.ErrorReturnCode as e:\n print('ERROR: {}'.format(e))\n sys.exit(e.exit_code)\n", "path": "molecule/ansible_playbook.py"}]} | 2,140 | 123 |
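The golden diff above normalizes a boolean `verbose` value to the string `'vvvv'` before the `'-' + value` concatenation. A minimal standalone sketch of that normalization logic (the `parse_verbose` helper below is hypothetical; the real fix lives inside `AnsiblePlaybook.parse_arg`):

```python
def parse_verbose(value):
    """Map a molecule.yml `verbose` setting to the ansible-playbook CLI flag.

    Mirrors the patched behaviour in molecule/ansible_playbook.py: a bare
    `verbose: True` arrives as a bool and is mapped to the most verbose
    level instead of being concatenated with a string.
    """
    if not value:
        return None
    # for cases where someone passes in verbose: True
    if value is True:
        value = 'vvvv'
    return '-' + value


assert parse_verbose(True) == '-vvvv'
assert parse_verbose('vv') == '-vv'
assert parse_verbose(False) is None
```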
gh_patches_debug_568 | rasdani/github-patches | git_diff | pex-tool__pex-836 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.0
On the docket:
The prime motivator:
+ [x] Pex does not download foreign abi3 wheels correctly #823
Changes to support the above as well as others:
+ [x] Fix pex resolving for foreign platforms. #835
+ [x] Use pypa/packaging. #831
+ [x] Upgrade vendored setuptools to 42.0.2. #832
+ [x] De-vendor pex just once per version. #833
+ [x] Support VCS urls for vendoring. #834
+ [x] Support python 3.8 in CI. #829
+ [x] Fix pex resolution to respect --ignore-errors. #828
+ [x] Kill `pkg_resources` finders monkey-patching. #827
+ [x] Use flit to distribute pex. #826
+ [x] Cleanup extras_require. #825
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.0.3'
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.0.3'
+__version__ = '2.1.0'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.0.3'\n+__version__ = '2.1.0'\n", "issue": "Release 2.1.0\nOn the docket:\r\n\r\nThe prime motivator:\r\n+ [x] Pex does not download foreign abi3 wheels correctly #823\r\n\r\nChanges to support the above as well as others:\r\n+ [x] Fix pex resolving for foreign platforms. #835 \r\n+ [x] Use pypa/packaging. #831\r\n+ [x] Upgrade vendored setuptools to 42.0.2. #832\r\n+ [x] De-vendor pex just once per version. #833\r\n+ [x] Support VCS urls for vendoring. #834\r\n+ [x] Support python 3.8 in CI. #829\r\n+ [x] Fix pex resolution to respect --ignore-errors. #828\r\n+ [x] Kill `pkg_resources` finders monkey-patching. #827\r\n+ [x] Use flit to distribute pex. #826\r\n+ [x] Cleanup extras_require. #825\r\n\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.0.3'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.0'\n", "path": "pex/version.py"}]} | 534 | 94 |
gh_patches_debug_16389 | rasdani/github-patches | git_diff | facebookresearch__hydra-1968 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature Request] submitit: add `stderr_to_stdout` as supported by submitit
# 🚀 Feature Request
The `stderr_to_stdout` option was added to submitit itself (not the plugin) [last year](https://github.com/facebookincubator/submitit/pull/1611), but it cannot currently be set through the submitit plugin.
We should add support for passing this `stderr_to_stdout` via the plugin as well.
cc @Jasha10
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/hydra_submitit_launcher/setup.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 # type: ignore
3 from pathlib import Path
4
5 from read_version import read_version
6 from setuptools import find_namespace_packages, setup
7
8 setup(
9 name="hydra-submitit-launcher",
10 version=read_version("hydra_plugins/hydra_submitit_launcher", "__init__.py"),
11 author="Jeremy Rapin, Jieru Hu, Omry Yadan",
12 author_email="[email protected], [email protected], [email protected]",
13 description="Submitit Launcher for Hydra apps",
14 long_description=(Path(__file__).parent / "README.md").read_text(),
15 long_description_content_type="text/markdown",
16 url="https://github.com/facebookincubator/submitit",
17 packages=find_namespace_packages(include=["hydra_plugins.*"]),
18 classifiers=[
19 "License :: OSI Approved :: MIT License",
20 "Programming Language :: Python :: 3.7",
21 "Programming Language :: Python :: 3.8",
22 "Programming Language :: Python :: 3.9",
23 "Operating System :: MacOS",
24 "Operating System :: POSIX :: Linux",
25 "Development Status :: 4 - Beta",
26 ],
27 install_requires=[
28 "hydra-core>=1.1.0.dev7",
29 "submitit>=1.0.0",
30 ],
31 include_package_data=True,
32 )
33
```
Path: `plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 from dataclasses import dataclass, field
3 from typing import Any, Dict, List, Optional
4
5 from hydra.core.config_store import ConfigStore
6
7
8 @dataclass
9 class BaseQueueConf:
10 """Configuration shared by all executors"""
11
12 submitit_folder: str = "${hydra.sweep.dir}/.submitit/%j"
13
14 # maximum time for the job in minutes
15 timeout_min: int = 60
16 # number of cpus to use for each task
17 cpus_per_task: Optional[int] = None
18 # number of gpus to use on each node
19 gpus_per_node: Optional[int] = None
20 # number of tasks to spawn on each node
21 tasks_per_node: int = 1
22 # memory to reserve for the job on each node (in GB)
23 mem_gb: Optional[int] = None
24 # number of nodes to use for the job
25 nodes: int = 1
26 # name of the job
27 name: str = "${hydra.job.name}"
28
29
30 @dataclass
31 class SlurmQueueConf(BaseQueueConf):
32 """Slurm configuration overrides and specific parameters"""
33
34 _target_: str = (
35 "hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher"
36 )
37
38 # Params are used to configure sbatch, for more info check:
39 # https://github.com/facebookincubator/submitit/blob/master/submitit/slurm/slurm.py
40
41 # Following parameters are slurm specific
42 # More information: https://slurm.schedmd.com/sbatch.html
43 #
44 # slurm partition to use on the cluster
45 partition: Optional[str] = None
46 qos: Optional[str] = None
47 comment: Optional[str] = None
48 constraint: Optional[str] = None
49 exclude: Optional[str] = None
50 gres: Optional[str] = None
51 cpus_per_gpu: Optional[int] = None
52 gpus_per_task: Optional[int] = None
53 mem_per_gpu: Optional[str] = None
54 mem_per_cpu: Optional[str] = None
55
56 # Following parameters are submitit specifics
57 #
58 # USR1 signal delay before timeout
59 signal_delay_s: int = 120
60 # Maximum number of retries on job timeout.
61 # Change this only after you confirmed your code can handle re-submission
62 # by properly resuming from the latest stored checkpoint.
63 # check the following for more info on slurm_max_num_timeout
64 # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md
65 max_num_timeout: int = 0
66 # Useful to add parameters which are not currently available in the plugin.
67 # Eg: {"mail-user": "[email protected]", "mail-type": "BEGIN"}
68 additional_parameters: Dict[str, Any] = field(default_factory=dict)
69 # Maximum number of jobs running in parallel
70 array_parallelism: int = 256
71 # A list of commands to run in sbatch befure running srun
72 setup: Optional[List[str]] = None
73
74
75 @dataclass
76 class LocalQueueConf(BaseQueueConf):
77 _target_: str = (
78 "hydra_plugins.hydra_submitit_launcher.submitit_launcher.LocalLauncher"
79 )
80
81
82 # finally, register two different choices:
83 ConfigStore.instance().store(
84 group="hydra/launcher",
85 name="submitit_local",
86 node=LocalQueueConf(),
87 provider="submitit_launcher",
88 )
89
90
91 ConfigStore.instance().store(
92 group="hydra/launcher",
93 name="submitit_slurm",
94 node=SlurmQueueConf(),
95 provider="submitit_launcher",
96 )
97
```
Path: `plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/__init__.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
3 __version__ = "1.2.0dev1"
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/__init__.py b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/__init__.py
--- a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/__init__.py
+++ b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/__init__.py
@@ -1,3 +1,3 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-__version__ = "1.2.0dev1"
+__version__ = "1.2.0dev2"
diff --git a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py
--- a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py
+++ b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py
@@ -25,6 +25,8 @@
nodes: int = 1
# name of the job
name: str = "${hydra.job.name}"
+ # redirect stderr to stdout
+ stderr_to_stdout: bool = False
@dataclass
diff --git a/plugins/hydra_submitit_launcher/setup.py b/plugins/hydra_submitit_launcher/setup.py
--- a/plugins/hydra_submitit_launcher/setup.py
+++ b/plugins/hydra_submitit_launcher/setup.py
@@ -26,7 +26,7 @@
],
install_requires=[
"hydra-core>=1.1.0.dev7",
- "submitit>=1.0.0",
+ "submitit>=1.3.3",
],
include_package_data=True,
)
| {"golden_diff": "diff --git a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/__init__.py b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/__init__.py\n--- a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/__init__.py\n+++ b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/__init__.py\n@@ -1,3 +1,3 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n \n-__version__ = \"1.2.0dev1\"\n+__version__ = \"1.2.0dev2\"\ndiff --git a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py\n--- a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py\n+++ b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py\n@@ -25,6 +25,8 @@\n nodes: int = 1\n # name of the job\n name: str = \"${hydra.job.name}\"\n+ # redirect stderr to stdout\n+ stderr_to_stdout: bool = False\n \n \n @dataclass\ndiff --git a/plugins/hydra_submitit_launcher/setup.py b/plugins/hydra_submitit_launcher/setup.py\n--- a/plugins/hydra_submitit_launcher/setup.py\n+++ b/plugins/hydra_submitit_launcher/setup.py\n@@ -26,7 +26,7 @@\n ],\n install_requires=[\n \"hydra-core>=1.1.0.dev7\",\n- \"submitit>=1.0.0\",\n+ \"submitit>=1.3.3\",\n ],\n include_package_data=True,\n )\n", "issue": "[Feature Request] submitit: add `stderr_to_stdout` ass supported by submitit\n# \ud83d\ude80 Feature Request\r\n\r\n`stderr_to_stdout` option was added in submitit (not the plugin) [last year](https://github.com/facebookincubator/submitit/pull/1611) but cannot currently be given from submitit plugin. \r\n\r\nWe should add support for passing this `stderr_to_stdout` via the plugin as well.\r\n\r\ncc @Jasha10 \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom pathlib import Path\n\nfrom read_version import read_version\nfrom setuptools import find_namespace_packages, setup\n\nsetup(\n name=\"hydra-submitit-launcher\",\n version=read_version(\"hydra_plugins/hydra_submitit_launcher\", \"__init__.py\"),\n author=\"Jeremy Rapin, Jieru Hu, Omry Yadan\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Submitit Launcher for Hydra apps\",\n long_description=(Path(__file__).parent / \"README.md\").read_text(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookincubator/submitit\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX :: Linux\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\n \"hydra-core>=1.1.0.dev7\",\n \"submitit>=1.0.0\",\n ],\n include_package_data=True,\n)\n", "path": "plugins/hydra_submitit_launcher/setup.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, List, Optional\n\nfrom hydra.core.config_store import ConfigStore\n\n\n@dataclass\nclass BaseQueueConf:\n \"\"\"Configuration shared by all executors\"\"\"\n\n submitit_folder: str = \"${hydra.sweep.dir}/.submitit/%j\"\n\n # maximum time for the job in minutes\n timeout_min: int = 60\n # number of cpus to use for each task\n cpus_per_task: Optional[int] = None\n # number of gpus to use on each node\n gpus_per_node: Optional[int] = None\n # number of tasks to spawn on each node\n tasks_per_node: int = 1\n # memory to reserve for the job on each node (in GB)\n mem_gb: Optional[int] = None\n # number of nodes to use for the job\n nodes: int = 1\n # name of the job\n name: str = \"${hydra.job.name}\"\n\n\n@dataclass\nclass SlurmQueueConf(BaseQueueConf):\n \"\"\"Slurm configuration overrides and specific parameters\"\"\"\n\n _target_: str = (\n \"hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher\"\n )\n\n # Params are used to configure sbatch, for more info check:\n # https://github.com/facebookincubator/submitit/blob/master/submitit/slurm/slurm.py\n\n # Following parameters are slurm specific\n # More information: https://slurm.schedmd.com/sbatch.html\n #\n # slurm partition to use on the cluster\n partition: Optional[str] = None\n qos: Optional[str] = None\n comment: Optional[str] = None\n constraint: Optional[str] = None\n exclude: Optional[str] = None\n gres: Optional[str] = None\n cpus_per_gpu: Optional[int] = None\n gpus_per_task: Optional[int] = None\n mem_per_gpu: Optional[str] = None\n mem_per_cpu: Optional[str] = None\n\n # Following parameters are submitit specifics\n #\n # USR1 signal delay before timeout\n signal_delay_s: int = 120\n # Maximum number of retries on job timeout.\n # Change this only after you confirmed your code can handle re-submission\n # by properly resuming from the latest stored checkpoint.\n # check the following for more info on slurm_max_num_timeout\n # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md\n max_num_timeout: int = 0\n # Useful to add parameters which are not currently available in the plugin.\n # Eg: {\"mail-user\": \"[email protected]\", \"mail-type\": \"BEGIN\"}\n additional_parameters: Dict[str, Any] = field(default_factory=dict)\n # Maximum number of jobs running in parallel\n array_parallelism: int = 256\n # A list of commands to run in sbatch befure running srun\n setup: Optional[List[str]] = None\n\n\n@dataclass\nclass LocalQueueConf(BaseQueueConf):\n _target_: str = (\n \"hydra_plugins.hydra_submitit_launcher.submitit_launcher.LocalLauncher\"\n )\n\n\n# finally, register two different choices:\nConfigStore.instance().store(\n group=\"hydra/launcher\",\n name=\"submitit_local\",\n node=LocalQueueConf(),\n provider=\"submitit_launcher\",\n)\n\n\nConfigStore.instance().store(\n group=\"hydra/launcher\",\n name=\"submitit_slurm\",\n node=SlurmQueueConf(),\n provider=\"submitit_launcher\",\n)\n", "path": "plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n__version__ = \"1.2.0dev1\"\n", "path": "plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/__init__.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n# type: ignore\nfrom pathlib import Path\n\nfrom read_version import read_version\nfrom setuptools import find_namespace_packages, setup\n\nsetup(\n name=\"hydra-submitit-launcher\",\n version=read_version(\"hydra_plugins/hydra_submitit_launcher\", \"__init__.py\"),\n author=\"Jeremy Rapin, Jieru Hu, Omry Yadan\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Submitit Launcher for Hydra apps\",\n long_description=(Path(__file__).parent / \"README.md\").read_text(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookincubator/submitit\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX :: Linux\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\n \"hydra-core>=1.1.0.dev7\",\n \"submitit>=1.3.3\",\n ],\n include_package_data=True,\n)\n", "path": "plugins/hydra_submitit_launcher/setup.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, List, Optional\n\nfrom hydra.core.config_store import ConfigStore\n\n\n@dataclass\nclass BaseQueueConf:\n \"\"\"Configuration shared by all executors\"\"\"\n\n submitit_folder: str = \"${hydra.sweep.dir}/.submitit/%j\"\n\n # maximum time for the job in minutes\n timeout_min: int = 60\n # number of cpus to use for each task\n cpus_per_task: Optional[int] = None\n # number of gpus to use on each node\n gpus_per_node: Optional[int] = None\n # number of tasks to spawn on each node\n tasks_per_node: int = 1\n # memory to reserve for the job on each node (in GB)\n mem_gb: Optional[int] = None\n # number of nodes to use for the job\n nodes: int = 1\n # name of the job\n name: str = \"${hydra.job.name}\"\n # redirect stderr to stdout\n stderr_to_stdout: bool = False\n\n\n@dataclass\nclass SlurmQueueConf(BaseQueueConf):\n \"\"\"Slurm configuration overrides and specific parameters\"\"\"\n\n _target_: str = (\n \"hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher\"\n )\n\n # Params are used to configure sbatch, for more info check:\n # https://github.com/facebookincubator/submitit/blob/master/submitit/slurm/slurm.py\n\n # Following parameters are slurm specific\n # More information: https://slurm.schedmd.com/sbatch.html\n #\n # slurm partition to use on the cluster\n partition: Optional[str] = None\n qos: Optional[str] = None\n comment: Optional[str] = None\n constraint: Optional[str] = None\n exclude: Optional[str] = None\n gres: Optional[str] = None\n cpus_per_gpu: Optional[int] = None\n gpus_per_task: Optional[int] = None\n mem_per_gpu: Optional[str] = None\n mem_per_cpu: Optional[str] = None\n\n # Following parameters are submitit specifics\n #\n # USR1 signal delay before timeout\n signal_delay_s: int = 120\n # Maximum number of retries on job timeout.\n # Change this only after you confirmed your code can handle re-submission\n # by properly resuming from the latest stored checkpoint.\n # check the following for more info on slurm_max_num_timeout\n # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md\n max_num_timeout: int = 0\n # Useful to add parameters which are not currently available in the plugin.\n # Eg: 
{\"mail-user\": \"[email protected]\", \"mail-type\": \"BEGIN\"}\n additional_parameters: Dict[str, Any] = field(default_factory=dict)\n # Maximum number of jobs running in parallel\n array_parallelism: int = 256\n # A list of commands to run in sbatch befure running srun\n setup: Optional[List[str]] = None\n\n\n@dataclass\nclass LocalQueueConf(BaseQueueConf):\n _target_: str = (\n \"hydra_plugins.hydra_submitit_launcher.submitit_launcher.LocalLauncher\"\n )\n\n\n# finally, register two different choices:\nConfigStore.instance().store(\n group=\"hydra/launcher\",\n name=\"submitit_local\",\n node=LocalQueueConf(),\n provider=\"submitit_launcher\",\n)\n\n\nConfigStore.instance().store(\n group=\"hydra/launcher\",\n name=\"submitit_slurm\",\n node=SlurmQueueConf(),\n provider=\"submitit_launcher\",\n)\n", "path": "plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n__version__ = \"1.2.0dev2\"\n", "path": "plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/__init__.py"}]} | 1,822 | 413 |
gh_patches_debug_5819 | rasdani/github-patches | git_diff | aio-libs__aiohttp-5176 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tuple index out of range with 3.7.1 and no IPv6
🐞 **Describe the bug**
After upgrading to aiohttp==3.7.1, DNS resolution fails with an exception.
💡 **To Reproduce**
<!-- How to reproduce the behavior?
For example:
1. Have certain environment
2. Run given code snippet in a certain way
3. See some behavior described
Add these steps below this comment: -->
1. I believe this is due to not having IPv6 enabled in my kernel, which is custom compiled with:
`CONFIG_IPV6=n`
2. Run:
```python
import asyncio
import aiohttp
async def main():
async with aiohttp.ClientSession() as session:
async with session.get('http://python.org') as response:
print(response.status)
asyncio.run(main())
```
3. Actual behavior
```python-traceback
$ python bug.py
Traceback (most recent call last):
File "bug.py", line 11, in <module>
asyncio.run(main())
File "/usr/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib/python3.7/asyncio/base_events.py", line 587, in run_until_complete
return future.result()
File "bug.py", line 7, in main
async with session.get('http://python.org') as response:
File "/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/client.py", line 1124, in __aenter__
self._resp = await self._coro
File "/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/client.py", line 528, in _request
req, traces=traces, timeout=real_timeout
File "/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/connector.py", line 541, in connect
proto = await self._create_connection(req, traces, timeout)
File "/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/connector.py", line 898, in _create_connection
_, proto = await self._create_direct_connection(req, traces, timeout)
File "/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/connector.py", line 1005, in _create_direct_connection
hosts = await asyncio.shield(host_resolved)
File "/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/connector.py", line 871, in _resolve_host
addrs = await self._resolver.resolve(host, port, family=self._family)
File "/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/resolver.py", line 37, in resolve
if family == socket.AF_INET6 and address[3]: # type: ignore
IndexError: tuple index out of range
```
💡 **Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
Should print `200`.
📋 **Logs/tracebacks**
<!-- If applicable, add logs/tracebacks to help explain your problem. -->
```python-traceback (paste your traceback in the next line)
```
📋 **Your version of the Python**
<!-- Attach your version of the Python. -->
```console
$ python --version
Python 3.7.9
```
📋 **Your version of the aiohttp/yarl/multidict distributions**
<!-- Attach your version of the distributions in the code blocks below. -->
```console
$ python -m pip show aiohttp
Name: aiohttp
Version: 3.7.1
Summary: Async http client/server framework (asyncio)
Home-page: https://github.com/aio-libs/aiohttp
Author: Nikolay Kim
Author-email: [email protected]
License: Apache 2
Location: /home/dbn/usr/py37/lib/python3.7/site-packages
Requires: multidict, async-timeout, typing-extensions, yarl, chardet, attrs
Required-by: replay-proxy
```
```console
$ python -m pip show multidict
Name: multidict
Version: 4.7.6
Summary: multidict implementation
Home-page: https://github.com/aio-libs/multidict
Author: Andrew Svetlov
Author-email: [email protected]
License: Apache 2
Location: /home/dbn/usr/py37/lib/python3.7/site-packages
Requires:
Required-by: yarl, aiohttp
```
```console
$ python -m pip show yarl
Name: yarl
Version: 1.5.1
Summary: Yet another URL library
Home-page: https://github.com/aio-libs/yarl/
Author: Andrew Svetlov
Author-email: [email protected]
License: Apache 2
Location: /home/dbn/usr/py37/lib/python3.7/site-packages
Requires: typing-extensions, multidict, idna
Required-by: aiohttp, replay-proxy
```
📋 **Additional context**
<!-- Add any other context about the problem here, in the next line. -->
<!-- Describe the environment you have that lead to your issue.
This includes aiohttp version, OS, proxy server and other bits that
are related to your case.
IMPORTANT: aiohttp is both server framework and client library.
For getting rid of confusing please put 'server', 'client' or 'both'
word here.
-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aiohttp/resolver.py`
Content:
```
1 import socket
2 from typing import Any, Dict, List
3
4 from .abc import AbstractResolver
5 from .helpers import get_running_loop
6
7 __all__ = ("ThreadedResolver", "AsyncResolver", "DefaultResolver")
8
9 try:
10 import aiodns
11
12 # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')
13 except ImportError: # pragma: no cover
14 aiodns = None
15
16 aiodns_default = False
17
18
19 class ThreadedResolver(AbstractResolver):
20 """Use Executor for synchronous getaddrinfo() calls, which defaults to
21 concurrent.futures.ThreadPoolExecutor.
22 """
23
24 def __init__(self) -> None:
25 self._loop = get_running_loop()
26
27 async def resolve(
28 self, hostname: str, port: int = 0, family: int = socket.AF_INET
29 ) -> List[Dict[str, Any]]:
30 infos = await self._loop.getaddrinfo(
31 hostname, port, type=socket.SOCK_STREAM, family=family
32 )
33
34 hosts = []
35 for family, _, proto, _, address in infos:
36 if family == socket.AF_INET6 and address[3]: # type: ignore
37 # This is essential for link-local IPv6 addresses.
38 # LL IPv6 is a VERY rare case. Strictly speaking, we should use
39 # getnameinfo() unconditionally, but performance makes sense.
40 host, _port = socket.getnameinfo(
41 address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV
42 )
43 port = int(_port)
44 else:
45 host, port = address[:2]
46 hosts.append(
47 {
48 "hostname": hostname,
49 "host": host,
50 "port": port,
51 "family": family,
52 "proto": proto,
53 "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
54 }
55 )
56
57 return hosts
58
59 async def close(self) -> None:
60 pass
61
62
63 class AsyncResolver(AbstractResolver):
64 """Use the `aiodns` package to make asynchronous DNS lookups"""
65
66 def __init__(self, *args: Any, **kwargs: Any) -> None:
67 if aiodns is None:
68 raise RuntimeError("Resolver requires aiodns library")
69
70 self._loop = get_running_loop()
71 self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)
72
73 async def resolve(
74 self, host: str, port: int = 0, family: int = socket.AF_INET
75 ) -> List[Dict[str, Any]]:
76 try:
77 resp = await self._resolver.gethostbyname(host, family)
78 except aiodns.error.DNSError as exc:
79 msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
80 raise OSError(msg) from exc
81 hosts = []
82 for address in resp.addresses:
83 hosts.append(
84 {
85 "hostname": host,
86 "host": address,
87 "port": port,
88 "family": family,
89 "proto": 0,
90 "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
91 }
92 )
93
94 if not hosts:
95 raise OSError("DNS lookup failed")
96
97 return hosts
98
99 async def close(self) -> None:
100 return self._resolver.cancel()
101
102
103 DefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py
--- a/aiohttp/resolver.py
+++ b/aiohttp/resolver.py
@@ -28,7 +28,11 @@
self, hostname: str, port: int = 0, family: int = socket.AF_INET
) -> List[Dict[str, Any]]:
infos = await self._loop.getaddrinfo(
- hostname, port, type=socket.SOCK_STREAM, family=family
+ hostname,
+ port,
+ type=socket.SOCK_STREAM,
+ family=family,
+ flags=socket.AI_ADDRCONFIG,
)
hosts = []
| {"golden_diff": "diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py\n--- a/aiohttp/resolver.py\n+++ b/aiohttp/resolver.py\n@@ -28,7 +28,11 @@\n self, hostname: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n- hostname, port, type=socket.SOCK_STREAM, family=family\n+ hostname,\n+ port,\n+ type=socket.SOCK_STREAM,\n+ family=family,\n+ flags=socket.AI_ADDRCONFIG,\n )\n \n hosts = []\n", "issue": "Tuple index out of range with 3.7.1 and no IPv6\n\ud83d\udc1e **Describe the bug**\r\nAfter upgrading to aiohttp==3.7.1, dns resolution fails with an exception.\r\n\r\n\r\n\ud83d\udca1 **To Reproduce**\r\n<!-- How to reproduce the behavior?\r\n\r\nFor example:\r\n1. Have certain environment\r\n2. Run given code snippet in a certain way\r\n3. See some behavior described\r\n\r\nAdd these steps below this comment: -->\r\n\r\n1. I believe this is due to not have IPV6 enabled in my kernel, which is custom compiled with.\r\n `CONFIG_IPV6=n`\r\n2. Run:\r\n ```python\r\n import asyncio\r\n import aiohttp\r\n\r\n\r\n async def main():\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get('http://python.org') as response:\r\n print(response.status)\r\n\r\n\r\n asyncio.run(main())\r\n ```\r\n3. Actual behavior\r\n ```python-traceback\r\n $ python bug.py\r\n Traceback (most recent call last):\r\n File \"bug.py\", line 11, in <module>\r\n asyncio.run(main())\r\n File \"/usr/lib/python3.7/asyncio/runners.py\", line 43, in run\r\n return loop.run_until_complete(main)\r\n File \"/usr/lib/python3.7/asyncio/base_events.py\", line 587, in run_until_complete\r\n return future.result()\r\n File \"bug.py\", line 7, in main\r\n async with session.get('http://python.org') as response:\r\n File \"/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/client.py\", line 1124, in __aenter__\r\n self._resp = await self._coro\r\n File \"/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/client.py\", line 528, in _request\r\n req, traces=traces, timeout=real_timeout\r\n File \"/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/connector.py\", line 541, in connect\r\n proto = await self._create_connection(req, traces, timeout)\r\n File \"/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/connector.py\", line 898, in _create_connection\r\n _, proto = await self._create_direct_connection(req, traces, timeout)\r\n File \"/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/connector.py\", line 1005, in _create_direct_connection\r\n hosts = await asyncio.shield(host_resolved)\r\n File \"/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/connector.py\", line 871, in _resolve_host\r\n addrs = await self._resolver.resolve(host, port, family=self._family)\r\n File \"/home/dbn/usr/py37/lib/python3.7/site-packages/aiohttp/resolver.py\", line 37, in resolve\r\n if family == socket.AF_INET6 and address[3]: # type: ignore\r\n IndexError: tuple index out of range\r\n ```\r\n\r\n\r\n\ud83d\udca1 **Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nShould print `200`.\r\n\r\n\r\n\ud83d\udccb **Logs/tracebacks**\r\n<!-- If applicable, add logs/tracebacks to help explain your problem. -->\r\n```python-traceback (paste your traceback in the next line)\r\n\r\n```\r\n\r\n\ud83d\udccb **Your version of the Python**\r\n<!-- Attach your version of the Python. 
-->\r\n```console\r\n$ python --version\r\nPython 3.7.9\r\n```\r\n\r\n\ud83d\udccb **Your version of the aiohttp/yarl/multidict distributions**\r\n<!-- Attach your version of the distributions in the code blocks below. -->\r\n```console\r\n$ python -m pip show aiohttp\r\nName: aiohttp\r\nVersion: 3.7.1\r\nSummary: Async http client/server framework (asyncio)\r\nHome-page: https://github.com/aio-libs/aiohttp\r\nAuthor: Nikolay Kim\r\nAuthor-email: [email protected]\r\nLicense: Apache 2\r\nLocation: /home/dbn/usr/py37/lib/python3.7/site-packages\r\nRequires: multidict, async-timeout, typing-extensions, yarl, chardet, attrs\r\nRequired-by: replay-proxy\r\n```\r\n```console\r\n$ python -m pip show multidict\r\nName: multidict\r\nVersion: 4.7.6\r\nSummary: multidict implementation\r\nHome-page: https://github.com/aio-libs/multidict\r\nAuthor: Andrew Svetlov\r\nAuthor-email: [email protected]\r\nLicense: Apache 2\r\nLocation: /home/dbn/usr/py37/lib/python3.7/site-packages\r\nRequires:\r\nRequired-by: yarl, aiohttp\r\n```\r\n```console\r\n$ python -m pip show yarl\r\nName: yarl\r\nVersion: 1.5.1\r\nSummary: Yet another URL library\r\nHome-page: https://github.com/aio-libs/yarl/\r\nAuthor: Andrew Svetlov\r\nAuthor-email: [email protected]\r\nLicense: Apache 2\r\nLocation: /home/dbn/usr/py37/lib/python3.7/site-packages\r\nRequires: typing-extensions, multidict, idna\r\nRequired-by: aiohttp, replay-proxy\r\n```\r\n\r\n\ud83d\udccb **Additional context**\r\n<!-- Add any other context about the problem here, in the next line. -->\r\n\r\n<!-- Describe the environment you have that lead to your issue.\r\n This includes aiohttp version, OS, proxy server and other bits that\r\n are related to your case.\r\n\r\n IMPORTANT: aiohttp is both server framework and client library.\r\n For getting rid of confusing please put 'server', 'client' or 'both'\r\n word here.\r\n -->\r\n\n", "before_files": [{"content": "import socket\nfrom typing import Any, Dict, List\n\nfrom .abc import AbstractResolver\nfrom .helpers import get_running_loop\n\n__all__ = (\"ThreadedResolver\", \"AsyncResolver\", \"DefaultResolver\")\n\ntry:\n import aiodns\n\n # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')\nexcept ImportError: # pragma: no cover\n aiodns = None\n\naiodns_default = False\n\n\nclass ThreadedResolver(AbstractResolver):\n \"\"\"Use Executor for synchronous getaddrinfo() calls, which defaults to\n concurrent.futures.ThreadPoolExecutor.\n \"\"\"\n\n def __init__(self) -> None:\n self._loop = get_running_loop()\n\n async def resolve(\n self, hostname: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n hostname, port, type=socket.SOCK_STREAM, family=family\n )\n\n hosts = []\n for family, _, proto, _, address in infos:\n if family == socket.AF_INET6 and address[3]: # type: ignore\n # This is essential for link-local IPv6 addresses.\n # LL IPv6 is a VERY rare case. 
Strictly speaking, we should use\n # getnameinfo() unconditionally, but performance makes sense.\n host, _port = socket.getnameinfo(\n address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV\n )\n port = int(_port)\n else:\n host, port = address[:2]\n hosts.append(\n {\n \"hostname\": hostname,\n \"host\": host,\n \"port\": port,\n \"family\": family,\n \"proto\": proto,\n \"flags\": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n }\n )\n\n return hosts\n\n async def close(self) -> None:\n pass\n\n\nclass AsyncResolver(AbstractResolver):\n \"\"\"Use the `aiodns` package to make asynchronous DNS lookups\"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n if aiodns is None:\n raise RuntimeError(\"Resolver requires aiodns library\")\n\n self._loop = get_running_loop()\n self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)\n\n async def resolve(\n self, host: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n try:\n resp = await self._resolver.gethostbyname(host, family)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n raise OSError(msg) from exc\n hosts = []\n for address in resp.addresses:\n hosts.append(\n {\n \"hostname\": host,\n \"host\": address,\n \"port\": port,\n \"family\": family,\n \"proto\": 0,\n \"flags\": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n }\n )\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def close(self) -> None:\n return self._resolver.cancel()\n\n\nDefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver\n", "path": "aiohttp/resolver.py"}], "after_files": [{"content": "import socket\nfrom typing import Any, Dict, List\n\nfrom .abc import AbstractResolver\nfrom .helpers import get_running_loop\n\n__all__ = (\"ThreadedResolver\", \"AsyncResolver\", \"DefaultResolver\")\n\ntry:\n import aiodns\n\n # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')\nexcept ImportError: # pragma: no cover\n aiodns = None\n\naiodns_default = False\n\n\nclass ThreadedResolver(AbstractResolver):\n \"\"\"Use Executor for synchronous getaddrinfo() calls, which defaults to\n concurrent.futures.ThreadPoolExecutor.\n \"\"\"\n\n def __init__(self) -> None:\n self._loop = get_running_loop()\n\n async def resolve(\n self, hostname: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n hostname,\n port,\n type=socket.SOCK_STREAM,\n family=family,\n flags=socket.AI_ADDRCONFIG,\n )\n\n hosts = []\n for family, _, proto, _, address in infos:\n if family == socket.AF_INET6 and address[3]: # type: ignore\n # This is essential for link-local IPv6 addresses.\n # LL IPv6 is a VERY rare case. 
Strictly speaking, we should use\n # getnameinfo() unconditionally, but performance makes sense.\n host, _port = socket.getnameinfo(\n address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV\n )\n port = int(_port)\n else:\n host, port = address[:2]\n hosts.append(\n {\n \"hostname\": hostname,\n \"host\": host,\n \"port\": port,\n \"family\": family,\n \"proto\": proto,\n \"flags\": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n }\n )\n\n return hosts\n\n async def close(self) -> None:\n pass\n\n\nclass AsyncResolver(AbstractResolver):\n \"\"\"Use the `aiodns` package to make asynchronous DNS lookups\"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n if aiodns is None:\n raise RuntimeError(\"Resolver requires aiodns library\")\n\n self._loop = get_running_loop()\n self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)\n\n async def resolve(\n self, host: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n try:\n resp = await self._resolver.gethostbyname(host, family)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n raise OSError(msg) from exc\n hosts = []\n for address in resp.addresses:\n hosts.append(\n {\n \"hostname\": host,\n \"host\": address,\n \"port\": port,\n \"family\": family,\n \"proto\": 0,\n \"flags\": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n }\n )\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def close(self) -> None:\n return self._resolver.cancel()\n\n\nDefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver\n", "path": "aiohttp/resolver.py"}]} | 2,444 | 146 |
gh_patches_debug_20712 | rasdani/github-patches | git_diff | chainer__chainer-3925 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MNIST example gets invalid DISPLAY error when run without X11
If the MNIST example is run in an environment where matplotlib is installed but X11 is not available (e.g. over ssh), the training script fails with a `RuntimeError: Invalid DISPLAY variable` error.
```
Exception in main training loop: Invalid DISPLAY variable
Traceback (most recent call last):
File "/data/work/w/repos/chainer/chainer/training/trainer.py", line 302, in run
entry.extension(self)
File "/data/work/w/repos/chainer/chainer/training/extensions/plot_report.py", line 150, in __call__
f = plt.figure()
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/pyplot.py", line 534, in figure
**kwargs)
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backend_bases.py", line 170, in new_figure_manager
return cls.new_figure_manager_given_figure(num, fig)
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backend_bases.py", line 176, in new_figure_manager_given_figure
canvas = cls.FigureCanvas(figure)
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backends/backend_qt5agg.py", line 35, in __init__
super(FigureCanvasQTAggBase, self).__init__(figure=figure)
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backends/backend_qt5.py", line 235, in __init__
_create_qApp()
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backends/backend_qt5.py", line 122, in _create_qApp
raise RuntimeError('Invalid DISPLAY variable')
Will finalize trainer extensions and updater before reraising the exception.
Traceback (most recent call last):
File "train_mnist.py", line 124, in <module>
main()
File "train_mnist.py", line 120, in main
trainer.run()
File "/data/work/w/repos/chainer/chainer/training/trainer.py", line 313, in run
six.reraise(*sys.exc_info())
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/six.py", line 693, in reraise
raise value
File "/data/work/w/repos/chainer/chainer/training/trainer.py", line 302, in run
entry.extension(self)
File "/data/work/w/repos/chainer/chainer/training/extensions/plot_report.py", line 150, in __call__
f = plt.figure()
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/pyplot.py", line 534, in figure
**kwargs)
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backend_bases.py", line 170, in new_figure_manager
return cls.new_figure_manager_given_figure(num, fig)
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backend_bases.py", line 176, in new_figure_manager_given_figure
canvas = cls.FigureCanvas(figure)
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backends/backend_qt5agg.py", line 35, in __init__
super(FigureCanvasQTAggBase, self).__init__(figure=figure)
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backends/backend_qt5.py", line 235, in __init__
_create_qApp()
File "/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backends/backend_qt5.py", line 122, in _create_qApp
raise RuntimeError('Invalid DISPLAY variable')
RuntimeError: Invalid DISPLAY variable
```
--- END ISSUE ---
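A common workaround in headless environments (shown here only as an illustrative sketch, not as the project's chosen fix) is to select a non-interactive matplotlib backend before any plotting code runs, so figures are rendered to files without needing a DISPLAY:

```python
# Force the file-only Agg backend before pyplot is imported anywhere.
# Assumption: the PlotReport extension uses whatever backend matplotlib
# is configured with at import time.
import matplotlib
matplotlib.use('Agg')

import matplotlib.pyplot as plt  # safe even without X11 now
```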
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/mnist/train_mnist.py`
Content:
```
1 #!/usr/bin/env python
2
3 from __future__ import print_function
4
5 import argparse
6
7 import chainer
8 import chainer.functions as F
9 import chainer.links as L
10 from chainer import training
11 from chainer.training import extensions
12
13
14 # Network definition
15 class MLP(chainer.Chain):
16
17 def __init__(self, n_units, n_out):
18 super(MLP, self).__init__()
19 with self.init_scope():
20 # the size of the inputs to each layer will be inferred
21 self.l1 = L.Linear(None, n_units) # n_in -> n_units
22 self.l2 = L.Linear(None, n_units) # n_units -> n_units
23 self.l3 = L.Linear(None, n_out) # n_units -> n_out
24
25 def __call__(self, x):
26 h1 = F.relu(self.l1(x))
27 h2 = F.relu(self.l2(h1))
28 return self.l3(h2)
29
30
31 def main():
32 parser = argparse.ArgumentParser(description='Chainer example: MNIST')
33 parser.add_argument('--batchsize', '-b', type=int, default=100,
34 help='Number of images in each mini-batch')
35 parser.add_argument('--epoch', '-e', type=int, default=20,
36 help='Number of sweeps over the dataset to train')
37 parser.add_argument('--frequency', '-f', type=int, default=-1,
38 help='Frequency of taking a snapshot')
39 parser.add_argument('--gpu', '-g', type=int, default=-1,
40 help='GPU ID (negative value indicates CPU)')
41 parser.add_argument('--out', '-o', default='result',
42 help='Directory to output the result')
43 parser.add_argument('--resume', '-r', default='',
44 help='Resume the training from snapshot')
45 parser.add_argument('--unit', '-u', type=int, default=1000,
46 help='Number of units')
47 args = parser.parse_args()
48
49 print('GPU: {}'.format(args.gpu))
50 print('# unit: {}'.format(args.unit))
51 print('# Minibatch-size: {}'.format(args.batchsize))
52 print('# epoch: {}'.format(args.epoch))
53 print('')
54
55 # Set up a neural network to train
56 # Classifier reports softmax cross entropy loss and accuracy at every
57 # iteration, which will be used by the PrintReport extension below.
58 model = L.Classifier(MLP(args.unit, 10))
59 if args.gpu >= 0:
60 # Make a specified GPU current
61 chainer.cuda.get_device_from_id(args.gpu).use()
62 model.to_gpu() # Copy the model to the GPU
63
64 # Setup an optimizer
65 optimizer = chainer.optimizers.Adam()
66 optimizer.setup(model)
67
68 # Load the MNIST dataset
69 train, test = chainer.datasets.get_mnist()
70
71 train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
72 test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
73 repeat=False, shuffle=False)
74
75 # Set up a trainer
76 updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
77 trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
78
79 # Evaluate the model with the test dataset for each epoch
80 trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
81
82 # Dump a computational graph from 'loss' variable at the first iteration
83 # The "main" refers to the target link of the "main" optimizer.
84 trainer.extend(extensions.dump_graph('main/loss'))
85
86 # Take a snapshot for each specified epoch
87 frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
88 trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))
89
90 # Write a log of evaluation statistics for each epoch
91 trainer.extend(extensions.LogReport())
92
93 # Save two plot images to the result dir
94 if extensions.PlotReport.available():
95 trainer.extend(
96 extensions.PlotReport(['main/loss', 'validation/main/loss'],
97 'epoch', file_name='loss.png'))
98 trainer.extend(
99 extensions.PlotReport(
100 ['main/accuracy', 'validation/main/accuracy'],
101 'epoch', file_name='accuracy.png'))
102
103 # Print selected entries of the log to stdout
104 # Here "main" refers to the target link of the "main" optimizer again, and
105 # "validation" refers to the default name of the Evaluator extension.
106 # Entries other than 'epoch' are reported by the Classifier link, called by
107 # either the updater or the evaluator.
108 trainer.extend(extensions.PrintReport(
109 ['epoch', 'main/loss', 'validation/main/loss',
110 'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
111
112 # Print a progress bar to stdout
113 trainer.extend(extensions.ProgressBar())
114
115 if args.resume:
116 # Resume from a snapshot
117 chainer.serializers.load_npz(args.resume, trainer)
118
119 # Run the training
120 trainer.run()
121
122
123 if __name__ == '__main__':
124 main()
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/mnist/train_mnist.py b/examples/mnist/train_mnist.py
--- a/examples/mnist/train_mnist.py
+++ b/examples/mnist/train_mnist.py
@@ -44,6 +44,8 @@
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=1000,
help='Number of units')
+ parser.add_argument('--noplot', dest='plot', action='store_false',
+ help='Disable PlotReport extension')
args = parser.parse_args()
print('GPU: {}'.format(args.gpu))
@@ -91,7 +93,7 @@
trainer.extend(extensions.LogReport())
# Save two plot images to the result dir
- if extensions.PlotReport.available():
+ if args.plot and extensions.PlotReport.available():
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png'))
| {"golden_diff": "diff --git a/examples/mnist/train_mnist.py b/examples/mnist/train_mnist.py\n--- a/examples/mnist/train_mnist.py\n+++ b/examples/mnist/train_mnist.py\n@@ -44,6 +44,8 @@\n help='Resume the training from snapshot')\n parser.add_argument('--unit', '-u', type=int, default=1000,\n help='Number of units')\n+ parser.add_argument('--noplot', dest='plot', action='store_false',\n+ help='Disable PlotReport extension')\n args = parser.parse_args()\n \n print('GPU: {}'.format(args.gpu))\n@@ -91,7 +93,7 @@\n trainer.extend(extensions.LogReport())\n \n # Save two plot images to the result dir\n- if extensions.PlotReport.available():\n+ if args.plot and extensions.PlotReport.available():\n trainer.extend(\n extensions.PlotReport(['main/loss', 'validation/main/loss'],\n 'epoch', file_name='loss.png'))\n", "issue": "MNIST example gets invalid DISPLAY error when run without X11\nIf MNIST example is run in the environment where matplotlib is installed but X11 is not available (e.g. ssh), the training script fails with `RuntimeError: Invalid DISPLAY variable` error.\r\n\r\n```\r\nException in main training loop: Invalid DISPLAY variable\r\nTraceback (most recent call last):\r\n File \"/data/work/w/repos/chainer/chainer/training/trainer.py\", line 302, in run\r\n entry.extension(self)\r\n File \"/data/work/w/repos/chainer/chainer/training/extensions/plot_report.py\", line 150, in __call__\r\n f = plt.figure()\r\n File \"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/pyplot.py\", line 534, in figure\r\n **kwargs)\r\n File \"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backend_bases.py\", line 170, in new_figure_manager\r\n return cls.new_figure_manager_given_figure(num, fig)\r\n File \"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backend_bases.py\", line 176, in new_figure_manager_given_figure\r\n canvas = cls.FigureCanvas(figure)\r\n File \"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backends/backend_qt5agg.py\", line 35, in __init__\r\n super(FigureCanvasQTAggBase, self).__init__(figure=figure)\r\n File \"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backends/backend_qt5.py\", line 235, in __init__\r\n _create_qApp()\r\n File \"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backends/backend_qt5.py\", line 122, in _create_qApp\r\n raise RuntimeError('Invalid DISPLAY variable')\r\nWill finalize trainer extensions and updater before reraising the exception.\r\nTraceback (most recent call last):\r\n File \"train_mnist.py\", line 124, in <module>\r\n main()\r\n File \"train_mnist.py\", line 120, in main\r\n trainer.run()\r\n File \"/data/work/w/repos/chainer/chainer/training/trainer.py\", line 313, in run\r\n six.reraise(*sys.exc_info())\r\n File \"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/six.py\", line 693, in reraise\r\n raise value\r\n File \"/data/work/w/repos/chainer/chainer/training/trainer.py\", line 302, in run\r\n entry.extension(self)\r\n File \"/data/work/w/repos/chainer/chainer/training/extensions/plot_report.py\", line 150, in __call__\r\n f = plt.figure()\r\n File \"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/pyplot.py\", line 534, in figure\r\n **kwargs)\r\n File \"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backend_bases.py\", line 170, in new_figure_manager\r\n return cls.new_figure_manager_given_figure(num, fig)\r\n File 
\"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backend_bases.py\", line 176, in new_figure_manager_given_figure\r\n canvas = cls.FigureCanvas(figure)\r\n File \"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backends/backend_qt5agg.py\", line 35, in __init__\r\n super(FigureCanvasQTAggBase, self).__init__(figure=figure)\r\n File \"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backends/backend_qt5.py\", line 235, in __init__\r\n _create_qApp()\r\n File \"/home/niboshi/anaconda/anaconda3/lib/python3.6/site-packages/matplotlib/backends/backend_qt5.py\", line 122, in _create_qApp\r\n raise RuntimeError('Invalid DISPLAY variable')\r\nRuntimeError: Invalid DISPLAY variable\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport argparse\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import training\nfrom chainer.training import extensions\n\n\n# Network definition\nclass MLP(chainer.Chain):\n\n def __init__(self, n_units, n_out):\n super(MLP, self).__init__()\n with self.init_scope():\n # the size of the inputs to each layer will be inferred\n self.l1 = L.Linear(None, n_units) # n_in -> n_units\n self.l2 = L.Linear(None, n_units) # n_units -> n_units\n self.l3 = L.Linear(None, n_out) # n_units -> n_out\n\n def __call__(self, x):\n h1 = F.relu(self.l1(x))\n h2 = F.relu(self.l2(h1))\n return self.l3(h2)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Chainer example: MNIST')\n parser.add_argument('--batchsize', '-b', type=int, default=100,\n help='Number of images in each mini-batch')\n parser.add_argument('--epoch', '-e', type=int, default=20,\n help='Number of sweeps over the dataset to train')\n parser.add_argument('--frequency', '-f', type=int, default=-1,\n help='Frequency of taking a snapshot')\n parser.add_argument('--gpu', '-g', type=int, default=-1,\n help='GPU ID (negative value indicates CPU)')\n parser.add_argument('--out', '-o', default='result',\n help='Directory to output the result')\n parser.add_argument('--resume', '-r', default='',\n help='Resume the training from snapshot')\n parser.add_argument('--unit', '-u', type=int, default=1000,\n help='Number of units')\n args = parser.parse_args()\n\n print('GPU: {}'.format(args.gpu))\n print('# unit: {}'.format(args.unit))\n print('# Minibatch-size: {}'.format(args.batchsize))\n print('# epoch: {}'.format(args.epoch))\n print('')\n\n # Set up a neural network to train\n # Classifier reports softmax cross entropy loss and accuracy at every\n # iteration, which will be used by the PrintReport extension below.\n model = L.Classifier(MLP(args.unit, 10))\n if args.gpu >= 0:\n # Make a specified GPU current\n chainer.cuda.get_device_from_id(args.gpu).use()\n model.to_gpu() # Copy the model to the GPU\n\n # Setup an optimizer\n optimizer = chainer.optimizers.Adam()\n optimizer.setup(model)\n\n # Load the MNIST dataset\n train, test = chainer.datasets.get_mnist()\n\n train_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n test_iter = chainer.iterators.SerialIterator(test, args.batchsize,\n repeat=False, shuffle=False)\n\n # Set up a trainer\n updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)\n\n # Evaluate the model with the test dataset for each epoch\n trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))\n\n # Dump a 
computational graph from 'loss' variable at the first iteration\n # The \"main\" refers to the target link of the \"main\" optimizer.\n trainer.extend(extensions.dump_graph('main/loss'))\n\n # Take a snapshot for each specified epoch\n frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)\n trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))\n\n # Write a log of evaluation statistics for each epoch\n trainer.extend(extensions.LogReport())\n\n # Save two plot images to the result dir\n if extensions.PlotReport.available():\n trainer.extend(\n extensions.PlotReport(['main/loss', 'validation/main/loss'],\n 'epoch', file_name='loss.png'))\n trainer.extend(\n extensions.PlotReport(\n ['main/accuracy', 'validation/main/accuracy'],\n 'epoch', file_name='accuracy.png'))\n\n # Print selected entries of the log to stdout\n # Here \"main\" refers to the target link of the \"main\" optimizer again, and\n # \"validation\" refers to the default name of the Evaluator extension.\n # Entries other than 'epoch' are reported by the Classifier link, called by\n # either the updater or the evaluator.\n trainer.extend(extensions.PrintReport(\n ['epoch', 'main/loss', 'validation/main/loss',\n 'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))\n\n # Print a progress bar to stdout\n trainer.extend(extensions.ProgressBar())\n\n if args.resume:\n # Resume from a snapshot\n chainer.serializers.load_npz(args.resume, trainer)\n\n # Run the training\n trainer.run()\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/mnist/train_mnist.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport argparse\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import training\nfrom chainer.training import extensions\n\n\n# Network definition\nclass MLP(chainer.Chain):\n\n def __init__(self, n_units, n_out):\n super(MLP, self).__init__()\n with self.init_scope():\n # the size of the inputs to each layer will be inferred\n self.l1 = L.Linear(None, n_units) # n_in -> n_units\n self.l2 = L.Linear(None, n_units) # n_units -> n_units\n self.l3 = L.Linear(None, n_out) # n_units -> n_out\n\n def __call__(self, x):\n h1 = F.relu(self.l1(x))\n h2 = F.relu(self.l2(h1))\n return self.l3(h2)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Chainer example: MNIST')\n parser.add_argument('--batchsize', '-b', type=int, default=100,\n help='Number of images in each mini-batch')\n parser.add_argument('--epoch', '-e', type=int, default=20,\n help='Number of sweeps over the dataset to train')\n parser.add_argument('--frequency', '-f', type=int, default=-1,\n help='Frequency of taking a snapshot')\n parser.add_argument('--gpu', '-g', type=int, default=-1,\n help='GPU ID (negative value indicates CPU)')\n parser.add_argument('--out', '-o', default='result',\n help='Directory to output the result')\n parser.add_argument('--resume', '-r', default='',\n help='Resume the training from snapshot')\n parser.add_argument('--unit', '-u', type=int, default=1000,\n help='Number of units')\n parser.add_argument('--noplot', dest='plot', action='store_false',\n help='Disable PlotReport extension')\n args = parser.parse_args()\n\n print('GPU: {}'.format(args.gpu))\n print('# unit: {}'.format(args.unit))\n print('# Minibatch-size: {}'.format(args.batchsize))\n print('# epoch: {}'.format(args.epoch))\n print('')\n\n # Set up a neural network to train\n # Classifier reports softmax cross entropy loss and 
accuracy at every\n # iteration, which will be used by the PrintReport extension below.\n model = L.Classifier(MLP(args.unit, 10))\n if args.gpu >= 0:\n # Make a specified GPU current\n chainer.cuda.get_device_from_id(args.gpu).use()\n model.to_gpu() # Copy the model to the GPU\n\n # Setup an optimizer\n optimizer = chainer.optimizers.Adam()\n optimizer.setup(model)\n\n # Load the MNIST dataset\n train, test = chainer.datasets.get_mnist()\n\n train_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n test_iter = chainer.iterators.SerialIterator(test, args.batchsize,\n repeat=False, shuffle=False)\n\n # Set up a trainer\n updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)\n\n # Evaluate the model with the test dataset for each epoch\n trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))\n\n # Dump a computational graph from 'loss' variable at the first iteration\n # The \"main\" refers to the target link of the \"main\" optimizer.\n trainer.extend(extensions.dump_graph('main/loss'))\n\n # Take a snapshot for each specified epoch\n frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)\n trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))\n\n # Write a log of evaluation statistics for each epoch\n trainer.extend(extensions.LogReport())\n\n # Save two plot images to the result dir\n if args.plot and extensions.PlotReport.available():\n trainer.extend(\n extensions.PlotReport(['main/loss', 'validation/main/loss'],\n 'epoch', file_name='loss.png'))\n trainer.extend(\n extensions.PlotReport(\n ['main/accuracy', 'validation/main/accuracy'],\n 'epoch', file_name='accuracy.png'))\n\n # Print selected entries of the log to stdout\n # Here \"main\" refers to the target link of the \"main\" optimizer again, and\n # \"validation\" refers to the default name of the Evaluator extension.\n # Entries other than 'epoch' are reported by the Classifier link, called by\n # either the updater or the evaluator.\n trainer.extend(extensions.PrintReport(\n ['epoch', 'main/loss', 'validation/main/loss',\n 'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))\n\n # Print a progress bar to stdout\n trainer.extend(extensions.ProgressBar())\n\n if args.resume:\n # Resume from a snapshot\n chainer.serializers.load_npz(args.resume, trainer)\n\n # Run the training\n trainer.run()\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/mnist/train_mnist.py"}]} | 2,576 | 216 |
gh_patches_debug_13695 | rasdani/github-patches | git_diff | projectmesa__mesa-373 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow fixed seed for replication
Figure out how to best handle running a model with a fixed seed, to ensure that it will produce the same result, and implement that.
--- END ISSUE ---
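For context, a minimal sketch of what a fixed seed needs to guarantee (the helper below is hypothetical, not part of mesa): seeding only Python's `random` module is not enough if any component draws numbers through numpy, so both generators have to be seeded with the same value.

```python
import random
import numpy

def seed_everything(seed):
    # Hypothetical helper: seed both RNGs so repeated runs with the
    # same seed reproduce identical draws.
    random.seed(seed)
    numpy.random.seed(seed)

seed_everything(42)
first = (random.random(), numpy.random.random())
seed_everything(42)
assert (random.random(), numpy.random.random()) == first
```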
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesa/model.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 The model class for Mesa framework.
4
5 Core Objects: Model
6
7 """
8 import datetime as dt
9 import random
10
11
12 class Model:
13 """ Base class for models. """
14 def __init__(self, seed=None):
15 """ Create a new model. Overload this method with the actual code to
16 start the model.
17
18 Args:
19 seed: seed for the random number generator
20
21 Attributes:
22 schedule: schedule object
23 running: a bool indicating if the model should continue running
24
25 """
26 if seed is None:
27 self.seed = dt.datetime.now()
28 else:
29 self.seed = seed
30 random.seed(seed)
31 self.running = True
32 self.schedule = None
33
34 def run_model(self):
35 """ Run the model until the end condition is reached. Overload as
36 needed.
37
38 """
39 while self.running:
40 self.step()
41
42 def step(self):
43 """ A single step. Fill in here. """
44 pass
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mesa/model.py b/mesa/model.py
--- a/mesa/model.py
+++ b/mesa/model.py
@@ -7,6 +7,7 @@
"""
import datetime as dt
import random
+import numpy
class Model:
@@ -23,11 +24,14 @@
running: a bool indicating if the model should continue running
"""
+ # seed both the numpy and Python random number generators
if seed is None:
self.seed = dt.datetime.now()
else:
self.seed = seed
random.seed(seed)
+ numpy.random.seed(seed)
+
self.running = True
self.schedule = None
| {"golden_diff": "diff --git a/mesa/model.py b/mesa/model.py\n--- a/mesa/model.py\n+++ b/mesa/model.py\n@@ -7,6 +7,7 @@\n \"\"\"\n import datetime as dt\n import random\n+import numpy\n \n \n class Model:\n@@ -23,11 +24,14 @@\n running: a bool indicating if the model should continue running\n \n \"\"\"\n+ # seed both the numpy and Python random number generators\n if seed is None:\n self.seed = dt.datetime.now()\n else:\n self.seed = seed\n random.seed(seed)\n+ numpy.random.seed(seed)\n+\n self.running = True\n self.schedule = None\n", "issue": "Allow fixed seed for replication\nFigure out how to best handle running a model with a fixed seed, to ensure that it will produce the same result. and implement that.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nThe model class for Mesa framework.\n\nCore Objects: Model\n\n\"\"\"\nimport datetime as dt\nimport random\n\n\nclass Model:\n \"\"\" Base class for models. \"\"\"\n def __init__(self, seed=None):\n \"\"\" Create a new model. Overload this method with the actual code to\n start the model.\n\n Args:\n seed: seed for the random number generator\n\n Attributes:\n schedule: schedule object\n running: a bool indicating if the model should continue running\n\n \"\"\"\n if seed is None:\n self.seed = dt.datetime.now()\n else:\n self.seed = seed\n random.seed(seed)\n self.running = True\n self.schedule = None\n\n def run_model(self):\n \"\"\" Run the model until the end condition is reached. Overload as\n needed.\n\n \"\"\"\n while self.running:\n self.step()\n\n def step(self):\n \"\"\" A single step. Fill in here. \"\"\"\n pass\n", "path": "mesa/model.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nThe model class for Mesa framework.\n\nCore Objects: Model\n\n\"\"\"\nimport datetime as dt\nimport random\nimport numpy\n\n\nclass Model:\n \"\"\" Base class for models. \"\"\"\n def __init__(self, seed=None):\n \"\"\" Create a new model. Overload this method with the actual code to\n start the model.\n\n Args:\n seed: seed for the random number generator\n\n Attributes:\n schedule: schedule object\n running: a bool indicating if the model should continue running\n\n \"\"\"\n # seed both the numpy and Python random number generators\n if seed is None:\n self.seed = dt.datetime.now()\n else:\n self.seed = seed\n random.seed(seed)\n numpy.random.seed(seed)\n\n self.running = True\n self.schedule = None\n\n def run_model(self):\n \"\"\" Run the model until the end condition is reached. Overload as\n needed.\n\n \"\"\"\n while self.running:\n self.step()\n\n def step(self):\n \"\"\" A single step. Fill in here. \"\"\"\n pass\n", "path": "mesa/model.py"}]} | 588 | 145 |
gh_patches_debug_39623 | rasdani/github-patches | git_diff | wright-group__WrightTools-886 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ENHC: Trim should have an option to replace with the neighborhood mean with the outlier not in the neighborhood
Current replacement options are `'nan', 'mean', number`. I vote we add `exclusive_mean`.
--- END ISSUE ---
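To make the requested option concrete, here is a small sketch (illustrative values only) of the difference between the existing neighborhood mean, which includes the outlier, and an exclusive mean that leaves it out:

```python
import numpy as np

neighbors = np.array([1.0, 2.0, 3.0, 100.0])  # 100.0 is the outlier itself
outlier = 100.0

inclusive_mean = np.nanmean(neighbors)  # 26.5, pulled up by the outlier
exclusive_mean = (np.nansum(neighbors) - outlier) / (np.sum(~np.isnan(neighbors)) - 1)  # 2.0
```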
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/data/_channel.py`
Content:
```
1 """Channel class and associated."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import numpy as np
8
9 import h5py
10
11 import warnings
12 import numbers
13
14 from .. import kit as wt_kit
15 from .._dataset import Dataset
16 from .. import exceptions as wt_exceptions
17
18 __all__ = ["Channel"]
19
20 # --- class ---------------------------------------------------------------------------------------
21
22
23 class Channel(Dataset):
24 """Channel."""
25
26 class_name = "Channel"
27
28 def __init__(
29 self,
30 parent,
31 id,
32 *,
33 units=None,
34 null=None,
35 signed=None,
36 label=None,
37 label_seed=None,
38 **kwargs
39 ):
40 """Construct a channel object.
41
42 Parameters
43 ----------
44 values : array-like
45 Values.
46 name : string
47 Channel name.
48 units : string (optional)
49 Channel units. Default is None.
50 null : number (optional)
51 Channel null. Default is None (0).
52 signed : booelan (optional)
53 Channel signed flag. Default is None (guess).
54 label : string.
55 Label. Default is None.
56 label_seed : list of strings
57 Label seed. Default is None.
58 **kwargs
59 Additional keyword arguments are added to the attrs dictionary
60 and to the natural namespace of the object (if possible).
61 """
62 self._parent = parent
63 super().__init__(id)
64 self.label = label
65 self.label_seed = label_seed
66 self.units = units
67 self.dimensionality = len(self.shape)
68 # attrs
69 self.attrs.update(kwargs)
70 self.attrs["name"] = h5py.h5i.get_name(self.id).decode().split("/")[-1]
71 self.attrs["class"] = "Channel"
72 if signed is not None:
73 self.attrs["signed"] = signed
74 if null is not None:
75 self.attrs["null"] = null
76 for key, value in self.attrs.items():
77 identifier = wt_kit.string2identifier(key)
78 if not hasattr(self, identifier):
79 setattr(self, identifier, value)
80
81 @property
82 def major_extent(self) -> complex:
83 """Maximum deviation from null."""
84 return max((self.max() - self.null, self.null - self.min()))
85
86 @property
87 def minor_extent(self) -> complex:
88 """Minimum deviation from null."""
89 return min((self.max() - self.null, self.null - self.min()))
90
91 @property
92 def null(self) -> complex:
93 if "null" not in self.attrs.keys():
94 self.attrs["null"] = 0
95 return self.attrs["null"]
96
97 @null.setter
98 def null(self, value):
99 self.attrs["null"] = value
100
101 @property
102 def signed(self) -> bool:
103 if "signed" not in self.attrs.keys():
104 self.attrs["signed"] = False
105 return self.attrs["signed"]
106
107 @signed.setter
108 def signed(self, value):
109 self.attrs["signed"] = value
110
111 def mag(self) -> complex:
112 """Channel magnitude (maximum deviation from null)."""
113 return self.major_extent
114
115 def normalize(self, mag=1.):
116 """Normalize a Channel, set `null` to 0 and the mag to given value.
117
118 Parameters
119 ----------
120 mag : float (optional)
121 New value of mag. Default is 1.
122 """
123
124 def f(dataset, s, null, mag):
125 dataset[s] -= null
126 dataset[s] /= mag
127
128 if self.signed:
129 mag = self.mag() / mag
130 else:
131 mag = self.max() / mag
132 self.chunkwise(f, null=self.null, mag=mag)
133 self._null = 0
134
135 def trim(self, neighborhood, method="ztest", factor=3, replace="nan", verbose=True):
136 """Remove outliers from the dataset.
137
138 Identifies outliers by comparing each point to its
139 neighbors using a statistical test.
140
141 Parameters
142 ----------
143 neighborhood : list of integers
144 Size of the neighborhood in each dimension. Length of the list must
145 be equal to the dimensionality of the channel.
146 method : {'ztest'} (optional)
147 Statistical test used to detect outliers. Default is ztest.
148
149 ztest
150 Compare point deviation from neighborhood mean to neighborhood
151 standard deviation.
152
153 factor : number (optional)
154 Tolerance factor. Default is 3.
155 replace : {'nan', 'mean', number} (optional)
156 Behavior of outlier replacement. Default is nan.
157
158 nan
159 Outliers are replaced by numpy nans.
160
161 mean
162 Outliers are replaced by the mean of its neighborhood.
163
164 number
165 Array becomes given number.
166
167 Returns
168 -------
169 list of tuples
170 Indicies of trimmed outliers.
171
172 See Also
173 --------
174 clip
175 Remove pixels outside of a certain range.
176 """
177 warnings.warn("trim", category=wt_exceptions.EntireDatasetInMemoryWarning)
178 outliers = []
179 means = []
180 # find outliers
181 for idx in np.ndindex(self.shape):
182 slices = []
183 for i, di, size in zip(idx, neighborhood, self.shape):
184 start = max(0, i - di)
185 stop = min(size, i + di + 1)
186 slices.append(slice(start, stop, 1))
187 neighbors = self[slices]
188 mean = np.nanmean(neighbors)
189 limit = np.nanstd(neighbors) * factor
190 if np.abs(self[idx] - mean) > limit:
191 outliers.append(idx)
192 means.append(mean)
193 # replace outliers
194 i = tuple(zip(*outliers))
195 if replace == "nan":
196 arr = self[:]
197 arr[i] = np.nan
198 self[:] = arr
199 elif replace == "mean":
200 arr = self[:]
201 arr[i] = means
202 self[:] = arr
203 elif isinstance(replace, numbers.Number):
204 arr = self[:]
205 arr[i] = replace
206 self[:] = arr
207 else:
208 raise KeyError("replace must be one of {nan, mean} or some number")
209 # finish
210 if verbose:
211 print("%i outliers removed" % len(outliers))
212 return outliers
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/WrightTools/data/_channel.py b/WrightTools/data/_channel.py
--- a/WrightTools/data/_channel.py
+++ b/WrightTools/data/_channel.py
@@ -152,14 +152,17 @@
factor : number (optional)
Tolerance factor. Default is 3.
- replace : {'nan', 'mean', number} (optional)
+ replace : {'nan', 'mean', 'exclusive_mean', number} (optional)
Behavior of outlier replacement. Default is nan.
nan
Outliers are replaced by numpy nans.
mean
- Outliers are replaced by the mean of its neighborhood.
+ Outliers are replaced by the mean of its neighborhood, including itself.
+
+ exclusive_mean
+ Outilers are replaced by the mean of its neighborhood, not including itself.
number
Array becomes given number.
@@ -177,6 +180,7 @@
warnings.warn("trim", category=wt_exceptions.EntireDatasetInMemoryWarning)
outliers = []
means = []
+ ex_means = []
# find outliers
for idx in np.ndindex(self.shape):
slices = []
@@ -186,26 +190,33 @@
slices.append(slice(start, stop, 1))
neighbors = self[slices]
mean = np.nanmean(neighbors)
+ sum_ = np.nansum(neighbors)
limit = np.nanstd(neighbors) * factor
if np.abs(self[idx] - mean) > limit:
outliers.append(idx)
means.append(mean)
+ # Note, "- 1" is to exclude the point itself, which is not nan, in order
+ # to enter this if block, as `np.abs(nan - mean)` is nan, which would
+ # evaluate to False
+ ex_means.append((sum_ - self[idx]) / (np.sum(~np.isnan(neighbors)) - 1))
+
# replace outliers
i = tuple(zip(*outliers))
- if replace == "nan":
- arr = self[:]
- arr[i] = np.nan
- self[:] = arr
- elif replace == "mean":
- arr = self[:]
- arr[i] = means
- self[:] = arr
- elif isinstance(replace, numbers.Number):
- arr = self[:]
- arr[i] = replace
- self[:] = arr
- else:
- raise KeyError("replace must be one of {nan, mean} or some number")
+
+ if len(i) == 0:
+ if verbose:
+ print("No outliers found")
+ return []
+
+ replace = {"nan": np.nan, "mean": means, "exclusive_mean": ex_means}.get(replace, replace)
+
+ # This may someday be available in h5py directly, but seems that day is not yet.
+ # This is annoying because it is the only reason we hold the whole set in memory.
+ # KFS 2019-03-21
+ arr = self[:]
+ arr[i] = replace
+ self[:] = arr
+
# finish
if verbose:
print("%i outliers removed" % len(outliers))
| {"golden_diff": "diff --git a/WrightTools/data/_channel.py b/WrightTools/data/_channel.py\n--- a/WrightTools/data/_channel.py\n+++ b/WrightTools/data/_channel.py\n@@ -152,14 +152,17 @@\n \n factor : number (optional)\n Tolerance factor. Default is 3.\n- replace : {'nan', 'mean', number} (optional)\n+ replace : {'nan', 'mean', 'exclusive_mean', number} (optional)\n Behavior of outlier replacement. Default is nan.\n \n nan\n Outliers are replaced by numpy nans.\n \n mean\n- Outliers are replaced by the mean of its neighborhood.\n+ Outliers are replaced by the mean of its neighborhood, including itself.\n+\n+ exclusive_mean\n+ Outilers are replaced by the mean of its neighborhood, not including itself.\n \n number\n Array becomes given number.\n@@ -177,6 +180,7 @@\n warnings.warn(\"trim\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n outliers = []\n means = []\n+ ex_means = []\n # find outliers\n for idx in np.ndindex(self.shape):\n slices = []\n@@ -186,26 +190,33 @@\n slices.append(slice(start, stop, 1))\n neighbors = self[slices]\n mean = np.nanmean(neighbors)\n+ sum_ = np.nansum(neighbors)\n limit = np.nanstd(neighbors) * factor\n if np.abs(self[idx] - mean) > limit:\n outliers.append(idx)\n means.append(mean)\n+ # Note, \"- 1\" is to exclude the point itself, which is not nan, in order\n+ # to enter this if block, as `np.abs(nan - mean)` is nan, which would\n+ # evaluate to False\n+ ex_means.append((sum_ - self[idx]) / (np.sum(~np.isnan(neighbors)) - 1))\n+\n # replace outliers\n i = tuple(zip(*outliers))\n- if replace == \"nan\":\n- arr = self[:]\n- arr[i] = np.nan\n- self[:] = arr\n- elif replace == \"mean\":\n- arr = self[:]\n- arr[i] = means\n- self[:] = arr\n- elif isinstance(replace, numbers.Number):\n- arr = self[:]\n- arr[i] = replace\n- self[:] = arr\n- else:\n- raise KeyError(\"replace must be one of {nan, mean} or some number\")\n+\n+ if len(i) == 0:\n+ if verbose:\n+ print(\"No outliers found\")\n+ return []\n+\n+ replace = {\"nan\": np.nan, \"mean\": means, \"exclusive_mean\": ex_means}.get(replace, replace)\n+\n+ # This may someday be available in h5py directly, but seems that day is not yet.\n+ # This is annoying because it is the only reason we hold the whole set in memory.\n+ # KFS 2019-03-21\n+ arr = self[:]\n+ arr[i] = replace\n+ self[:] = arr\n+\n # finish\n if verbose:\n print(\"%i outliers removed\" % len(outliers))\n", "issue": "ENHC: Trim should have an option to replace with the neighborhood mean with the outlier not in the neighborhood\nCurrent replacement options are `'nan', 'mean', number`. I vote we add `exclusive_mean`.\n", "before_files": [{"content": "\"\"\"Channel class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nimport h5py\n\nimport warnings\nimport numbers\n\nfrom .. import kit as wt_kit\nfrom .._dataset import Dataset\nfrom .. import exceptions as wt_exceptions\n\n__all__ = [\"Channel\"]\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Channel(Dataset):\n \"\"\"Channel.\"\"\"\n\n class_name = \"Channel\"\n\n def __init__(\n self,\n parent,\n id,\n *,\n units=None,\n null=None,\n signed=None,\n label=None,\n label_seed=None,\n **kwargs\n ):\n \"\"\"Construct a channel object.\n\n Parameters\n ----------\n values : array-like\n Values.\n name : string\n Channel name.\n units : string (optional)\n Channel units. Default is None.\n null : number (optional)\n Channel null. 
Default is None (0).\n signed : booelan (optional)\n Channel signed flag. Default is None (guess).\n label : string.\n Label. Default is None.\n label_seed : list of strings\n Label seed. Default is None.\n **kwargs\n Additional keyword arguments are added to the attrs dictionary\n and to the natural namespace of the object (if possible).\n \"\"\"\n self._parent = parent\n super().__init__(id)\n self.label = label\n self.label_seed = label_seed\n self.units = units\n self.dimensionality = len(self.shape)\n # attrs\n self.attrs.update(kwargs)\n self.attrs[\"name\"] = h5py.h5i.get_name(self.id).decode().split(\"/\")[-1]\n self.attrs[\"class\"] = \"Channel\"\n if signed is not None:\n self.attrs[\"signed\"] = signed\n if null is not None:\n self.attrs[\"null\"] = null\n for key, value in self.attrs.items():\n identifier = wt_kit.string2identifier(key)\n if not hasattr(self, identifier):\n setattr(self, identifier, value)\n\n @property\n def major_extent(self) -> complex:\n \"\"\"Maximum deviation from null.\"\"\"\n return max((self.max() - self.null, self.null - self.min()))\n\n @property\n def minor_extent(self) -> complex:\n \"\"\"Minimum deviation from null.\"\"\"\n return min((self.max() - self.null, self.null - self.min()))\n\n @property\n def null(self) -> complex:\n if \"null\" not in self.attrs.keys():\n self.attrs[\"null\"] = 0\n return self.attrs[\"null\"]\n\n @null.setter\n def null(self, value):\n self.attrs[\"null\"] = value\n\n @property\n def signed(self) -> bool:\n if \"signed\" not in self.attrs.keys():\n self.attrs[\"signed\"] = False\n return self.attrs[\"signed\"]\n\n @signed.setter\n def signed(self, value):\n self.attrs[\"signed\"] = value\n\n def mag(self) -> complex:\n \"\"\"Channel magnitude (maximum deviation from null).\"\"\"\n return self.major_extent\n\n def normalize(self, mag=1.):\n \"\"\"Normalize a Channel, set `null` to 0 and the mag to given value.\n\n Parameters\n ----------\n mag : float (optional)\n New value of mag. Default is 1.\n \"\"\"\n\n def f(dataset, s, null, mag):\n dataset[s] -= null\n dataset[s] /= mag\n\n if self.signed:\n mag = self.mag() / mag\n else:\n mag = self.max() / mag\n self.chunkwise(f, null=self.null, mag=mag)\n self._null = 0\n\n def trim(self, neighborhood, method=\"ztest\", factor=3, replace=\"nan\", verbose=True):\n \"\"\"Remove outliers from the dataset.\n\n Identifies outliers by comparing each point to its\n neighbors using a statistical test.\n\n Parameters\n ----------\n neighborhood : list of integers\n Size of the neighborhood in each dimension. Length of the list must\n be equal to the dimensionality of the channel.\n method : {'ztest'} (optional)\n Statistical test used to detect outliers. Default is ztest.\n\n ztest\n Compare point deviation from neighborhood mean to neighborhood\n standard deviation.\n\n factor : number (optional)\n Tolerance factor. Default is 3.\n replace : {'nan', 'mean', number} (optional)\n Behavior of outlier replacement. 
Default is nan.\n\n nan\n Outliers are replaced by numpy nans.\n\n mean\n Outliers are replaced by the mean of its neighborhood.\n\n number\n Array becomes given number.\n\n Returns\n -------\n list of tuples\n Indicies of trimmed outliers.\n\n See Also\n --------\n clip\n Remove pixels outside of a certain range.\n \"\"\"\n warnings.warn(\"trim\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n outliers = []\n means = []\n # find outliers\n for idx in np.ndindex(self.shape):\n slices = []\n for i, di, size in zip(idx, neighborhood, self.shape):\n start = max(0, i - di)\n stop = min(size, i + di + 1)\n slices.append(slice(start, stop, 1))\n neighbors = self[slices]\n mean = np.nanmean(neighbors)\n limit = np.nanstd(neighbors) * factor\n if np.abs(self[idx] - mean) > limit:\n outliers.append(idx)\n means.append(mean)\n # replace outliers\n i = tuple(zip(*outliers))\n if replace == \"nan\":\n arr = self[:]\n arr[i] = np.nan\n self[:] = arr\n elif replace == \"mean\":\n arr = self[:]\n arr[i] = means\n self[:] = arr\n elif isinstance(replace, numbers.Number):\n arr = self[:]\n arr[i] = replace\n self[:] = arr\n else:\n raise KeyError(\"replace must be one of {nan, mean} or some number\")\n # finish\n if verbose:\n print(\"%i outliers removed\" % len(outliers))\n return outliers\n", "path": "WrightTools/data/_channel.py"}], "after_files": [{"content": "\"\"\"Channel class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nimport h5py\n\nimport warnings\nimport numbers\n\nfrom .. import kit as wt_kit\nfrom .._dataset import Dataset\nfrom .. import exceptions as wt_exceptions\n\n__all__ = [\"Channel\"]\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Channel(Dataset):\n \"\"\"Channel.\"\"\"\n\n class_name = \"Channel\"\n\n def __init__(\n self,\n parent,\n id,\n *,\n units=None,\n null=None,\n signed=None,\n label=None,\n label_seed=None,\n **kwargs\n ):\n \"\"\"Construct a channel object.\n\n Parameters\n ----------\n values : array-like\n Values.\n name : string\n Channel name.\n units : string (optional)\n Channel units. Default is None.\n null : number (optional)\n Channel null. Default is None (0).\n signed : booelan (optional)\n Channel signed flag. Default is None (guess).\n label : string.\n Label. Default is None.\n label_seed : list of strings\n Label seed. 
Default is None.\n **kwargs\n Additional keyword arguments are added to the attrs dictionary\n and to the natural namespace of the object (if possible).\n \"\"\"\n self._parent = parent\n super().__init__(id)\n self.label = label\n self.label_seed = label_seed\n self.units = units\n self.dimensionality = len(self.shape)\n # attrs\n self.attrs.update(kwargs)\n self.attrs[\"name\"] = h5py.h5i.get_name(self.id).decode().split(\"/\")[-1]\n self.attrs[\"class\"] = \"Channel\"\n if signed is not None:\n self.attrs[\"signed\"] = signed\n if null is not None:\n self.attrs[\"null\"] = null\n for key, value in self.attrs.items():\n identifier = wt_kit.string2identifier(key)\n if not hasattr(self, identifier):\n setattr(self, identifier, value)\n\n @property\n def major_extent(self) -> complex:\n \"\"\"Maximum deviation from null.\"\"\"\n return max((self.max() - self.null, self.null - self.min()))\n\n @property\n def minor_extent(self) -> complex:\n \"\"\"Minimum deviation from null.\"\"\"\n return min((self.max() - self.null, self.null - self.min()))\n\n @property\n def null(self) -> complex:\n if \"null\" not in self.attrs.keys():\n self.attrs[\"null\"] = 0\n return self.attrs[\"null\"]\n\n @null.setter\n def null(self, value):\n self.attrs[\"null\"] = value\n\n @property\n def signed(self) -> bool:\n if \"signed\" not in self.attrs.keys():\n self.attrs[\"signed\"] = False\n return self.attrs[\"signed\"]\n\n @signed.setter\n def signed(self, value):\n self.attrs[\"signed\"] = value\n\n def mag(self) -> complex:\n \"\"\"Channel magnitude (maximum deviation from null).\"\"\"\n return self.major_extent\n\n def normalize(self, mag=1.):\n \"\"\"Normalize a Channel, set `null` to 0 and the mag to given value.\n\n Parameters\n ----------\n mag : float (optional)\n New value of mag. Default is 1.\n \"\"\"\n\n def f(dataset, s, null, mag):\n dataset[s] -= null\n dataset[s] /= mag\n\n if self.signed:\n mag = self.mag() / mag\n else:\n mag = self.max() / mag\n self.chunkwise(f, null=self.null, mag=mag)\n self._null = 0\n\n def trim(self, neighborhood, method=\"ztest\", factor=3, replace=\"nan\", verbose=True):\n \"\"\"Remove outliers from the dataset.\n\n Identifies outliers by comparing each point to its\n neighbors using a statistical test.\n\n Parameters\n ----------\n neighborhood : list of integers\n Size of the neighborhood in each dimension. Length of the list must\n be equal to the dimensionality of the channel.\n method : {'ztest'} (optional)\n Statistical test used to detect outliers. Default is ztest.\n\n ztest\n Compare point deviation from neighborhood mean to neighborhood\n standard deviation.\n\n factor : number (optional)\n Tolerance factor. Default is 3.\n replace : {'nan', 'mean', 'exclusive_mean', number} (optional)\n Behavior of outlier replacement. 
Default is nan.\n\n nan\n Outliers are replaced by numpy nans.\n\n mean\n Outliers are replaced by the mean of its neighborhood, including itself.\n\n exclusive_mean\n Outilers are replaced by the mean of its neighborhood, not including itself.\n\n number\n Array becomes given number.\n\n Returns\n -------\n list of tuples\n Indicies of trimmed outliers.\n\n See Also\n --------\n clip\n Remove pixels outside of a certain range.\n \"\"\"\n warnings.warn(\"trim\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n outliers = []\n means = []\n ex_means = []\n # find outliers\n for idx in np.ndindex(self.shape):\n slices = []\n for i, di, size in zip(idx, neighborhood, self.shape):\n start = max(0, i - di)\n stop = min(size, i + di + 1)\n slices.append(slice(start, stop, 1))\n neighbors = self[slices]\n mean = np.nanmean(neighbors)\n sum_ = np.nansum(neighbors)\n limit = np.nanstd(neighbors) * factor\n if np.abs(self[idx] - mean) > limit:\n outliers.append(idx)\n means.append(mean)\n # Note, \"- 1\" is to exclude the point itself, which is not nan, in order\n # to enter this if block, as `np.abs(nan - mean)` is nan, which would\n # evaluate to False\n ex_means.append((sum_ - self[idx]) / (np.sum(~np.isnan(neighbors)) - 1))\n\n # replace outliers\n i = tuple(zip(*outliers))\n\n if len(i) == 0:\n if verbose:\n print(\"No outliers found\")\n return []\n\n replace = {\"nan\": np.nan, \"mean\": means, \"exclusive_mean\": ex_means}.get(replace, replace)\n\n # This may someday be available in h5py directly, but seems that day is not yet.\n # This is annoying because it is the only reason we hold the whole set in memory.\n # KFS 2019-03-21\n arr = self[:]\n arr[i] = replace\n self[:] = arr\n\n # finish\n if verbose:\n print(\"%i outliers removed\" % len(outliers))\n return outliers\n", "path": "WrightTools/data/_channel.py"}]} | 2,162 | 721 |
gh_patches_debug_14678 | rasdani/github-patches | git_diff | AUTOMATIC1111__stable-diffusion-webui-7430 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: "AssertionError: bad number of vertical texts: 2; must be 3" on hires and prompt matrix
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
Performing a render using both **hires fix** and **prompt matrix** results in an error and a failure to render the image grid (and accompanying legend). The individual images still render and can be accessed in the txt2img output folder. The image preview pane remains blank and the error text _AssertionError: bad number of vertical texts: 2; must be 3_* shows below it.
*These numbers might be different. Sometimes _vertical_ will instead say _horizontal_.
### Steps to reproduce the problem
1. In the txt2img tab, enter a base prompt with 2 or more variable prompts separated by the vertical line character. For example: **Painting of a man|Cowboy hat|beard**
2. Toggle **Hires. fix** to on. ✅
3. Under **Script**, select **Prompt matrix**.
4. Click generate.
### What should have happened?
On completion, the generation should have produced a grid showing the array of possible images with the variable prompts toggled on or off. Instead, no grid is produced and an error appears. The images still render (and can be viewed from the output folder), but the image grid does not. Since the image grid is the most important part (allowing you to compare images very easily), this is a problem.
Note that using prompt matrix or hires. fix individually still work just fine.
### Commit where the problem happens
3a0d6b77295162146d0a8d04278804334da6f1b4
### What platforms do you use to access UI ?
Windows
### What browsers do you use to access the UI ?
Google Chrome
### Command Line Arguments
_No response_
### Additional information, context and logs
The following text is an example of what shows in my command window when the error occurs:
```
Error completing request8:54, 1.83s/it]
Arguments: ('task(xe7gus9bhl4yqzh)', 'painting of a beautiful woman|cowboy hat|blindfold', '', 'None', 'None', 20, 1, False, False, 1, 1, 10, -1.0, -1.0, 0, 0, 0, False, 512, 512, True, 0.7, 2, 'Latent', 0, 0, 0, 1, False, False, False, False, '', 1, '', 0, '', True, False, False) {}
Traceback (most recent call last):
File "C:\StableDiffusion\stable-diffusion-webui\modules\call_queue.py", line 56, in f
res = list(func(*args, **kwargs))
File "C:\StableDiffusion\stable-diffusion-webui\modules\call_queue.py", line 37, in f
res = func(*args, **kwargs)
File "C:\StableDiffusion\stable-diffusion-webui\modules\txt2img.py", line 49, in txt2img
processed = modules.scripts.scripts_txt2img.run(p, *args)
File "C:\StableDiffusion\stable-diffusion-webui\modules\scripts.py", line 337, in run
processed = script.run(p, *script_args)
File "C:\StableDiffusion\stable-diffusion-webui\scripts\prompt_matrix.py", line 82, in run
grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts)
File "C:\StableDiffusion\stable-diffusion-webui\modules\images.py", line 230, in draw_prompt_matrix
return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
File "C:\StableDiffusion\stable-diffusion-webui\modules\images.py", line 175, in draw_grid_annotations
assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
AssertionError: bad number of horizontal texts: 2; must be 4
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/prompt_matrix.py`
Content:
```
1 import math
2 from collections import namedtuple
3 from copy import copy
4 import random
5
6 import modules.scripts as scripts
7 import gradio as gr
8
9 from modules import images
10 from modules.processing import process_images, Processed
11 from modules.shared import opts, cmd_opts, state
12 import modules.sd_samplers
13
14
15 def draw_xy_grid(xs, ys, x_label, y_label, cell):
16 res = []
17
18 ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys]
19 hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs]
20
21 first_processed = None
22
23 state.job_count = len(xs) * len(ys)
24
25 for iy, y in enumerate(ys):
26 for ix, x in enumerate(xs):
27 state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
28
29 processed = cell(x, y)
30 if first_processed is None:
31 first_processed = processed
32
33 res.append(processed.images[0])
34
35 grid = images.image_grid(res, rows=len(ys))
36 grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
37
38 first_processed.images = [grid]
39
40 return first_processed
41
42
43 class Script(scripts.Script):
44 def title(self):
45 return "Prompt matrix"
46
47 def ui(self, is_img2img):
48 gr.HTML('<br />')
49 with gr.Row():
50 with gr.Column():
51 put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id("put_at_start"))
52 different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=self.elem_id("different_seeds"))
53 with gr.Column():
54 prompt_type = gr.Radio(["positive", "negative"], label="Select prompt", elem_id=self.elem_id("prompt_type"), value="positive")
55 variations_delimiter = gr.Radio(["comma", "space"], label="Select joining char", elem_id=self.elem_id("variations_delimiter"), value="comma")
56 with gr.Column():
57 margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size"))
58
59 return [put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size]
60
61 def run(self, p, put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size):
62 modules.processing.fix_seed(p)
63 # Raise error if promp type is not positive or negative
64 if prompt_type not in ["positive", "negative"]:
65 raise ValueError(f"Unknown prompt type {prompt_type}")
66 # Raise error if variations delimiter is not comma or space
67 if variations_delimiter not in ["comma", "space"]:
68 raise ValueError(f"Unknown variations delimiter {variations_delimiter}")
69
70 prompt = p.prompt if prompt_type == "positive" else p.negative_prompt
71 original_prompt = prompt[0] if type(prompt) == list else prompt
72 positive_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt
73
74 delimiter = ", " if variations_delimiter == "comma" else " "
75
76 all_prompts = []
77 prompt_matrix_parts = original_prompt.split("|")
78 combination_count = 2 ** (len(prompt_matrix_parts) - 1)
79 for combination_num in range(combination_count):
80 selected_prompts = [text.strip().strip(',') for n, text in enumerate(prompt_matrix_parts[1:]) if combination_num & (1 << n)]
81
82 if put_at_start:
83 selected_prompts = selected_prompts + [prompt_matrix_parts[0]]
84 else:
85 selected_prompts = [prompt_matrix_parts[0]] + selected_prompts
86
87 all_prompts.append(delimiter.join(selected_prompts))
88
89 p.n_iter = math.ceil(len(all_prompts) / p.batch_size)
90 p.do_not_save_grid = True
91
92 print(f"Prompt matrix will create {len(all_prompts)} images using a total of {p.n_iter} batches.")
93
94 if prompt_type == "positive":
95 p.prompt = all_prompts
96 else:
97 p.negative_prompt = all_prompts
98 p.seed = [p.seed + (i if different_seeds else 0) for i in range(len(all_prompts))]
99 p.prompt_for_display = positive_prompt
100 processed = process_images(p)
101
102 grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
103 grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts, margin_size)
104 processed.images.insert(0, grid)
105 processed.index_of_first_image = 1
106 processed.infotexts.insert(0, processed.infotexts[0])
107
108 if opts.grid_save:
109 images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", extension=opts.grid_format, prompt=original_prompt, seed=processed.seed, grid=True, p=p)
110
111 return processed
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py
--- a/scripts/prompt_matrix.py
+++ b/scripts/prompt_matrix.py
@@ -99,8 +99,8 @@
p.prompt_for_display = positive_prompt
processed = process_images(p)
- grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
- grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts, margin_size)
+ grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
+ grid = images.draw_prompt_matrix(grid, processed.images[0].width, processed.images[1].height, prompt_matrix_parts, margin_size)
processed.images.insert(0, grid)
processed.index_of_first_image = 1
processed.infotexts.insert(0, processed.infotexts[0])
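Why the patch resolves the assertion, as far as the traceback allows to reconstruct: `draw_grid_annotations` compares a computed column count against `len(hor_texts)`, and that count is apparently derived by dividing the grid image's width by the cell width it is handed. The script passed `p.width` and `p.height`, which are the pre-upscale request dimensions; with Hires. fix enabled the images placed into the grid are larger than that, so the derived count disagrees with the number of annotation texts. Reading the cell size from the rendered images keeps the two consistent (the patch takes the width from `processed.images[0]` and the height from `processed.images[1]`; at that point the list holds only rendered images of equal size, so either index gives the post-upscale dimensions). A rough sketch of the arithmetic, with sizes assumed to match the report (512 base size, 2x upscale, 4 matrix images in 2 columns):

```python
# Illustrative numbers only; they mirror the reported error message.
request_w = 512                    # p.width, what the script used to pass along
actual_w = request_w * 2           # width of each image actually placed in the grid
cols_in_grid = 2                   # 4 matrix images laid out as 2 columns x 2 rows
grid_w = cols_in_grid * actual_w   # 2048

cols_from_request = grid_w // request_w  # 4 -> "must be 4" in the assertion
cols_from_actual = grid_w // actual_w    # 2 -> matches the 2 horizontal texts
print(cols_from_request, cols_from_actual)
```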
| {"golden_diff": "diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py\n--- a/scripts/prompt_matrix.py\n+++ b/scripts/prompt_matrix.py\n@@ -99,8 +99,8 @@\n p.prompt_for_display = positive_prompt\r\n processed = process_images(p)\r\n \r\n- grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))\r\n- grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts, margin_size)\r\n+ grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2)) \r\n+ grid = images.draw_prompt_matrix(grid, processed.images[0].width, processed.images[1].height, prompt_matrix_parts, margin_size)\r\n processed.images.insert(0, grid)\r\n processed.index_of_first_image = 1\r\n processed.infotexts.insert(0, processed.infotexts[0])\n", "issue": "[Bug]: \"AssertionError: bad number of vertical texts: 2; must be 3\" on hires and prompt matrix\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues and checked the recent builds/commits\n\n### What happened?\n\nPerforming a render using both **hires fix** and **prompt matrix** results in an error and a failure to render the image grid (and accompanying legend). The individual images still render and can be accessed in the txt2img output folder. The image preview pane remains blank and the error text _AssertionError: bad number of vertical texts: 2; must be 3_* shows below it.\r\n\r\n*These numbers might be different. Sometimes _vertical_ will instead say _horizontal_.\n\n### Steps to reproduce the problem\n\n1. In the txt2img tab, enter a base prompt with 2 or more variable prompts separated by the vertical line character. For example: **Painting of a man|Cowboy hat|beard**\r\n2. Toggle **Hires. fix** to on. \u2705\r\n3. Under **Script**, select **Prompt matrix**.\r\n4. Click generate.\n\n### What should have happened?\n\nOn completion, the generation should have produced a grid showing the array of possible images with the variable prompts toggled on or off. Instead, no grid is produced and an error appears instead. The images still render (and can be viewed from the output folder), but the image grid does not. Since the image grid is the most important part (allowing you to compare images very easily), this is a problem.\r\n\r\nNote that using prompt matrix or hires. 
fix individually still work just fine.\n\n### Commit where the problem happens\n\n3a0d6b77295162146d0a8d04278804334da6f1b4\n\n### What platforms do you use to access UI ?\n\nWindows\n\n### What browsers do you use to access the UI ?\n\nGoogle Chrome\n\n### Command Line Arguments\n\n_No response_\n\n### Additional information, context and logs\n\nThe following text is an example of what shows in my command window when the error occurs:\r\n\r\n```\r\nError completing request8:54, 1.83s/it]\r\nArguments: ('task(xe7gus9bhl4yqzh)', 'painting of a beautiful woman|cowboy hat|blindfold', '', 'None', 'None', 20, 1, False, False, 1, 1, 10, -1.0, -1.0, 0, 0, 0, False, 512, 512, True, 0.7, 2, 'Latent', 0, 0, 0, 1, False, False, False, False, '', 1, '', 0, '', True, False, False) {}\r\nTraceback (most recent call last):\r\n File \"C:\\StableDiffusion\\stable-diffusion-webui\\modules\\call_queue.py\", line 56, in f\r\n res = list(func(*args, **kwargs))\r\n File \"C:\\StableDiffusion\\stable-diffusion-webui\\modules\\call_queue.py\", line 37, in f\r\n res = func(*args, **kwargs)\r\n File \"C:\\StableDiffusion\\stable-diffusion-webui\\modules\\txt2img.py\", line 49, in txt2img\r\n processed = modules.scripts.scripts_txt2img.run(p, *args)\r\n File \"C:\\StableDiffusion\\stable-diffusion-webui\\modules\\scripts.py\", line 337, in run\r\n processed = script.run(p, *script_args)\r\n File \"C:\\StableDiffusion\\stable-diffusion-webui\\scripts\\prompt_matrix.py\", line 82, in run\r\n grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts)\r\n File \"C:\\StableDiffusion\\stable-diffusion-webui\\modules\\images.py\", line 230, in draw_prompt_matrix\r\n return draw_grid_annotations(im, width, height, hor_texts, ver_texts)\r\n File \"C:\\StableDiffusion\\stable-diffusion-webui\\modules\\images.py\", line 175, in draw_grid_annotations\r\n assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'\r\nAssertionError: bad number of horizontal texts: 2; must be 4\r\n```\n", "before_files": [{"content": "import math\r\nfrom collections import namedtuple\r\nfrom copy import copy\r\nimport random\r\n\r\nimport modules.scripts as scripts\r\nimport gradio as gr\r\n\r\nfrom modules import images\r\nfrom modules.processing import process_images, Processed\r\nfrom modules.shared import opts, cmd_opts, state\r\nimport modules.sd_samplers\r\n\r\n\r\ndef draw_xy_grid(xs, ys, x_label, y_label, cell):\r\n res = []\r\n\r\n ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys]\r\n hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs]\r\n\r\n first_processed = None\r\n\r\n state.job_count = len(xs) * len(ys)\r\n\r\n for iy, y in enumerate(ys):\r\n for ix, x in enumerate(xs):\r\n state.job = f\"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}\"\r\n\r\n processed = cell(x, y)\r\n if first_processed is None:\r\n first_processed = processed\r\n\r\n res.append(processed.images[0])\r\n\r\n grid = images.image_grid(res, rows=len(ys))\r\n grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)\r\n\r\n first_processed.images = [grid]\r\n\r\n return first_processed\r\n\r\n\r\nclass Script(scripts.Script):\r\n def title(self):\r\n return \"Prompt matrix\"\r\n\r\n def ui(self, is_img2img):\r\n gr.HTML('<br />')\r\n with gr.Row():\r\n with gr.Column():\r\n put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id(\"put_at_start\"))\r\n different_seeds = gr.Checkbox(label='Use 
different seed for each picture', value=False, elem_id=self.elem_id(\"different_seeds\"))\r\n with gr.Column():\r\n prompt_type = gr.Radio([\"positive\", \"negative\"], label=\"Select prompt\", elem_id=self.elem_id(\"prompt_type\"), value=\"positive\")\r\n variations_delimiter = gr.Radio([\"comma\", \"space\"], label=\"Select joining char\", elem_id=self.elem_id(\"variations_delimiter\"), value=\"comma\")\r\n with gr.Column():\r\n margin_size = gr.Slider(label=\"Grid margins (px)\", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id(\"margin_size\"))\r\n\r\n return [put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size]\r\n\r\n def run(self, p, put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size):\r\n modules.processing.fix_seed(p)\r\n # Raise error if promp type is not positive or negative\r\n if prompt_type not in [\"positive\", \"negative\"]:\r\n raise ValueError(f\"Unknown prompt type {prompt_type}\")\r\n # Raise error if variations delimiter is not comma or space\r\n if variations_delimiter not in [\"comma\", \"space\"]:\r\n raise ValueError(f\"Unknown variations delimiter {variations_delimiter}\")\r\n\r\n prompt = p.prompt if prompt_type == \"positive\" else p.negative_prompt\r\n original_prompt = prompt[0] if type(prompt) == list else prompt\r\n positive_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt\r\n\r\n delimiter = \", \" if variations_delimiter == \"comma\" else \" \"\r\n\r\n all_prompts = []\r\n prompt_matrix_parts = original_prompt.split(\"|\")\r\n combination_count = 2 ** (len(prompt_matrix_parts) - 1)\r\n for combination_num in range(combination_count):\r\n selected_prompts = [text.strip().strip(',') for n, text in enumerate(prompt_matrix_parts[1:]) if combination_num & (1 << n)]\r\n\r\n if put_at_start:\r\n selected_prompts = selected_prompts + [prompt_matrix_parts[0]]\r\n else:\r\n selected_prompts = [prompt_matrix_parts[0]] + selected_prompts\r\n\r\n all_prompts.append(delimiter.join(selected_prompts))\r\n\r\n p.n_iter = math.ceil(len(all_prompts) / p.batch_size)\r\n p.do_not_save_grid = True\r\n\r\n print(f\"Prompt matrix will create {len(all_prompts)} images using a total of {p.n_iter} batches.\")\r\n\r\n if prompt_type == \"positive\":\r\n p.prompt = all_prompts\r\n else:\r\n p.negative_prompt = all_prompts\r\n p.seed = [p.seed + (i if different_seeds else 0) for i in range(len(all_prompts))]\r\n p.prompt_for_display = positive_prompt\r\n processed = process_images(p)\r\n\r\n grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))\r\n grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts, margin_size)\r\n processed.images.insert(0, grid)\r\n processed.index_of_first_image = 1\r\n processed.infotexts.insert(0, processed.infotexts[0])\r\n\r\n if opts.grid_save:\r\n images.save_image(processed.images[0], p.outpath_grids, \"prompt_matrix\", extension=opts.grid_format, prompt=original_prompt, seed=processed.seed, grid=True, p=p)\r\n\r\n return processed\r\n", "path": "scripts/prompt_matrix.py"}], "after_files": [{"content": "import math\r\nfrom collections import namedtuple\r\nfrom copy import copy\r\nimport random\r\n\r\nimport modules.scripts as scripts\r\nimport gradio as gr\r\n\r\nfrom modules import images\r\nfrom modules.processing import process_images, Processed\r\nfrom modules.shared import opts, cmd_opts, state\r\nimport modules.sd_samplers\r\n\r\n\r\ndef draw_xy_grid(xs, ys, x_label, y_label, cell):\r\n res = 
[]\r\n\r\n ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys]\r\n hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs]\r\n\r\n first_processed = None\r\n\r\n state.job_count = len(xs) * len(ys)\r\n\r\n for iy, y in enumerate(ys):\r\n for ix, x in enumerate(xs):\r\n state.job = f\"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}\"\r\n\r\n processed = cell(x, y)\r\n if first_processed is None:\r\n first_processed = processed\r\n\r\n res.append(processed.images[0])\r\n\r\n grid = images.image_grid(res, rows=len(ys))\r\n grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)\r\n\r\n first_processed.images = [grid]\r\n\r\n return first_processed\r\n\r\n\r\nclass Script(scripts.Script):\r\n def title(self):\r\n return \"Prompt matrix\"\r\n\r\n def ui(self, is_img2img):\r\n gr.HTML('<br />')\r\n with gr.Row():\r\n with gr.Column():\r\n put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id(\"put_at_start\"))\r\n different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=self.elem_id(\"different_seeds\"))\r\n with gr.Column():\r\n prompt_type = gr.Radio([\"positive\", \"negative\"], label=\"Select prompt\", elem_id=self.elem_id(\"prompt_type\"), value=\"positive\")\r\n variations_delimiter = gr.Radio([\"comma\", \"space\"], label=\"Select joining char\", elem_id=self.elem_id(\"variations_delimiter\"), value=\"comma\")\r\n with gr.Column():\r\n margin_size = gr.Slider(label=\"Grid margins (px)\", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id(\"margin_size\"))\r\n\r\n return [put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size]\r\n\r\n def run(self, p, put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size):\r\n modules.processing.fix_seed(p)\r\n # Raise error if promp type is not positive or negative\r\n if prompt_type not in [\"positive\", \"negative\"]:\r\n raise ValueError(f\"Unknown prompt type {prompt_type}\")\r\n # Raise error if variations delimiter is not comma or space\r\n if variations_delimiter not in [\"comma\", \"space\"]:\r\n raise ValueError(f\"Unknown variations delimiter {variations_delimiter}\")\r\n\r\n prompt = p.prompt if prompt_type == \"positive\" else p.negative_prompt\r\n original_prompt = prompt[0] if type(prompt) == list else prompt\r\n positive_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt\r\n\r\n delimiter = \", \" if variations_delimiter == \"comma\" else \" \"\r\n\r\n all_prompts = []\r\n prompt_matrix_parts = original_prompt.split(\"|\")\r\n combination_count = 2 ** (len(prompt_matrix_parts) - 1)\r\n for combination_num in range(combination_count):\r\n selected_prompts = [text.strip().strip(',') for n, text in enumerate(prompt_matrix_parts[1:]) if combination_num & (1 << n)]\r\n\r\n if put_at_start:\r\n selected_prompts = selected_prompts + [prompt_matrix_parts[0]]\r\n else:\r\n selected_prompts = [prompt_matrix_parts[0]] + selected_prompts\r\n\r\n all_prompts.append(delimiter.join(selected_prompts))\r\n\r\n p.n_iter = math.ceil(len(all_prompts) / p.batch_size)\r\n p.do_not_save_grid = True\r\n\r\n print(f\"Prompt matrix will create {len(all_prompts)} images using a total of {p.n_iter} batches.\")\r\n\r\n if prompt_type == \"positive\":\r\n p.prompt = all_prompts\r\n else:\r\n p.negative_prompt = all_prompts\r\n p.seed = [p.seed + (i if different_seeds else 0) for i in range(len(all_prompts))]\r\n p.prompt_for_display = positive_prompt\r\n 
processed = process_images(p)\r\n\r\n grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2)) \r\n grid = images.draw_prompt_matrix(grid, processed.images[0].width, processed.images[1].height, prompt_matrix_parts, margin_size)\r\n processed.images.insert(0, grid)\r\n processed.index_of_first_image = 1\r\n processed.infotexts.insert(0, processed.infotexts[0])\r\n\r\n if opts.grid_save:\r\n images.save_image(processed.images[0], p.outpath_grids, \"prompt_matrix\", extension=opts.grid_format, prompt=original_prompt, seed=processed.seed, grid=True, p=p)\r\n\r\n return processed\r\n", "path": "scripts/prompt_matrix.py"}]} | 2,581 | 219 |
gh_patches_debug_29583 | rasdani/github-patches | git_diff | Parsl__parsl-2301 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove IPyParallel executor
**Is your feature request related to a problem? Please describe.**
The parsl ipp code isn't really maintained any more. As I try to tidy up some stuff in the test suite I find myself spending more time than I'd like to debugging what is happening inside the IPP parsl executor, while at the same time the folklore is "don't use IPP".
**Describe the solution you'd like**
I'd like to remove all IPP tests from the test suite now. This will inevitably lead to the ipp code rotting fast, and so in addition the IPP executor should probably be removed in its entirely.
**Describe alternatives you've considered**
An alternative is to put developer effort into maintaining IPP as a supported executor.
**Additional context**
The folklore on slack seems to be "don't use IPP". This feature request turns that folklore into reality.
I invite comment in support or against from @kylechard @yadudoc @annawoodard especially as regards how this would affect existing users who have not moved to htex (if any?).
see #1328
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/executors/ipp_controller.py`
Content:
```
1 class Controller():
2 """This stub exists to issue a more helpful warning about the IPyParallel
3 executor being removed from parsl some time after v0.9.
4
5 It can eventually be removed entirely - perhaps after v0.10
6 """
7 def __init__(self, *args, **kwargs):
8 raise RuntimeError("The IPyParallel executor has been removed from parsl")
9
```
Path: `parsl/executors/__init__.py`
Content:
```
1 from parsl.executors.threads import ThreadPoolExecutor
2 from parsl.executors.ipp import IPyParallelExecutor
3 from parsl.executors.workqueue.executor import WorkQueueExecutor
4 from parsl.executors.high_throughput.executor import HighThroughputExecutor
5 from parsl.executors.extreme_scale.executor import ExtremeScaleExecutor
6 from parsl.executors.low_latency.executor import LowLatencyExecutor
7 from parsl.executors.flux.executor import FluxExecutor
8
9 __all__ = ['IPyParallelExecutor',
10 'ThreadPoolExecutor',
11 'HighThroughputExecutor',
12 'ExtremeScaleExecutor',
13 'LowLatencyExecutor',
14 'WorkQueueExecutor',
15 'FluxExecutor']
16
```
Path: `parsl/executors/ipp.py`
Content:
```
1 from parsl.executors.base import ParslExecutor
2
3
4 class IPyParallelExecutor(ParslExecutor):
5 """This stub exists to issue a more helpful warning about the IPyParallel
6 executor being removed from parsl some time after v0.9.
7
8 It can eventually be removed entirely - perhaps after v0.10
9 """
10
11 def __new__(*args, **kwargs):
12 raise RuntimeError("The IPyParallel executor has been removed from parsl")
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/executors/__init__.py b/parsl/executors/__init__.py
--- a/parsl/executors/__init__.py
+++ b/parsl/executors/__init__.py
@@ -1,13 +1,11 @@
from parsl.executors.threads import ThreadPoolExecutor
-from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.workqueue.executor import WorkQueueExecutor
from parsl.executors.high_throughput.executor import HighThroughputExecutor
from parsl.executors.extreme_scale.executor import ExtremeScaleExecutor
from parsl.executors.low_latency.executor import LowLatencyExecutor
from parsl.executors.flux.executor import FluxExecutor
-__all__ = ['IPyParallelExecutor',
- 'ThreadPoolExecutor',
+__all__ = ['ThreadPoolExecutor',
'HighThroughputExecutor',
'ExtremeScaleExecutor',
'LowLatencyExecutor',
diff --git a/parsl/executors/ipp.py b/parsl/executors/ipp.py
deleted file mode 100644
--- a/parsl/executors/ipp.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from parsl.executors.base import ParslExecutor
-
-
-class IPyParallelExecutor(ParslExecutor):
- """This stub exists to issue a more helpful warning about the IPyParallel
- executor being removed from parsl some time after v0.9.
-
- It can eventually be removed entirely - perhaps after v0.10
- """
-
- def __new__(*args, **kwargs):
- raise RuntimeError("The IPyParallel executor has been removed from parsl")
diff --git a/parsl/executors/ipp_controller.py b/parsl/executors/ipp_controller.py
deleted file mode 100644
--- a/parsl/executors/ipp_controller.py
+++ /dev/null
@@ -1,8 +0,0 @@
-class Controller():
- """This stub exists to issue a more helpful warning about the IPyParallel
- executor being removed from parsl some time after v0.9.
-
- It can eventually be removed entirely - perhaps after v0.10
- """
- def __init__(self, *args, **kwargs):
- raise RuntimeError("The IPyParallel executor has been removed from parsl")
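A practical consequence of deleting the stubs instead of keeping them: `from parsl.executors import IPyParallelExecutor` previously imported a placeholder whose constructor raised a RuntimeError pointing at the removal, whereas after this change the name is gone from `parsl/executors/__init__.py` and the module itself is deleted, so the same import fails earlier with an ImportError. A minimal sketch of how downstream configuration code could cope while keeping a readable message (hypothetical handling, not part of the patch):

```python
# Guard an old config that still names the removed executor.
try:
    from parsl.executors import IPyParallelExecutor  # removed by this change
except ImportError:
    IPyParallelExecutor = None

if IPyParallelExecutor is None:
    raise SystemExit(
        "IPyParallel support was removed from parsl; "
        "switch this config to HighThroughputExecutor."
    )
```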
| {"golden_diff": "diff --git a/parsl/executors/__init__.py b/parsl/executors/__init__.py\n--- a/parsl/executors/__init__.py\n+++ b/parsl/executors/__init__.py\n@@ -1,13 +1,11 @@\n from parsl.executors.threads import ThreadPoolExecutor\n-from parsl.executors.ipp import IPyParallelExecutor\n from parsl.executors.workqueue.executor import WorkQueueExecutor\n from parsl.executors.high_throughput.executor import HighThroughputExecutor\n from parsl.executors.extreme_scale.executor import ExtremeScaleExecutor\n from parsl.executors.low_latency.executor import LowLatencyExecutor\n from parsl.executors.flux.executor import FluxExecutor\n \n-__all__ = ['IPyParallelExecutor',\n- 'ThreadPoolExecutor',\n+__all__ = ['ThreadPoolExecutor',\n 'HighThroughputExecutor',\n 'ExtremeScaleExecutor',\n 'LowLatencyExecutor',\ndiff --git a/parsl/executors/ipp.py b/parsl/executors/ipp.py\ndeleted file mode 100644\n--- a/parsl/executors/ipp.py\n+++ /dev/null\n@@ -1,12 +0,0 @@\n-from parsl.executors.base import ParslExecutor\n-\n-\n-class IPyParallelExecutor(ParslExecutor):\n- \"\"\"This stub exists to issue a more helpful warning about the IPyParallel\n- executor being removed from parsl some time after v0.9.\n-\n- It can eventually be removed entirely - perhaps after v0.10\n- \"\"\"\n-\n- def __new__(*args, **kwargs):\n- raise RuntimeError(\"The IPyParallel executor has been removed from parsl\")\ndiff --git a/parsl/executors/ipp_controller.py b/parsl/executors/ipp_controller.py\ndeleted file mode 100644\n--- a/parsl/executors/ipp_controller.py\n+++ /dev/null\n@@ -1,8 +0,0 @@\n-class Controller():\n- \"\"\"This stub exists to issue a more helpful warning about the IPyParallel\n- executor being removed from parsl some time after v0.9.\n-\n- It can eventually be removed entirely - perhaps after v0.10\n- \"\"\"\n- def __init__(self, *args, **kwargs):\n- raise RuntimeError(\"The IPyParallel executor has been removed from parsl\")\n", "issue": "Remove IPyParallel executor\n**Is your feature request related to a problem? Please describe.**\r\nThe parsl ipp code isn't really maintained any more. As I try to tidy up some stuff in the test suite I find myself spending more time than I'd like to debugging what is happening inside the IPP parsl executor, while at the same time the folklore is \"don't use IPP\".\r\n\r\n**Describe the solution you'd like**\r\nI'd like to remove all IPP tests from the test suite now. This will inevitably lead to the ipp code rotting fast, and so in addition the IPP executor should probably be removed in its entirely.\r\n\r\n**Describe alternatives you've considered**\r\nAn alternative is to put developer effort into maintaining IPP as a supported executor.\r\n\r\n**Additional context**\r\nThe folklore on slack seems to be \"don't use IPP\". 
This feature request turns that folklore into reality.\r\n\r\nI invite comment in support or against from @kylechard @yadudoc @annawoodard especially as regards how this would affect existing users who have not moved to htex (if any?).\r\n\r\nsee #1328 \n", "before_files": [{"content": "class Controller():\n \"\"\"This stub exists to issue a more helpful warning about the IPyParallel\n executor being removed from parsl some time after v0.9.\n\n It can eventually be removed entirely - perhaps after v0.10\n \"\"\"\n def __init__(self, *args, **kwargs):\n raise RuntimeError(\"The IPyParallel executor has been removed from parsl\")\n", "path": "parsl/executors/ipp_controller.py"}, {"content": "from parsl.executors.threads import ThreadPoolExecutor\nfrom parsl.executors.ipp import IPyParallelExecutor\nfrom parsl.executors.workqueue.executor import WorkQueueExecutor\nfrom parsl.executors.high_throughput.executor import HighThroughputExecutor\nfrom parsl.executors.extreme_scale.executor import ExtremeScaleExecutor\nfrom parsl.executors.low_latency.executor import LowLatencyExecutor\nfrom parsl.executors.flux.executor import FluxExecutor\n\n__all__ = ['IPyParallelExecutor',\n 'ThreadPoolExecutor',\n 'HighThroughputExecutor',\n 'ExtremeScaleExecutor',\n 'LowLatencyExecutor',\n 'WorkQueueExecutor',\n 'FluxExecutor']\n", "path": "parsl/executors/__init__.py"}, {"content": "from parsl.executors.base import ParslExecutor\n\n\nclass IPyParallelExecutor(ParslExecutor):\n \"\"\"This stub exists to issue a more helpful warning about the IPyParallel\n executor being removed from parsl some time after v0.9.\n\n It can eventually be removed entirely - perhaps after v0.10\n \"\"\"\n\n def __new__(*args, **kwargs):\n raise RuntimeError(\"The IPyParallel executor has been removed from parsl\")\n", "path": "parsl/executors/ipp.py"}], "after_files": [{"content": null, "path": "parsl/executors/ipp_controller.py"}, {"content": "from parsl.executors.threads import ThreadPoolExecutor\nfrom parsl.executors.workqueue.executor import WorkQueueExecutor\nfrom parsl.executors.high_throughput.executor import HighThroughputExecutor\nfrom parsl.executors.extreme_scale.executor import ExtremeScaleExecutor\nfrom parsl.executors.low_latency.executor import LowLatencyExecutor\nfrom parsl.executors.flux.executor import FluxExecutor\n\n__all__ = ['ThreadPoolExecutor',\n 'HighThroughputExecutor',\n 'ExtremeScaleExecutor',\n 'LowLatencyExecutor',\n 'WorkQueueExecutor',\n 'FluxExecutor']\n", "path": "parsl/executors/__init__.py"}, {"content": null, "path": "parsl/executors/ipp.py"}]} | 904 | 516 |
gh_patches_debug_29758 | rasdani/github-patches | git_diff | mabel-dev__opteryx-1644 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🪲 mabel partitioning bug
### Thank you for taking the time to report a problem with Opteryx.
_To help us to respond to your request we ask that you try to provide the below detail about the bug._
**Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._
**Expected behaviour** _A clear and concise description of what you expected to happen._
**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._
~~~sql
~~~
**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opteryx/__version__.py`
Content:
```
1 __build__ = 486
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Store the version here so:
17 1) we don't load dependencies by storing it in __init__.py
18 2) we can import it in setup.py for the same reason
19 """
20 from enum import Enum # isort: skip
21
22
23 class VersionStatus(Enum):
24 ALPHA = "alpha"
25 BETA = "beta"
26 RELEASE = "release"
27
28
29 _major = 0
30 _minor = 15
31 _revision = 0
32 _status = VersionStatus.BETA
33
34 __author__ = "@joocer"
35 __version__ = f"{_major}.{_minor}.{_revision}" + (
36 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else ""
37 )
38
```
Path: `opteryx/managers/schemes/mabel_partitions.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import concurrent.futures
14 import datetime
15 import os
16 from typing import Callable
17 from typing import List
18 from typing import Optional
19
20 from opteryx.exceptions import DataError
21 from opteryx.managers.schemes import BasePartitionScheme
22 from opteryx.utils.file_decoders import DATA_EXTENSIONS
23
24 OS_SEP = os.sep
25
26
27 class UnsupportedSegementationError(DataError):
28 """Exception raised for unsupported segmentations."""
29
30 def __init__(self, dataset: str, segments: set = None):
31 self.dataset = dataset
32 self.segments = segments
33 message = f"'{dataset}' contains unsupported segmentation (`{'`, `'.join(segments)}`), only 'by_hour' segments are supported."
34 super().__init__(message)
35
36
37 def extract_prefix(path, prefix):
38 start_index = path.find(prefix)
39 if start_index == -1:
40 return None
41 end_index = path.find(OS_SEP, start_index)
42 if end_index == -1:
43 return None
44 return path[start_index:end_index]
45
46
47 def is_complete_and_not_invalid(blobs, as_at):
48 # Directly initialize flags to False.
49 complete = False
50 ignore = False
51
52 # Search suffix strings are constant for each call, so define them once.
53 complete_suffix = f"{as_at}{OS_SEP}frame.complete"
54 invalid_suffix = f"{as_at}{OS_SEP}frame.ignore"
55
56 # Iterate over blobs once, checking conditions.
57 for blob in blobs:
58
59 if complete_suffix in blob:
60 complete = True
61 if complete and ignore:
62 break
63 elif invalid_suffix in blob:
64 ignore = True
65 if complete and ignore:
66 break
67
68 return complete and not ignore
69
70
71 class MabelPartitionScheme(BasePartitionScheme):
72 """
73 Handle reading data using the Mabel partition scheme.
74 """
75
76 def get_blobs_in_partition(
77 self,
78 *,
79 blob_list_getter: Callable,
80 prefix: str,
81 start_date: Optional[datetime.datetime],
82 end_date: Optional[datetime.datetime],
83 ) -> List[str]:
84 """filter the blobs acording to the chosen scheme"""
85
86 midnight = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
87 by_label = f"{OS_SEP}by_"
88 as_at_label = f"{OS_SEP}as_at"
89
90 def _inner(*, timestamp):
91 date_path = f"{prefix}{OS_SEP}year_{timestamp.year:04d}{OS_SEP}month_{timestamp.month:02d}{OS_SEP}day_{timestamp.day:02d}"
92 hour_label = f"{OS_SEP}by_hour{OS_SEP}hour={timestamp.hour:02d}/"
93
94 # Call your method to get the list of blob names
95 blob_names = blob_list_getter(prefix=date_path)
96 if len(blob_names) == 0:
97 return []
98
99 control_blobs: List[str] = []
100 data_blobs: List[str] = []
101 segments = set()
102 as_ats = set()
103 hour_blobs: List[str] = []
104
105 for blob in blob_names:
106 extension = os.path.splitext(blob)[1]
107 if extension not in DATA_EXTENSIONS:
108 control_blobs.append(blob)
109 else:
110 data_blobs.append(blob)
111 # Collect hour specific blobs, but only data blobs
112 if hour_label in blob:
113 hour_blobs.append(blob)
114
115 # Collect segments
116 if by_label in blob:
117 segments.add(extract_prefix(blob, "by_"))
118
119 if as_at_label in blob:
120 as_ats.add(extract_prefix(blob, "as_at_"))
121
122 if hour_blobs:
123 data_blobs = hour_blobs
124
125 if segments - {"by_hour", None}:
126 raise UnsupportedSegementationError(dataset=prefix, segments=segments)
127
128 as_at = None
129 as_at_list = sorted(as_ats)
130
131 # Keep popping from as_ats until a valid frame is found
132 while as_at_list:
133 as_at = as_at_list.pop()
134 if as_at is None:
135 continue
136 if is_complete_and_not_invalid(control_blobs, as_at):
137 data_blobs = [blob for blob in data_blobs if as_at in blob]
138 break
139 data_blobs = [blob for blob in data_blobs if as_at not in blob]
140 as_at = None
141
142 return data_blobs
143
144 start_date = start_date or midnight
145 end_date = end_date or midnight.replace(hour=23, minute=59)
146
147 found = set()
148
149 # Use a ThreadPoolExecutor to parallelize fetching blobs for each hour
150 with concurrent.futures.ThreadPoolExecutor() as executor:
151 # Prepare a list of future tasks
152 futures = [
153 executor.submit(_inner, **{"timestamp": ts})
154 for ts in self.hourly_timestamps(start_date, end_date)
155 ]
156 # Wait for all futures to complete and collect results
157 for future in concurrent.futures.as_completed(futures):
158 found.update(future.result())
159
160 return sorted(found)
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opteryx/__version__.py b/opteryx/__version__.py
--- a/opteryx/__version__.py
+++ b/opteryx/__version__.py
@@ -1,4 +1,4 @@
-__build__ = 486
+__build__ = 487
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/opteryx/managers/schemes/mabel_partitions.py b/opteryx/managers/schemes/mabel_partitions.py
--- a/opteryx/managers/schemes/mabel_partitions.py
+++ b/opteryx/managers/schemes/mabel_partitions.py
@@ -98,7 +98,6 @@
control_blobs: List[str] = []
data_blobs: List[str] = []
- segments = set()
as_ats = set()
hour_blobs: List[str] = []
@@ -112,19 +111,19 @@
if hour_label in blob:
hour_blobs.append(blob)
+ if hour_blobs:
+ data_blobs = hour_blobs
+
+ for blob in blob_names:
# Collect segments
if by_label in blob:
- segments.add(extract_prefix(blob, "by_"))
+ segment = extract_prefix(blob, "by_")
+ if segment != "by_hour":
+ raise UnsupportedSegementationError(dataset=prefix, segments=[segment])
if as_at_label in blob:
as_ats.add(extract_prefix(blob, "as_at_"))
- if hour_blobs:
- data_blobs = hour_blobs
-
- if segments - {"by_hour", None}:
- raise UnsupportedSegementationError(dataset=prefix, segments=segments)
-
as_at = None
as_at_list = sorted(as_ats)
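Since the issue body is only the blank template, the intent has to be read from the patch itself: the aggregated `segments` set and the trailing `segments - {"by_hour", None}` check are dropped, the `hour_blobs` substitution is applied first, and each `by_` prefix is then validated as it is encountered in a second pass over `blob_names`, raising `UnsupportedSegementationError` for anything other than `by_hour`. Both versions rely on `extract_prefix` to pull the segment name out of the path; a small sketch of what it returns, with illustrative paths and a POSIX separator assumed:

```python
import os

OS_SEP = os.sep  # "/" is assumed in the example paths below

def extract_prefix(path, prefix):
    start_index = path.find(prefix)
    if start_index == -1:
        return None
    end_index = path.find(OS_SEP, start_index)
    if end_index == -1:
        return None
    return path[start_index:end_index]

blob = "dataset/year_2024/month_05/day_01/by_hour/hour=02/as_at_0001/data.parquet"
print(extract_prefix(blob, "by_"))    # -> "by_hour", accepted

blob2 = blob.replace("by_hour", "by_region")
print(extract_prefix(blob2, "by_"))   # -> "by_region", which the patched loop rejects
```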
| {"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 486\n+__build__ = 487\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\ndiff --git a/opteryx/managers/schemes/mabel_partitions.py b/opteryx/managers/schemes/mabel_partitions.py\n--- a/opteryx/managers/schemes/mabel_partitions.py\n+++ b/opteryx/managers/schemes/mabel_partitions.py\n@@ -98,7 +98,6 @@\n \n control_blobs: List[str] = []\n data_blobs: List[str] = []\n- segments = set()\n as_ats = set()\n hour_blobs: List[str] = []\n \n@@ -112,19 +111,19 @@\n if hour_label in blob:\n hour_blobs.append(blob)\n \n+ if hour_blobs:\n+ data_blobs = hour_blobs\n+\n+ for blob in blob_names:\n # Collect segments\n if by_label in blob:\n- segments.add(extract_prefix(blob, \"by_\"))\n+ segment = extract_prefix(blob, \"by_\")\n+ if segment != \"by_hour\":\n+ raise UnsupportedSegementationError(dataset=prefix, segments=[segment])\n \n if as_at_label in blob:\n as_ats.add(extract_prefix(blob, \"as_at_\"))\n \n- if hour_blobs:\n- data_blobs = hour_blobs\n-\n- if segments - {\"by_hour\", None}:\n- raise UnsupportedSegementationError(dataset=prefix, segments=segments)\n-\n as_at = None\n as_at_list = sorted(as_ats)\n", "issue": "\ud83e\udeb2 mabel partitioning bug\n### Thank you for taking the time to report a problem with Opteryx.\r\n_To help us to respond to your request we ask that you try to provide the below detail about the bug._\r\n\r\n**Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._\r\n\r\n\r\n**Expected behaviour** _A clear and concise description of what you expected to happen._\r\n\r\n\r\n**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._\r\n\r\n~~~sql\r\n\r\n~~~\r\n\r\n**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._\r\n\n", "before_files": [{"content": "__build__ = 486\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 15\n_revision = 0\n_status = VersionStatus.BETA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License 
at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport concurrent.futures\nimport datetime\nimport os\nfrom typing import Callable\nfrom typing import List\nfrom typing import Optional\n\nfrom opteryx.exceptions import DataError\nfrom opteryx.managers.schemes import BasePartitionScheme\nfrom opteryx.utils.file_decoders import DATA_EXTENSIONS\n\nOS_SEP = os.sep\n\n\nclass UnsupportedSegementationError(DataError):\n \"\"\"Exception raised for unsupported segmentations.\"\"\"\n\n def __init__(self, dataset: str, segments: set = None):\n self.dataset = dataset\n self.segments = segments\n message = f\"'{dataset}' contains unsupported segmentation (`{'`, `'.join(segments)}`), only 'by_hour' segments are supported.\"\n super().__init__(message)\n\n\ndef extract_prefix(path, prefix):\n start_index = path.find(prefix)\n if start_index == -1:\n return None\n end_index = path.find(OS_SEP, start_index)\n if end_index == -1:\n return None\n return path[start_index:end_index]\n\n\ndef is_complete_and_not_invalid(blobs, as_at):\n # Directly initialize flags to False.\n complete = False\n ignore = False\n\n # Search suffix strings are constant for each call, so define them once.\n complete_suffix = f\"{as_at}{OS_SEP}frame.complete\"\n invalid_suffix = f\"{as_at}{OS_SEP}frame.ignore\"\n\n # Iterate over blobs once, checking conditions.\n for blob in blobs:\n\n if complete_suffix in blob:\n complete = True\n if complete and ignore:\n break\n elif invalid_suffix in blob:\n ignore = True\n if complete and ignore:\n break\n\n return complete and not ignore\n\n\nclass MabelPartitionScheme(BasePartitionScheme):\n \"\"\"\n Handle reading data using the Mabel partition scheme.\n \"\"\"\n\n def get_blobs_in_partition(\n self,\n *,\n blob_list_getter: Callable,\n prefix: str,\n start_date: Optional[datetime.datetime],\n end_date: Optional[datetime.datetime],\n ) -> List[str]:\n \"\"\"filter the blobs acording to the chosen scheme\"\"\"\n\n midnight = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)\n by_label = f\"{OS_SEP}by_\"\n as_at_label = f\"{OS_SEP}as_at\"\n\n def _inner(*, timestamp):\n date_path = f\"{prefix}{OS_SEP}year_{timestamp.year:04d}{OS_SEP}month_{timestamp.month:02d}{OS_SEP}day_{timestamp.day:02d}\"\n hour_label = f\"{OS_SEP}by_hour{OS_SEP}hour={timestamp.hour:02d}/\"\n\n # Call your method to get the list of blob names\n blob_names = blob_list_getter(prefix=date_path)\n if len(blob_names) == 0:\n return []\n\n control_blobs: List[str] = []\n data_blobs: List[str] = []\n segments = set()\n as_ats = set()\n hour_blobs: List[str] = []\n\n for blob in blob_names:\n extension = os.path.splitext(blob)[1]\n if extension not in DATA_EXTENSIONS:\n control_blobs.append(blob)\n else:\n data_blobs.append(blob)\n # Collect hour specific blobs, but only data blobs\n if hour_label in blob:\n hour_blobs.append(blob)\n\n # Collect segments\n if by_label in blob:\n segments.add(extract_prefix(blob, \"by_\"))\n\n if as_at_label in blob:\n as_ats.add(extract_prefix(blob, \"as_at_\"))\n\n if hour_blobs:\n data_blobs = hour_blobs\n\n if segments - {\"by_hour\", None}:\n raise UnsupportedSegementationError(dataset=prefix, segments=segments)\n\n as_at = None\n 
as_at_list = sorted(as_ats)\n\n # Keep popping from as_ats until a valid frame is found\n while as_at_list:\n as_at = as_at_list.pop()\n if as_at is None:\n continue\n if is_complete_and_not_invalid(control_blobs, as_at):\n data_blobs = [blob for blob in data_blobs if as_at in blob]\n break\n data_blobs = [blob for blob in data_blobs if as_at not in blob]\n as_at = None\n\n return data_blobs\n\n start_date = start_date or midnight\n end_date = end_date or midnight.replace(hour=23, minute=59)\n\n found = set()\n\n # Use a ThreadPoolExecutor to parallelize fetching blobs for each hour\n with concurrent.futures.ThreadPoolExecutor() as executor:\n # Prepare a list of future tasks\n futures = [\n executor.submit(_inner, **{\"timestamp\": ts})\n for ts in self.hourly_timestamps(start_date, end_date)\n ]\n # Wait for all futures to complete and collect results\n for future in concurrent.futures.as_completed(futures):\n found.update(future.result())\n\n return sorted(found)\n", "path": "opteryx/managers/schemes/mabel_partitions.py"}], "after_files": [{"content": "__build__ = 487\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 15\n_revision = 0\n_status = VersionStatus.BETA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport concurrent.futures\nimport datetime\nimport os\nfrom typing import Callable\nfrom typing import List\nfrom typing import Optional\n\nfrom opteryx.exceptions import DataError\nfrom opteryx.managers.schemes import BasePartitionScheme\nfrom opteryx.utils.file_decoders import DATA_EXTENSIONS\n\nOS_SEP = os.sep\n\n\nclass UnsupportedSegementationError(DataError):\n \"\"\"Exception raised for unsupported segmentations.\"\"\"\n\n def __init__(self, dataset: str, segments: set = None):\n self.dataset = dataset\n self.segments = segments\n message = f\"'{dataset}' contains unsupported segmentation (`{'`, `'.join(segments)}`), only 'by_hour' segments are supported.\"\n super().__init__(message)\n\n\ndef extract_prefix(path, prefix):\n start_index = 
path.find(prefix)\n if start_index == -1:\n return None\n end_index = path.find(OS_SEP, start_index)\n if end_index == -1:\n return None\n return path[start_index:end_index]\n\n\ndef is_complete_and_not_invalid(blobs, as_at):\n # Directly initialize flags to False.\n complete = False\n ignore = False\n\n # Search suffix strings are constant for each call, so define them once.\n complete_suffix = f\"{as_at}{OS_SEP}frame.complete\"\n invalid_suffix = f\"{as_at}{OS_SEP}frame.ignore\"\n\n # Iterate over blobs once, checking conditions.\n for blob in blobs:\n\n if complete_suffix in blob:\n complete = True\n if complete and ignore:\n break\n elif invalid_suffix in blob:\n ignore = True\n if complete and ignore:\n break\n\n return complete and not ignore\n\n\nclass MabelPartitionScheme(BasePartitionScheme):\n \"\"\"\n Handle reading data using the Mabel partition scheme.\n \"\"\"\n\n def get_blobs_in_partition(\n self,\n *,\n blob_list_getter: Callable,\n prefix: str,\n start_date: Optional[datetime.datetime],\n end_date: Optional[datetime.datetime],\n ) -> List[str]:\n \"\"\"filter the blobs acording to the chosen scheme\"\"\"\n\n midnight = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)\n by_label = f\"{OS_SEP}by_\"\n as_at_label = f\"{OS_SEP}as_at\"\n\n def _inner(*, timestamp):\n date_path = f\"{prefix}{OS_SEP}year_{timestamp.year:04d}{OS_SEP}month_{timestamp.month:02d}{OS_SEP}day_{timestamp.day:02d}\"\n hour_label = f\"{OS_SEP}by_hour{OS_SEP}hour={timestamp.hour:02d}/\"\n\n # Call your method to get the list of blob names\n blob_names = blob_list_getter(prefix=date_path)\n if len(blob_names) == 0:\n return []\n\n control_blobs: List[str] = []\n data_blobs: List[str] = []\n as_ats = set()\n hour_blobs: List[str] = []\n\n for blob in blob_names:\n extension = os.path.splitext(blob)[1]\n if extension not in DATA_EXTENSIONS:\n control_blobs.append(blob)\n else:\n data_blobs.append(blob)\n # Collect hour specific blobs, but only data blobs\n if hour_label in blob:\n hour_blobs.append(blob)\n\n if hour_blobs:\n data_blobs = hour_blobs\n\n for blob in blob_names:\n # Collect segments\n if by_label in blob:\n segment = extract_prefix(blob, \"by_\")\n if segment != \"by_hour\":\n raise UnsupportedSegementationError(dataset=prefix, segments=[segment])\n\n if as_at_label in blob:\n as_ats.add(extract_prefix(blob, \"as_at_\"))\n\n as_at = None\n as_at_list = sorted(as_ats)\n\n # Keep popping from as_ats until a valid frame is found\n while as_at_list:\n as_at = as_at_list.pop()\n if as_at is None:\n continue\n if is_complete_and_not_invalid(control_blobs, as_at):\n data_blobs = [blob for blob in data_blobs if as_at in blob]\n break\n data_blobs = [blob for blob in data_blobs if as_at not in blob]\n as_at = None\n\n return data_blobs\n\n start_date = start_date or midnight\n end_date = end_date or midnight.replace(hour=23, minute=59)\n\n found = set()\n\n # Use a ThreadPoolExecutor to parallelize fetching blobs for each hour\n with concurrent.futures.ThreadPoolExecutor() as executor:\n # Prepare a list of future tasks\n futures = [\n executor.submit(_inner, **{\"timestamp\": ts})\n for ts in self.hourly_timestamps(start_date, end_date)\n ]\n # Wait for all futures to complete and collect results\n for future in concurrent.futures.as_completed(futures):\n found.update(future.result())\n\n return sorted(found)\n", "path": "opteryx/managers/schemes/mabel_partitions.py"}]} | 2,388 | 421 |
gh_patches_debug_11808 | rasdani/github-patches | git_diff | avocado-framework__avocado-4549 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Avocado crash without output
When I run nrunner with the wrong test path avocado crashes without any information. It can be reproduced by:
`avocado run --test-runner=nrunner --ignore-missing-references -- test_utils_partition.py`
--- END ISSUE ---
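The report gives only the reproduction command, so the observable symptom is the main evidence: `--ignore-missing-references` is meant to tolerate references that cannot be resolved, and when nothing resolves the nrunner-based job apparently crashes with no explanation instead of reporting the empty resolution. A small way to capture exactly what reaches the terminal, using `avocado.utils.process` (the same utility `run.py` below already imports); the test path is just a stand-in for any unresolvable reference:

```python
from avocado.utils import process

# Re-run the reported command and record what the user actually sees.
cmd = ("avocado run --test-runner=nrunner --ignore-missing-references "
       "-- test_utils_partition.py")
result = process.run(cmd, ignore_status=True)  # do not raise on a non-zero exit

print("exit status:", result.exit_status)
print("stdout:", result.stdout_text or "<empty>")
print("stderr:", result.stderr_text or "<empty>")  # per the report: nothing useful here
```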
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `avocado/plugins/run.py`
Content:
```
1 # This program is free software; you can redistribute it and/or modify
2 # it under the terms of the GNU General Public License as published by
3 # the Free Software Foundation; either version 2 of the License, or
4 # (at your option) any later version.
5 #
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9 #
10 # See LICENSE for more details.
11 #
12 # Copyright: Red Hat Inc. 2013-2014
13 # Author: Ruda Moura <[email protected]>
14
15 """
16 Base Test Runner Plugins.
17 """
18
19 import argparse
20 import sys
21
22 from avocado.core import exit_codes, job, loader, parser_common_args
23 from avocado.core.output import LOG_UI
24 from avocado.core.plugin_interfaces import CLICmd, Init
25 from avocado.core.settings import settings
26 from avocado.core.suite import TestSuite, TestSuiteError
27 from avocado.utils import process
28
29
30 class RunInit(Init):
31
32 name = 'run'
33 description = 'Initializes the run options'
34
35 def initialize(self):
36 help_msg = ('Defines the order of iterating through test suite '
37 'and test variants')
38 settings.register_option(section='run',
39 key='execution_order',
40 choices=('tests-per-variant',
41 'variants-per-test'),
42 default='variants-per-test',
43 help_msg=help_msg)
44
45
46 class Run(CLICmd):
47
48 """
49 Implements the avocado 'run' subcommand
50 """
51
52 name = 'run'
53 description = ("Runs one or more tests (native test, test alias, binary "
54 "or script)")
55
56 @staticmethod
57 def _test_parameter(string):
58 param_name_value = string.split('=', 1)
59 if len(param_name_value) < 2:
60 msg = ('Invalid --test-parameter option: "%s". Valid option must '
61 'be a "NAME=VALUE" like expression' % string)
62 raise argparse.ArgumentTypeError(msg)
63 return param_name_value
64
65 def configure(self, parser):
66 """
67 Add the subparser for the run action.
68
69 :param parser: Main test runner parser.
70 """
71 parser = super(Run, self).configure(parser)
72
73 help_msg = 'List of test references (aliases or paths)'
74 settings.register_option(section='run',
75 key='references',
76 key_type=list,
77 default=[],
78 nargs='*',
79 metavar='TEST_REFERENCE',
80 parser=parser,
81 help_msg=help_msg,
82 positional_arg=True)
83
84 help_msg = ('Parameter name and value to pass to all tests. This is '
85 'only applicable when not using a varianter plugin. '
86 'This option format must be given in the NAME=VALUE '
87 'format, and may be given any number of times, or per '
88 'parameter.')
89 settings.register_option(section='run',
90 key='test_parameters',
91 action='append',
92 default=[],
93 key_type=self._test_parameter,
94 metavar="NAME_VALUE",
95 help_msg=help_msg,
96 parser=parser,
97 long_arg='--test-parameter',
98 short_arg='-p')
99
100 help_msg = ('Selects the runner implementation from one of the '
101 'installed and active implementations. You can run '
102 '"avocado plugins" and find the list of valid runners '
103 'under the "Plugins that run test suites on a job '
104 '(runners) section. Defaults to "runner", which is '
105 'the conventional and traditional runner.')
106 settings.register_option(section='run',
107 key='test_runner',
108 default='runner',
109 help_msg=help_msg,
110 parser=parser,
111 long_arg='--test-runner')
112
113 help_msg = ('Instead of running the test only list them and log '
114 'their params.')
115 settings.register_option(section='run.dry_run',
116 key='enabled',
117 default=False,
118 key_type=bool,
119 help_msg=help_msg,
120 parser=parser,
121 short_arg='-d',
122 long_arg='--dry-run')
123
124 help_msg = ('Do not automatically clean up temporary directories '
125 'used by dry-run')
126 settings.register_option(section='run.dry_run',
127 key='no_cleanup',
128 help_msg=help_msg,
129 default=False,
130 key_type=bool,
131 parser=parser,
132 long_arg='--dry-run-no-cleanup')
133
134 help_msg = ('Forces the use of a particular job ID. Used internally '
135 'when interacting with an avocado server. You should not '
136 'use this option unless you know exactly what you\'re '
137 'doing')
138 settings.register_option(section='run',
139 key='unique_job_id',
140 default=None,
141 help_msg=help_msg,
142 parser=parser,
143 long_arg='--force-job-id')
144
145 help_msg = 'Forces to use of an alternate job results directory.'
146 settings.register_option(section='run',
147 key='results_dir',
148 default=None,
149 metavar='DIRECTORY',
150 help_msg=help_msg,
151 parser=parser,
152 long_arg='--job-results-dir')
153
154 help_msg = ('Categorizes this within a directory with the same name, '
155 'by creating a link to the job result directory')
156 settings.register_option(section='run',
157 key='job_category',
158 help_msg=help_msg,
159 parser=parser,
160 default=None,
161 metavar='CATEGORY',
162 long_arg='--job-category')
163
164 settings.add_argparser_to_option(namespace='job.run.timeout',
165 metavar='SECONDS',
166 parser=parser,
167 long_arg='--job-timeout')
168
169 help_msg = 'Enable the job interruption on first failed test.'
170 settings.register_option(section='run',
171 key='failfast',
172 default=False,
173 key_type=bool,
174 action='store_true',
175 help_msg=help_msg,
176 parser=parser,
177 long_arg='--failfast')
178
179 help_msg = 'Keep job temporary files (useful for avocado debugging).'
180 settings.register_option(section='run',
181 key='keep_tmp',
182 default=False,
183 key_type=bool,
184 action='store_true',
185 help_msg=help_msg,
186 parser=parser,
187 long_arg='--keep-tmp')
188
189 help_msg = ('Force the job execution, even if some of the test '
190 'references are not resolved to tests. "on" and '
191 '"off" will be deprecated soon.')
192 settings.register_option(section='run',
193 key='ignore_missing_references',
194 default=False,
195 key_type=bool,
196 help_msg=help_msg,
197 parser=parser,
198 long_arg='--ignore-missing-references')
199
200 settings.add_argparser_to_option(namespace='sysinfo.collect.enabled',
201 parser=parser,
202 action='store_false',
203 long_arg='--disable-sysinfo')
204
205 settings.add_argparser_to_option('run.execution_order',
206 parser=parser,
207 long_arg='--execution-order')
208
209 parser.output = parser.add_argument_group('output and result format')
210
211 help_msg = ('Store given logging STREAMs in '
212 '"$JOB_RESULTS_DIR/$STREAM.$LEVEL."')
213 settings.register_option(section='run',
214 key='store_logging_stream',
215 nargs='+',
216 help_msg=help_msg,
217 default=[],
218 metavar='STREAM[:LEVEL]',
219 key_type=list,
220 parser=parser,
221 long_arg='--store-logging-stream')
222
223 help_msg = ('Logs the possible data directories for each test. This '
224 'is helpful when writing new tests and not being sure '
225 'where to put data files. Look for "Test data '
226 'directories" in your test log')
227 settings.register_option(section='run',
228 key='log_test_data_directories',
229 default=False,
230 key_type=bool,
231 help_msg=help_msg,
232 parser=parser,
233 long_arg='--log-test-data-directories')
234
235 out_check = parser.add_argument_group('output check arguments')
236
237 help_msg = ('Record the output produced by each test (from stdout '
238 'and stderr) into both the current executing result '
239 'and into reference files. Reference files are used on '
240 'subsequent runs to determine if the test produced the '
241 'expected output or not, and the current executing result '
242 'is used to check against a previously recorded reference '
243 'file. Valid values: "none" (to explicitly disable all '
244 'recording) "stdout" (to record standard output *only*), '
245 '"stderr" (to record standard error *only*), "both" (to '
246 'record standard output and error in separate files), '
247 '"combined" (for standard output and error in a single '
248 'file). "all" is also a valid but deprecated option that '
249 'is a synonym of "both".')
250 settings.register_option(section='run',
251 key='output_check_record',
252 help_msg=help_msg,
253 choices=('none', 'stdout', 'stderr',
254 'both', 'combined', 'all'),
255 parser=out_check,
256 default=None,
257 long_arg='--output-check-record')
258
259 help_msg = ('Disables test output (stdout/stderr) check. If this '
260 'option is given, no output will be checked, even if '
261 'there are reference files present for the test.')
262 settings.register_option(section='run',
263 key='output_check',
264 default=True,
265 key_type=bool,
266 action='store_false',
267 help_msg=help_msg,
268 parser=out_check,
269 long_arg='--disable-output-check')
270
271 loader.add_loader_options(parser, 'run')
272 parser_common_args.add_tag_filter_args(parser)
273
274 def run(self, config):
275 """
276 Run test modules or simple tests.
277
278 :param config: Configuration received from command line parser and
279 possibly other sources.
280 :type config: dict
281 """
282 if 'run.output_check_record' in config:
283 check_record = config.get('run.output_check_record')
284 process.OUTPUT_CHECK_RECORD_MODE = check_record
285
286 unique_job_id = config.get('run.unique_job_id')
287 if unique_job_id is not None:
288 try:
289 int(unique_job_id, 16)
290 if len(unique_job_id) != 40:
291 raise ValueError
292 except ValueError:
293 LOG_UI.error('Unique Job ID needs to be a 40 digit hex number')
294 sys.exit(exit_codes.AVOCADO_FAIL)
295
296 try:
297 suite = TestSuite.from_config(config, name='')
298 if suite.size == 0:
299 sys.exit(exit_codes.AVOCADO_JOB_FAIL)
300 except TestSuiteError as err:
301 LOG_UI.error(err)
302 sys.exit(exit_codes.AVOCADO_JOB_FAIL)
303 with job.Job(config, [suite]) as job_instance:
304 return job_instance.run()
305
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/avocado/plugins/run.py b/avocado/plugins/run.py
--- a/avocado/plugins/run.py
+++ b/avocado/plugins/run.py
@@ -296,7 +296,11 @@
try:
suite = TestSuite.from_config(config, name='')
if suite.size == 0:
- sys.exit(exit_codes.AVOCADO_JOB_FAIL)
+ msg = ("Suite is empty. There is no tests to run. This usually "
+ "happens when you pass --ignore-missing-references and "
+ "there is no more references to process.")
+ LOG_UI.warning(msg)
+ sys.exit(exit_codes.AVOCADO_FAIL)
except TestSuiteError as err:
LOG_UI.error(err)
sys.exit(exit_codes.AVOCADO_JOB_FAIL)
| {"golden_diff": "diff --git a/avocado/plugins/run.py b/avocado/plugins/run.py\n--- a/avocado/plugins/run.py\n+++ b/avocado/plugins/run.py\n@@ -296,7 +296,11 @@\n try:\n suite = TestSuite.from_config(config, name='')\n if suite.size == 0:\n- sys.exit(exit_codes.AVOCADO_JOB_FAIL)\n+ msg = (\"Suite is empty. There is no tests to run. This usually \"\n+ \"happens when you pass --ignore-missing-references and \"\n+ \"there is no more references to process.\")\n+ LOG_UI.warning(msg)\n+ sys.exit(exit_codes.AVOCADO_FAIL)\n except TestSuiteError as err:\n LOG_UI.error(err)\n sys.exit(exit_codes.AVOCADO_JOB_FAIL)\n", "issue": "Avocado crash without output\nWhen I run nrunner with the wrong test path avocado crashes without any information. It can be reproduced by:\r\n\r\n`avocado run --test-runner=nrunner --ignore-missing-references -- test_utils_partition.py`\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2013-2014\n# Author: Ruda Moura <[email protected]>\n\n\"\"\"\nBase Test Runner Plugins.\n\"\"\"\n\nimport argparse\nimport sys\n\nfrom avocado.core import exit_codes, job, loader, parser_common_args\nfrom avocado.core.output import LOG_UI\nfrom avocado.core.plugin_interfaces import CLICmd, Init\nfrom avocado.core.settings import settings\nfrom avocado.core.suite import TestSuite, TestSuiteError\nfrom avocado.utils import process\n\n\nclass RunInit(Init):\n\n name = 'run'\n description = 'Initializes the run options'\n\n def initialize(self):\n help_msg = ('Defines the order of iterating through test suite '\n 'and test variants')\n settings.register_option(section='run',\n key='execution_order',\n choices=('tests-per-variant',\n 'variants-per-test'),\n default='variants-per-test',\n help_msg=help_msg)\n\n\nclass Run(CLICmd):\n\n \"\"\"\n Implements the avocado 'run' subcommand\n \"\"\"\n\n name = 'run'\n description = (\"Runs one or more tests (native test, test alias, binary \"\n \"or script)\")\n\n @staticmethod\n def _test_parameter(string):\n param_name_value = string.split('=', 1)\n if len(param_name_value) < 2:\n msg = ('Invalid --test-parameter option: \"%s\". Valid option must '\n 'be a \"NAME=VALUE\" like expression' % string)\n raise argparse.ArgumentTypeError(msg)\n return param_name_value\n\n def configure(self, parser):\n \"\"\"\n Add the subparser for the run action.\n\n :param parser: Main test runner parser.\n \"\"\"\n parser = super(Run, self).configure(parser)\n\n help_msg = 'List of test references (aliases or paths)'\n settings.register_option(section='run',\n key='references',\n key_type=list,\n default=[],\n nargs='*',\n metavar='TEST_REFERENCE',\n parser=parser,\n help_msg=help_msg,\n positional_arg=True)\n\n help_msg = ('Parameter name and value to pass to all tests. This is '\n 'only applicable when not using a varianter plugin. 
'\n 'This option format must be given in the NAME=VALUE '\n 'format, and may be given any number of times, or per '\n 'parameter.')\n settings.register_option(section='run',\n key='test_parameters',\n action='append',\n default=[],\n key_type=self._test_parameter,\n metavar=\"NAME_VALUE\",\n help_msg=help_msg,\n parser=parser,\n long_arg='--test-parameter',\n short_arg='-p')\n\n help_msg = ('Selects the runner implementation from one of the '\n 'installed and active implementations. You can run '\n '\"avocado plugins\" and find the list of valid runners '\n 'under the \"Plugins that run test suites on a job '\n '(runners) section. Defaults to \"runner\", which is '\n 'the conventional and traditional runner.')\n settings.register_option(section='run',\n key='test_runner',\n default='runner',\n help_msg=help_msg,\n parser=parser,\n long_arg='--test-runner')\n\n help_msg = ('Instead of running the test only list them and log '\n 'their params.')\n settings.register_option(section='run.dry_run',\n key='enabled',\n default=False,\n key_type=bool,\n help_msg=help_msg,\n parser=parser,\n short_arg='-d',\n long_arg='--dry-run')\n\n help_msg = ('Do not automatically clean up temporary directories '\n 'used by dry-run')\n settings.register_option(section='run.dry_run',\n key='no_cleanup',\n help_msg=help_msg,\n default=False,\n key_type=bool,\n parser=parser,\n long_arg='--dry-run-no-cleanup')\n\n help_msg = ('Forces the use of a particular job ID. Used internally '\n 'when interacting with an avocado server. You should not '\n 'use this option unless you know exactly what you\\'re '\n 'doing')\n settings.register_option(section='run',\n key='unique_job_id',\n default=None,\n help_msg=help_msg,\n parser=parser,\n long_arg='--force-job-id')\n\n help_msg = 'Forces to use of an alternate job results directory.'\n settings.register_option(section='run',\n key='results_dir',\n default=None,\n metavar='DIRECTORY',\n help_msg=help_msg,\n parser=parser,\n long_arg='--job-results-dir')\n\n help_msg = ('Categorizes this within a directory with the same name, '\n 'by creating a link to the job result directory')\n settings.register_option(section='run',\n key='job_category',\n help_msg=help_msg,\n parser=parser,\n default=None,\n metavar='CATEGORY',\n long_arg='--job-category')\n\n settings.add_argparser_to_option(namespace='job.run.timeout',\n metavar='SECONDS',\n parser=parser,\n long_arg='--job-timeout')\n\n help_msg = 'Enable the job interruption on first failed test.'\n settings.register_option(section='run',\n key='failfast',\n default=False,\n key_type=bool,\n action='store_true',\n help_msg=help_msg,\n parser=parser,\n long_arg='--failfast')\n\n help_msg = 'Keep job temporary files (useful for avocado debugging).'\n settings.register_option(section='run',\n key='keep_tmp',\n default=False,\n key_type=bool,\n action='store_true',\n help_msg=help_msg,\n parser=parser,\n long_arg='--keep-tmp')\n\n help_msg = ('Force the job execution, even if some of the test '\n 'references are not resolved to tests. 
\"on\" and '\n '\"off\" will be deprecated soon.')\n settings.register_option(section='run',\n key='ignore_missing_references',\n default=False,\n key_type=bool,\n help_msg=help_msg,\n parser=parser,\n long_arg='--ignore-missing-references')\n\n settings.add_argparser_to_option(namespace='sysinfo.collect.enabled',\n parser=parser,\n action='store_false',\n long_arg='--disable-sysinfo')\n\n settings.add_argparser_to_option('run.execution_order',\n parser=parser,\n long_arg='--execution-order')\n\n parser.output = parser.add_argument_group('output and result format')\n\n help_msg = ('Store given logging STREAMs in '\n '\"$JOB_RESULTS_DIR/$STREAM.$LEVEL.\"')\n settings.register_option(section='run',\n key='store_logging_stream',\n nargs='+',\n help_msg=help_msg,\n default=[],\n metavar='STREAM[:LEVEL]',\n key_type=list,\n parser=parser,\n long_arg='--store-logging-stream')\n\n help_msg = ('Logs the possible data directories for each test. This '\n 'is helpful when writing new tests and not being sure '\n 'where to put data files. Look for \"Test data '\n 'directories\" in your test log')\n settings.register_option(section='run',\n key='log_test_data_directories',\n default=False,\n key_type=bool,\n help_msg=help_msg,\n parser=parser,\n long_arg='--log-test-data-directories')\n\n out_check = parser.add_argument_group('output check arguments')\n\n help_msg = ('Record the output produced by each test (from stdout '\n 'and stderr) into both the current executing result '\n 'and into reference files. Reference files are used on '\n 'subsequent runs to determine if the test produced the '\n 'expected output or not, and the current executing result '\n 'is used to check against a previously recorded reference '\n 'file. Valid values: \"none\" (to explicitly disable all '\n 'recording) \"stdout\" (to record standard output *only*), '\n '\"stderr\" (to record standard error *only*), \"both\" (to '\n 'record standard output and error in separate files), '\n '\"combined\" (for standard output and error in a single '\n 'file). \"all\" is also a valid but deprecated option that '\n 'is a synonym of \"both\".')\n settings.register_option(section='run',\n key='output_check_record',\n help_msg=help_msg,\n choices=('none', 'stdout', 'stderr',\n 'both', 'combined', 'all'),\n parser=out_check,\n default=None,\n long_arg='--output-check-record')\n\n help_msg = ('Disables test output (stdout/stderr) check. 
If this '\n 'option is given, no output will be checked, even if '\n 'there are reference files present for the test.')\n settings.register_option(section='run',\n key='output_check',\n default=True,\n key_type=bool,\n action='store_false',\n help_msg=help_msg,\n parser=out_check,\n long_arg='--disable-output-check')\n\n loader.add_loader_options(parser, 'run')\n parser_common_args.add_tag_filter_args(parser)\n\n def run(self, config):\n \"\"\"\n Run test modules or simple tests.\n\n :param config: Configuration received from command line parser and\n possibly other sources.\n :type config: dict\n \"\"\"\n if 'run.output_check_record' in config:\n check_record = config.get('run.output_check_record')\n process.OUTPUT_CHECK_RECORD_MODE = check_record\n\n unique_job_id = config.get('run.unique_job_id')\n if unique_job_id is not None:\n try:\n int(unique_job_id, 16)\n if len(unique_job_id) != 40:\n raise ValueError\n except ValueError:\n LOG_UI.error('Unique Job ID needs to be a 40 digit hex number')\n sys.exit(exit_codes.AVOCADO_FAIL)\n\n try:\n suite = TestSuite.from_config(config, name='')\n if suite.size == 0:\n sys.exit(exit_codes.AVOCADO_JOB_FAIL)\n except TestSuiteError as err:\n LOG_UI.error(err)\n sys.exit(exit_codes.AVOCADO_JOB_FAIL)\n with job.Job(config, [suite]) as job_instance:\n return job_instance.run()\n", "path": "avocado/plugins/run.py"}], "after_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2013-2014\n# Author: Ruda Moura <[email protected]>\n\n\"\"\"\nBase Test Runner Plugins.\n\"\"\"\n\nimport argparse\nimport sys\n\nfrom avocado.core import exit_codes, job, loader, parser_common_args\nfrom avocado.core.output import LOG_UI\nfrom avocado.core.plugin_interfaces import CLICmd, Init\nfrom avocado.core.settings import settings\nfrom avocado.core.suite import TestSuite, TestSuiteError\nfrom avocado.utils import process\n\n\nclass RunInit(Init):\n\n name = 'run'\n description = 'Initializes the run options'\n\n def initialize(self):\n help_msg = ('Defines the order of iterating through test suite '\n 'and test variants')\n settings.register_option(section='run',\n key='execution_order',\n choices=('tests-per-variant',\n 'variants-per-test'),\n default='variants-per-test',\n help_msg=help_msg)\n\n\nclass Run(CLICmd):\n\n \"\"\"\n Implements the avocado 'run' subcommand\n \"\"\"\n\n name = 'run'\n description = (\"Runs one or more tests (native test, test alias, binary \"\n \"or script)\")\n\n @staticmethod\n def _test_parameter(string):\n param_name_value = string.split('=', 1)\n if len(param_name_value) < 2:\n msg = ('Invalid --test-parameter option: \"%s\". 
Valid option must '\n 'be a \"NAME=VALUE\" like expression' % string)\n raise argparse.ArgumentTypeError(msg)\n return param_name_value\n\n def configure(self, parser):\n \"\"\"\n Add the subparser for the run action.\n\n :param parser: Main test runner parser.\n \"\"\"\n parser = super(Run, self).configure(parser)\n\n help_msg = 'List of test references (aliases or paths)'\n settings.register_option(section='run',\n key='references',\n key_type=list,\n default=[],\n nargs='*',\n metavar='TEST_REFERENCE',\n parser=parser,\n help_msg=help_msg,\n positional_arg=True)\n\n help_msg = ('Parameter name and value to pass to all tests. This is '\n 'only applicable when not using a varianter plugin. '\n 'This option format must be given in the NAME=VALUE '\n 'format, and may be given any number of times, or per '\n 'parameter.')\n settings.register_option(section='run',\n key='test_parameters',\n action='append',\n default=[],\n key_type=self._test_parameter,\n metavar=\"NAME_VALUE\",\n help_msg=help_msg,\n parser=parser,\n long_arg='--test-parameter',\n short_arg='-p')\n\n help_msg = ('Selects the runner implementation from one of the '\n 'installed and active implementations. You can run '\n '\"avocado plugins\" and find the list of valid runners '\n 'under the \"Plugins that run test suites on a job '\n '(runners) section. Defaults to \"runner\", which is '\n 'the conventional and traditional runner.')\n settings.register_option(section='run',\n key='test_runner',\n default='runner',\n help_msg=help_msg,\n parser=parser,\n long_arg='--test-runner')\n\n help_msg = ('Instead of running the test only list them and log '\n 'their params.')\n settings.register_option(section='run.dry_run',\n key='enabled',\n default=False,\n key_type=bool,\n help_msg=help_msg,\n parser=parser,\n short_arg='-d',\n long_arg='--dry-run')\n\n help_msg = ('Do not automatically clean up temporary directories '\n 'used by dry-run')\n settings.register_option(section='run.dry_run',\n key='no_cleanup',\n help_msg=help_msg,\n default=False,\n key_type=bool,\n parser=parser,\n long_arg='--dry-run-no-cleanup')\n\n help_msg = ('Forces the use of a particular job ID. Used internally '\n 'when interacting with an avocado server. 
You should not '\n 'use this option unless you know exactly what you\\'re '\n 'doing')\n settings.register_option(section='run',\n key='unique_job_id',\n default=None,\n help_msg=help_msg,\n parser=parser,\n long_arg='--force-job-id')\n\n help_msg = 'Forces to use of an alternate job results directory.'\n settings.register_option(section='run',\n key='results_dir',\n default=None,\n metavar='DIRECTORY',\n help_msg=help_msg,\n parser=parser,\n long_arg='--job-results-dir')\n\n help_msg = ('Categorizes this within a directory with the same name, '\n 'by creating a link to the job result directory')\n settings.register_option(section='run',\n key='job_category',\n help_msg=help_msg,\n parser=parser,\n default=None,\n metavar='CATEGORY',\n long_arg='--job-category')\n\n settings.add_argparser_to_option(namespace='job.run.timeout',\n metavar='SECONDS',\n parser=parser,\n long_arg='--job-timeout')\n\n help_msg = 'Enable the job interruption on first failed test.'\n settings.register_option(section='run',\n key='failfast',\n default=False,\n key_type=bool,\n action='store_true',\n help_msg=help_msg,\n parser=parser,\n long_arg='--failfast')\n\n help_msg = 'Keep job temporary files (useful for avocado debugging).'\n settings.register_option(section='run',\n key='keep_tmp',\n default=False,\n key_type=bool,\n action='store_true',\n help_msg=help_msg,\n parser=parser,\n long_arg='--keep-tmp')\n\n help_msg = ('Force the job execution, even if some of the test '\n 'references are not resolved to tests. \"on\" and '\n '\"off\" will be deprecated soon.')\n settings.register_option(section='run',\n key='ignore_missing_references',\n default=False,\n key_type=bool,\n help_msg=help_msg,\n parser=parser,\n long_arg='--ignore-missing-references')\n\n settings.add_argparser_to_option(namespace='sysinfo.collect.enabled',\n parser=parser,\n action='store_false',\n long_arg='--disable-sysinfo')\n\n settings.add_argparser_to_option('run.execution_order',\n parser=parser,\n long_arg='--execution-order')\n\n parser.output = parser.add_argument_group('output and result format')\n\n help_msg = ('Store given logging STREAMs in '\n '\"$JOB_RESULTS_DIR/$STREAM.$LEVEL.\"')\n settings.register_option(section='run',\n key='store_logging_stream',\n nargs='+',\n help_msg=help_msg,\n default=[],\n metavar='STREAM[:LEVEL]',\n key_type=list,\n parser=parser,\n long_arg='--store-logging-stream')\n\n help_msg = ('Logs the possible data directories for each test. This '\n 'is helpful when writing new tests and not being sure '\n 'where to put data files. Look for \"Test data '\n 'directories\" in your test log')\n settings.register_option(section='run',\n key='log_test_data_directories',\n default=False,\n key_type=bool,\n help_msg=help_msg,\n parser=parser,\n long_arg='--log-test-data-directories')\n\n out_check = parser.add_argument_group('output check arguments')\n\n help_msg = ('Record the output produced by each test (from stdout '\n 'and stderr) into both the current executing result '\n 'and into reference files. Reference files are used on '\n 'subsequent runs to determine if the test produced the '\n 'expected output or not, and the current executing result '\n 'is used to check against a previously recorded reference '\n 'file. 
Valid values: \"none\" (to explicitly disable all '\n 'recording) \"stdout\" (to record standard output *only*), '\n '\"stderr\" (to record standard error *only*), \"both\" (to '\n 'record standard output and error in separate files), '\n '\"combined\" (for standard output and error in a single '\n 'file). \"all\" is also a valid but deprecated option that '\n 'is a synonym of \"both\".')\n settings.register_option(section='run',\n key='output_check_record',\n help_msg=help_msg,\n choices=('none', 'stdout', 'stderr',\n 'both', 'combined', 'all'),\n parser=out_check,\n default=None,\n long_arg='--output-check-record')\n\n help_msg = ('Disables test output (stdout/stderr) check. If this '\n 'option is given, no output will be checked, even if '\n 'there are reference files present for the test.')\n settings.register_option(section='run',\n key='output_check',\n default=True,\n key_type=bool,\n action='store_false',\n help_msg=help_msg,\n parser=out_check,\n long_arg='--disable-output-check')\n\n loader.add_loader_options(parser, 'run')\n parser_common_args.add_tag_filter_args(parser)\n\n def run(self, config):\n \"\"\"\n Run test modules or simple tests.\n\n :param config: Configuration received from command line parser and\n possibly other sources.\n :type config: dict\n \"\"\"\n if 'run.output_check_record' in config:\n check_record = config.get('run.output_check_record')\n process.OUTPUT_CHECK_RECORD_MODE = check_record\n\n unique_job_id = config.get('run.unique_job_id')\n if unique_job_id is not None:\n try:\n int(unique_job_id, 16)\n if len(unique_job_id) != 40:\n raise ValueError\n except ValueError:\n LOG_UI.error('Unique Job ID needs to be a 40 digit hex number')\n sys.exit(exit_codes.AVOCADO_FAIL)\n\n try:\n suite = TestSuite.from_config(config, name='')\n if suite.size == 0:\n msg = (\"Suite is empty. There is no tests to run. This usually \"\n \"happens when you pass --ignore-missing-references and \"\n \"there is no more references to process.\")\n LOG_UI.warning(msg)\n sys.exit(exit_codes.AVOCADO_FAIL)\n except TestSuiteError as err:\n LOG_UI.error(err)\n sys.exit(exit_codes.AVOCADO_JOB_FAIL)\n with job.Job(config, [suite]) as job_instance:\n return job_instance.run()\n", "path": "avocado/plugins/run.py"}]} | 3,420 | 178 |
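A side note on the avocado record above: the reported "crash without output" happens because the empty-suite branch called sys.exit() without logging anything first; the golden diff adds a warning before exiting. A minimal sketch of that guard, with the logger name and exit-code value assumed for illustration only:

```
import logging
import sys

log = logging.getLogger(__name__)   # stand-in for avocado's LOG_UI logger
AVOCADO_FAIL = 2                    # assumed numeric value, illustration only


def exit_if_suite_is_empty(suite_size):
    # Before the fix this branch exited silently; the golden diff logs a
    # warning first so the user learns why nothing ran.
    if suite_size == 0:
        log.warning("Suite is empty. There is no tests to run. This usually "
                    "happens when you pass --ignore-missing-references and "
                    "there is no more references to process.")
        sys.exit(AVOCADO_FAIL)
```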
gh_patches_debug_12424 | rasdani/github-patches | git_diff | bridgecrewio__checkov-619 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Azure NSG Security rule loop error
I got the following error when I scan an Azure NSG terraform file.
_ERROR:checkov.terraform.checks.resource.azure.NSGRuleSSHAccessRestricted:Failed to run check: Ensure that SSH access is restricted from the internet for configuration: {'name': ['${var.nsg_name}'], 'resource_group_name': ['${azurerm_resource_group.rg.name}'], 'location': ['${var.location}'], 'security_rule': ['${[for rule in var.security_rules : {\'name\': \'${rule.name}\', \'priority\': \'${rule.priority}\', \'source_address_prefix\': \'${lookup(rule,"source_address_prefixes",[]) == [] ? lookup(rule,"source_address_prefix",var.nsg_default_source_address_prefix) : ""}\', \'source_address_prefixes\': \'${lookup(rule,"source_address_prefixes",[])}\', \'access\': \'${lookup(rule,"access",var.nsg_default_access)}\', \'destination_port_range\': \'${lookup(rule,"destination_port_ranges",[]) == [] ? lookup(rule,"destination_port_range",var.nsg_default_destination_port_range) : ""}\', \'destination_port_ranges\': \'${lookup(rule,"destination_port_ranges",[])}\', \'direction\': \'${lookup(rule,"direction",var.nsg_default_direction)}\', \'protocol\': \'${lookup(rule,"protocol",var.nsg_default_protocol)}\', \'source_port_range\': \'${lookup(rule,"source_port_range",var.nsg_default_source_port_range)}\', \'description\': \'\', \'destination_address_prefix\': \'*\', \'destination_address_prefixes\': [], \'destination_application_security_group_ids\': [], \'source_application_security_group_ids\': [], \'source_port_ranges\': []}]}'], 'tags': ['${var.tags}']} at file: /aaa.tf
2020-09-11 11:23:01,213 [MainThread ] [ERROR] Failed to run check: Ensure that SSH access is restricted from the internet for configuration: {'name': ['${var.nsg_name}'], 'resource_group_name': ['${azurerm_resource_group.rg.name}'], 'location': ['${var.location}'], 'security_rule': ['${[for rule in var.security_rules : {\'name\': \'${rule.name}\', \'priority\': \'${rule.priority}\', \'source_address_prefix\': \'${lookup(rule,"source_address_prefixes",[]) == [] ? lookup(rule,"source_address_prefix",var.nsg_default_source_address_prefix) : ""}\', \'source_address_prefixes\': \'${lookup(rule,"source_address_prefixes",[])}\', \'access\': \'${lookup(rule,"access",var.nsg_default_access)}\', \'destination_port_range\': \'${lookup(rule,"destination_port_ranges",[]) == [] ? lookup(rule,"destination_port_range",var.nsg_default_destination_port_range) : ""}\', \'destination_port_ranges\': \'${lookup(rule,"destination_port_ranges",[])}\', \'direction\': \'${lookup(rule,"direction",var.nsg_default_direction)}\', \'protocol\': \'${lookup(rule,"protocol",var.nsg_default_protocol)}\', \'source_port_range\': \'${lookup(rule,"source_port_range",var.nsg_default_source_port_range)}\', \'description\': \'\', \'destination_address_prefix\': \'*\', \'destination_address_prefixes\': [], \'destination_application_security_group_ids\': [], \'source_application_security_group_ids\': [], \'source_port_ranges\': []}]}'], 'tags': ['${var.tags}']} at file: /aaa.tf
Traceback (most recent call last):
File "/usr/local/bin/checkov", line 5, in <module>
run()
File "/usr/local/lib/python3.8/site-packages/checkov/main.py", line 65, in run
scan_reports = runner_registry.run(root_folder=root_folder, external_checks_dir=external_checks_dir,
File "/usr/local/lib/python3.8/site-packages/checkov/common/runners/runner_registry.py", line 27, in run
scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py", line 52, in run
self.check_tf_definition(report, root_folder, runner_filter)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py", line 94, in check_tf_definition
self.run_block(definition[block_type], definitions_context, full_file_path, report, scanned_file,
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py", line 117, in run_block
results = registry.scan(scanned_file, entity, skipped_checks, runner_filter)
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py", line 104, in scan
result = self.run_check(check, entity_configuration, entity_name, entity_type, scanned_file, skip_info)
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py", line 110, in run_check
result = check.run(scanned_file=scanned_file, entity_configuration=entity_configuration,
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py", line 58, in run
raise e
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py", line 39, in run
check_result['result'] = self.scan_entity_conf(entity_configuration, entity_type)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/base_resource_check.py", line 16, in scan_entity_conf
return self.scan_resource_conf(conf, entity_type)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/base_resource_check.py", line 28, in wrapper
return wrapped(self, conf)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py", line 37, in scan_resource_conf
if 'access' in rule_conf and rule_conf['access'][0] == "Allow":
TypeError: string indices must be integers_
**Scanned NSG:**
```
resource "azurerm_network_security_group" "mynsg" {
  name                = var.nsg_name
  resource_group_name = azurerm_resource_group.rg.name
  location            = var.location

  security_rule = [for rule in var.security_rules : {
    name                    = rule.name
    priority                = rule.priority
    source_address_prefix   = lookup(rule, "source_address_prefixes", []) == [] ? lookup(rule, "source_address_prefix", var.nsg_default_source_address_prefix) : ""
    source_address_prefixes = lookup(rule, "source_address_prefixes", [])
    access                  = lookup(rule, "access", var.nsg_default_access)
    destination_port_range  = lookup(rule, "destination_port_ranges", []) == [] ? lookup(rule, "destination_port_range", var.nsg_default_destination_port_range) : ""
    destination_port_ranges = lookup(rule, "destination_port_ranges", [])
    direction               = lookup(rule, "direction", var.nsg_default_direction)
    protocol                = lookup(rule, "protocol", var.nsg_default_protocol)
    source_port_range       = lookup(rule, "source_port_range", var.nsg_default_source_port_range)
    description             = ""
    destination_address_prefix                 = "*"
    destination_address_prefixes               = []
    destination_application_security_group_ids = []
    source_application_security_group_ids      = []
    source_port_ranges                         = []
    }
  ]
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py`
Content:
```
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck
3 from checkov.common.util.type_forcers import force_list
4 import re
5
6 INTERNET_ADDRESSES = ["*", "0.0.0.0", "<nw>/0", "/0", "internet", "any"]
7 PORT_RANGE = re.compile('\d+-\d+')
8
9
10 class NSGRulePortAccessRestricted(BaseResourceCheck):
11 def __init__(self, name, check_id, port):
12 supported_resources = ['azurerm_network_security_rule', 'azurerm_network_security_group']
13 categories = [CheckCategories.NETWORKING]
14 super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)
15 self.port = port
16
17 def is_port_in_range(self, conf):
18 ports = force_list(conf['destination_port_range'][0])
19 for range in ports:
20 if re.match(PORT_RANGE, range):
21 start, end = int(range.split('-')[0]), int(range.split('-')[1])
22 if start <= self.port <= end:
23 return True
24 if range in [str(self.port), '*']:
25 return True
26 return False
27
28 def scan_resource_conf(self, conf):
29 if "dynamic" in conf:
30 return CheckResult.UNKNOWN
31
32 rule_confs = [conf]
33 if 'security_rule' in conf:
34 rule_confs = conf['security_rule']
35
36 for rule_conf in rule_confs:
37 if 'access' in rule_conf and rule_conf['access'][0] == "Allow":
38 if 'direction' in rule_conf and rule_conf['direction'][0] == "Inbound":
39 if 'protocol' in rule_conf and rule_conf['protocol'][0].upper() == 'TCP':
40 if 'destination_port_range' in rule_conf and self.is_port_in_range(rule_conf):
41 if 'source_address_prefix' in rule_conf and rule_conf['source_address_prefix'][0] in INTERNET_ADDRESSES:
42 return CheckResult.FAILED
43 return CheckResult.PASSED
44
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py b/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py
--- a/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py
+++ b/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py
@@ -34,6 +34,8 @@
rule_confs = conf['security_rule']
for rule_conf in rule_confs:
+ if not isinstance(rule_conf, dict):
+ return CheckResult.UNKNOWN
if 'access' in rule_conf and rule_conf['access'][0] == "Allow":
if 'direction' in rule_conf and rule_conf['direction'][0] == "Inbound":
if 'protocol' in rule_conf and rule_conf['protocol'][0].upper() == 'TCP':
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py b/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py\n--- a/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py\n+++ b/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py\n@@ -34,6 +34,8 @@\n rule_confs = conf['security_rule']\n \n for rule_conf in rule_confs:\n+ if not isinstance(rule_conf, dict):\n+ return CheckResult.UNKNOWN\n if 'access' in rule_conf and rule_conf['access'][0] == \"Allow\":\n if 'direction' in rule_conf and rule_conf['direction'][0] == \"Inbound\":\n if 'protocol' in rule_conf and rule_conf['protocol'][0].upper() == 'TCP':\n", "issue": "Azure NSG Security rule loop error\nI got the following error when I execute to scan Azure NSG terraform file.\r\n\r\n_ERROR:checkov.terraform.checks.resource.azure.NSGRuleSSHAccessRestricted:Failed to run check: Ensure that SSH access is restricted from the internet for configuration: {'name': ['${var.nsg_name}'], 'resource_group_name': ['${azurerm_resource_group.rg.name}'], 'location': ['${var.location}'], 'security_rule': ['${[for rule in var.security_rules : {\\'name\\': \\'${rule.name}\\', \\'priority\\': \\'${rule.priority}\\', \\'source_address_prefix\\': \\'${lookup(rule,\"source_address_prefixes\",[]) == [] ? lookup(rule,\"source_address_prefix\",var.nsg_default_source_address_prefix) : \"\"}\\', \\'source_address_prefixes\\': \\'${lookup(rule,\"source_address_prefixes\",[])}\\', \\'access\\': \\'${lookup(rule,\"access\",var.nsg_default_access)}\\', \\'destination_port_range\\': \\'${lookup(rule,\"destination_port_ranges\",[]) == [] ? lookup(rule,\"destination_port_range\",var.nsg_default_destination_port_range) : \"\"}\\', \\'destination_port_ranges\\': \\'${lookup(rule,\"destination_port_ranges\",[])}\\', \\'direction\\': \\'${lookup(rule,\"direction\",var.nsg_default_direction)}\\', \\'protocol\\': \\'${lookup(rule,\"protocol\",var.nsg_default_protocol)}\\', \\'source_port_range\\': \\'${lookup(rule,\"source_port_range\",var.nsg_default_source_port_range)}\\', \\'description\\': \\'\\', \\'destination_address_prefix\\': \\'*\\', \\'destination_address_prefixes\\': [], \\'destination_application_security_group_ids\\': [], \\'source_application_security_group_ids\\': [], \\'source_port_ranges\\': []}]}'], 'tags': ['${var.tags}']} at file: /aaa.tf\r\n2020-09-11 11:23:01,213 [MainThread ] [ERROR] Failed to run check: Ensure that SSH access is restricted from the internet for configuration: {'name': ['${var.nsg_name}'], 'resource_group_name': ['${azurerm_resource_group.rg.name}'], 'location': ['${var.location}'], 'security_rule': ['${[for rule in var.security_rules : {\\'name\\': \\'${rule.name}\\', \\'priority\\': \\'${rule.priority}\\', \\'source_address_prefix\\': \\'${lookup(rule,\"source_address_prefixes\",[]) == [] ? lookup(rule,\"source_address_prefix\",var.nsg_default_source_address_prefix) : \"\"}\\', \\'source_address_prefixes\\': \\'${lookup(rule,\"source_address_prefixes\",[])}\\', \\'access\\': \\'${lookup(rule,\"access\",var.nsg_default_access)}\\', \\'destination_port_range\\': \\'${lookup(rule,\"destination_port_ranges\",[]) == [] ? 
lookup(rule,\"destination_port_range\",var.nsg_default_destination_port_range) : \"\"}\\', \\'destination_port_ranges\\': \\'${lookup(rule,\"destination_port_ranges\",[])}\\', \\'direction\\': \\'${lookup(rule,\"direction\",var.nsg_default_direction)}\\', \\'protocol\\': \\'${lookup(rule,\"protocol\",var.nsg_default_protocol)}\\', \\'source_port_range\\': \\'${lookup(rule,\"source_port_range\",var.nsg_default_source_port_range)}\\', \\'description\\': \\'\\', \\'destination_address_prefix\\': \\'*\\', \\'destination_address_prefixes\\': [], \\'destination_application_security_group_ids\\': [], \\'source_application_security_group_ids\\': [], \\'source_port_ranges\\': []}]}'], 'tags': ['${var.tags}']} at file: /aaa.tf\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/checkov\", line 5, in <module>\r\n run()\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/main.py\", line 65, in run\r\n scan_reports = runner_registry.run(root_folder=root_folder, external_checks_dir=external_checks_dir,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/runners/runner_registry.py\", line 27, in run\r\n scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\", line 52, in run\r\n self.check_tf_definition(report, root_folder, runner_filter)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\", line 94, in check_tf_definition\r\n self.run_block(definition[block_type], definitions_context, full_file_path, report, scanned_file,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\", line 117, in run_block\r\n results = registry.scan(scanned_file, entity, skipped_checks, runner_filter)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py\", line 104, in scan\r\n result = self.run_check(check, entity_configuration, entity_name, entity_type, scanned_file, skip_info)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py\", line 110, in run_check\r\n result = check.run(scanned_file=scanned_file, entity_configuration=entity_configuration,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py\", line 58, in run\r\n raise e\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py\", line 39, in run\r\n check_result['result'] = self.scan_entity_conf(entity_configuration, entity_type)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/base_resource_check.py\", line 16, in scan_entity_conf\r\n return self.scan_resource_conf(conf, entity_type)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/base_resource_check.py\", line 28, in wrapper\r\n return wrapped(self, conf)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py\", line 37, in scan_resource_conf\r\n if 'access' in rule_conf and rule_conf['access'][0] == \"Allow\":\r\nTypeError: string indices must be integers_\r\n\r\n**Scanned NSG:**\r\n\r\nresource \"azurerm_network_security_group\" \"mynsg\" {\r\n name = var.nsg_name\r\n resource_group_name = azurerm_resource_group.rg.name\r\n location = var.location\r\n\r\n security_rule = [for rule in var.security_rules : {\r\n name = rule.name\r\n priority = rule.priority\r\n source_address_prefix = lookup(rule, \"source_address_prefixes\", []) == [] ? 
lookup(rule, \"source_address_prefix\", var.nsg_default_source_address_prefix) : \"\"\r\n source_address_prefixes = lookup(rule, \"source_address_prefixes\", [])\r\n access = lookup(rule, \"access\", var.nsg_default_access)\r\n destination_port_range = lookup(rule, \"destination_port_ranges\", []) == [] ? lookup(rule, \"destination_port_range\", var.nsg_default_destination_port_range) : \"\"\r\n destination_port_ranges = lookup(rule, \"destination_port_ranges\", [])\r\n direction = lookup(rule, \"direction\", var.nsg_default_direction)\r\n protocol = lookup(rule, \"protocol\", var.nsg_default_protocol)\r\n source_port_range = lookup(rule, \"source_port_range\", var.nsg_default_source_port_range)\r\n description = \"\"\r\n destination_address_prefix = \"*\"\r\n destination_address_prefixes = []\r\n destination_application_security_group_ids = []\r\n source_application_security_group_ids = []\r\n source_port_ranges = []\r\n }\r\n ]\r\n}\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\nfrom checkov.common.util.type_forcers import force_list\nimport re\n\nINTERNET_ADDRESSES = [\"*\", \"0.0.0.0\", \"<nw>/0\", \"/0\", \"internet\", \"any\"]\nPORT_RANGE = re.compile('\\d+-\\d+')\n\n\nclass NSGRulePortAccessRestricted(BaseResourceCheck):\n def __init__(self, name, check_id, port):\n supported_resources = ['azurerm_network_security_rule', 'azurerm_network_security_group']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)\n self.port = port\n\n def is_port_in_range(self, conf):\n ports = force_list(conf['destination_port_range'][0])\n for range in ports:\n if re.match(PORT_RANGE, range):\n start, end = int(range.split('-')[0]), int(range.split('-')[1])\n if start <= self.port <= end:\n return True\n if range in [str(self.port), '*']:\n return True\n return False\n\n def scan_resource_conf(self, conf):\n if \"dynamic\" in conf:\n return CheckResult.UNKNOWN\n \n rule_confs = [conf]\n if 'security_rule' in conf:\n rule_confs = conf['security_rule']\n\n for rule_conf in rule_confs:\n if 'access' in rule_conf and rule_conf['access'][0] == \"Allow\":\n if 'direction' in rule_conf and rule_conf['direction'][0] == \"Inbound\":\n if 'protocol' in rule_conf and rule_conf['protocol'][0].upper() == 'TCP':\n if 'destination_port_range' in rule_conf and self.is_port_in_range(rule_conf):\n if 'source_address_prefix' in rule_conf and rule_conf['source_address_prefix'][0] in INTERNET_ADDRESSES:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n", "path": "checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\nfrom checkov.common.util.type_forcers import force_list\nimport re\n\nINTERNET_ADDRESSES = [\"*\", \"0.0.0.0\", \"<nw>/0\", \"/0\", \"internet\", \"any\"]\nPORT_RANGE = re.compile('\\d+-\\d+')\n\n\nclass NSGRulePortAccessRestricted(BaseResourceCheck):\n def __init__(self, name, check_id, port):\n supported_resources = ['azurerm_network_security_rule', 'azurerm_network_security_group']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)\n self.port = 
port\n\n def is_port_in_range(self, conf):\n ports = force_list(conf['destination_port_range'][0])\n for range in ports:\n if re.match(PORT_RANGE, range):\n start, end = int(range.split('-')[0]), int(range.split('-')[1])\n if start <= self.port <= end:\n return True\n if range in [str(self.port), '*']:\n return True\n return False\n\n def scan_resource_conf(self, conf):\n if \"dynamic\" in conf:\n return CheckResult.UNKNOWN\n \n rule_confs = [conf]\n if 'security_rule' in conf:\n rule_confs = conf['security_rule']\n\n for rule_conf in rule_confs:\n if not isinstance(rule_conf, dict):\n return CheckResult.UNKNOWN\n if 'access' in rule_conf and rule_conf['access'][0] == \"Allow\":\n if 'direction' in rule_conf and rule_conf['direction'][0] == \"Inbound\":\n if 'protocol' in rule_conf and rule_conf['protocol'][0].upper() == 'TCP':\n if 'destination_port_range' in rule_conf and self.is_port_in_range(rule_conf):\n if 'source_address_prefix' in rule_conf and rule_conf['source_address_prefix'][0] in INTERNET_ADDRESSES:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n", "path": "checkov/terraform/checks/resource/azure/NSGRulePortAccessRestricted.py"}]} | 2,483 | 192 |
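A side note on the checkov record above: when `security_rule` is assigned a `for` expression, the parsed configuration carries it as a single uninterpolated template string rather than a list of rule dicts, so `'access' in rule_conf` succeeds as a substring test and `rule_conf['access']` raises the `TypeError: string indices must be integers` shown in the traceback. A minimal, self-contained sketch of that failure mode and of the isinstance guard from the golden diff (the string value is shortened and assumed for illustration):

```
# What the check receives: a list holding one raw template string, not dicts.
rule_confs = ["${[for rule in var.security_rules : {'name': rule.name, 'access': 'Allow'}]}"]

for rule_conf in rule_confs:
    if not isinstance(rule_conf, dict):
        # Guard added by the golden diff; the real check returns UNKNOWN here.
        continue
    # Without the guard this branch is reached: 'access' in rule_conf is a
    # substring match on the raw string, and rule_conf['access'] then raises
    # "TypeError: string indices must be integers".
    if 'access' in rule_conf and rule_conf['access'][0] == "Allow":
        print("rule allows inbound access")
```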
gh_patches_debug_66765 | rasdani/github-patches | git_diff | pymedusa__Medusa-7069 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
1. Ignore words/filter - does not accept "." ? 2. Show page is not showing globally defined filters/"words"
After upgrading from 0.30 to 0.34 -
1. It seems that for my previous ignore words that had "." in them, the "." is now interpreted as a wildcard :( See log below.
**To Reproduce**
1. define 2.0 as ignore
2. search for names that have 2**x**0 (210 in my example below)
3. see log with wrong ignore...
**Expected behavior**
The filter word should be used as-is, with no semi-regex interpretation; or add a comment to use "" if needed, and explain the special symbols.
**Medusa (please complete the following information):**
Medusa Configuration
Medusa Info: Branch: master
Commit: 74c3f124fb599ad923e23f46fc320778b8b4217c
Version: 0.3.4
Database: 44.14
Python Version: 2.7.13 (v2.7.13:a06454b1afa1, Dec 17 2016, 20:42:59) [MSC v.1500 32 bit (Intel)]
SSL Version: OpenSSL 1.0.2j 26 Sep 2016
OS: Windows-10-10.0.17763
Locale: en_US.cp1255
User: RAFI-PC$
Program Folder: D:\Program Files\Medusa\Medusa
Config File: D:\Program Files\Medusa\Data\config.ini
Database File: D:\Program Files\Medusa\Data\main.db
Cache Folder: D:\Program Files\Medusa\Data\cache
Log Folder: D:\Program Files\Medusa\Data\Logs
Arguments:
--nolaunch --port=8081 --datadir=D:\Program Files\Medusa\Data
**Debug logs (at least 50 lines):**
```
2019-08-19 16:15:25 INFO SEARCHQUEUE-DAILY-SEARCH :: Rarbg :: [74c3f12] Ignoring Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p.AMZN.WEBRip.DDP5.1.x264-NTG[rartv] based on ignored words filter: 2.0
2019-08-19 16:15:25 INFO SEARCHQUEUE-DAILY-SEARCH :: Rarbg :: [74c3f12] Torrent meets the file size requirements.file Name:Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p.AMZN.WEBRip.DDP5.1.x264-NTG[rartv] Size:3629018180
2019-08-19 16:15:25 INFO SEARCHQUEUE-DAILY-SEARCH :: Rarbg :: [74c3f12] Quality of Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p.AMZN.WEBRip.DDP5.1.x264-NTG[rartv] is 1080p WEB-DL
2019-08-19 16:15:25 INFO SEARCHQUEUE-DAILY-SEARCH :: Rarbg :: [74c3f12] Quality of Fear.the.Walking.Dead.S05E10.1080p.WEB.h264-TBS[rartv] is 1080p WEB-DL
2019-08-19 16:15:21 INFO SEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of The.Handmaids.Tale.S03E13.Mayday.1080p.HULU.WEBRip.AAC2.0.H264-NTb[TGx] is 1080p WEB-DL
2019-08-19 16:15:21 INFO SEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of The.Handmaids.Tale.S03E13.Mayday.1080p.HULU.WEBRip.AAC2.0.H264-NTb is 1080p WEB-DL
2019-08-19 16:15:21 INFO SEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of The.Handmaids.Tale.S03E13.Mayday.1080p.HULU.WEB-DL.AAC2.0.H.264-NTb.chs.eng.mp4 is 1080p WEB-DL
2019-08-19 16:15:21 INFO SEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of The.Handmaids.Tale.S03E13.1080p.WEB.x264-worldmkv.mkv is 1080p WEB-DL
2019-08-19 16:15:21 INFO SEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of The.Handmaids.Tale.S03E13.Mayday.1080p.HULU.WEBRip.AAC2.0.H264-NTb[rarbg] is 1080p WEB-DL
2019-08-19 16:15:21 INFO SEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of The.Handmaids.Tale.S03E13.Mayday.1080p.HULU.WEBRip.AAC2.0.H264-NTb[TGx] is 1080p WEB-DL
2019-08-19 16:15:21 INFO SEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Ignoring Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p.AMZN.WEBRip.DDP5.1.x264-NTG[rarbg] based on ignored words filter: 2.0
2019-08-19 16:15:21 INFO SEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Torrent meets the file size requirements.file Name:Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p.AMZN.WEBRip.DDP5.1.x264-NTG[rarbg] Size:3629121536
2019-08-19 16:15:21 INFO SEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p.AMZN.WEBRip.DDP5.1.x264-NTG[rarbg] is 1080p WEB-DL
```
**Additional context**
2. Show's page is now not showing any global filters/words :(
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `medusa/show/naming.py`
Content:
```
1 # coding=utf-8
2
3 """Series naming helpers for selecting results."""
4
5 from __future__ import unicode_literals
6
7 import fnmatch
8 import logging
9 import os
10 import re
11
12 from medusa import app
13 from medusa.logger.adapters.style import BraceAdapter
14 from medusa.name_parser.parser import InvalidNameException, InvalidShowException, NameParser
15
16 from six import string_types
17
18 log = BraceAdapter(logging.getLogger(__name__))
19 log.logger.addHandler(logging.NullHandler())
20
21
22 result_filters = [
23 '(sub|nfo)fix',
24 '(?<!shomin.)sample',
25 '(dvd)?extras',
26 ]
27
28 if hasattr('General', 'ignore_und_subs') and app.IGNORE_UND_SUBS:
29 result_filters.append('sub(bed|ed|pack|s)')
30
31 if hasattr('General', 'ignored_subs_list') and app.IGNORED_SUBS_LIST:
32 result_filters.append('(' + app.IGNORED_SUBS_LIST.replace(',', '|') + ')sub(bed|ed|s)?')
33
34
35 def contains_words(item, words, strict=True):
36 """
37 Yield words that are contained in an item.
38
39 :param item: item to search for words
40 :param words: iterable of words to search for in item
41 :param strict: exclude substring matches
42 If strict find exact existence of a word in the item but exclude matches
43 where the word is part of a substring. For example `word` would not
44 match 'words' or 'word1'. Regex expressions as words can only
45 be used in strict mode!
46 """
47 log.debug('Searching {item} for {words}. (strict={strict})',
48 {'item': item, 'words': words, 'strict': strict})
49
50 def _strict(_word):
51 # Use a regex to make sure the match is not part of a substring
52 pattern = r'(^|[\W_]){word}($|[\W_])'.format(word=_word)
53 return re.search(pattern, item, re.I)
54
55 def _lenient(_word):
56 # Use string.__contains__ for a quick lenient test
57 return _word in item
58
59 # select strict or lenient method for the test
60 item_contains = _strict if strict else _lenient
61
62 for word in words:
63 if item_contains(word):
64 yield word
65
66
67 def contains_at_least_one_word(name, words):
68 """
69 Filter out results based on filter_words.
70
71 :param name: name to check
72 :param words: string of words separated by a ',' or list of words
73 :return: False if the name doesn't contain any word of words list, or the found word from the list.
74 """
75 if not (name and words):
76 return False
77
78 if isinstance(words, string_types):
79 words = words.split(',')
80 items = [(re.compile(r'(^|[\W_])%s($|[\W_])' % word.strip(), re.I), word.strip()) for word in words]
81 for regexp, word in items:
82 if regexp.search(name):
83 # subs_words = '.dub.' or '.dksub.' or else
84 subs_word = regexp.search(name).group(0)
85 # If word is a regex like 'dub(bed)?' or 'sub(bed|ed|pack|s)'
86 # then return just the matched word: 'dub' and not full regex
87 if word in result_filters:
88 return subs_word.replace('.', '')
89 else:
90 return word
91
92 return False
93
94
95 def filter_bad_releases(name, parse=True):
96 """
97 Filter out non-english and invalid releases by comparing them to the result_filters contents.
98
99 :param parse: parse the name
100 :param name: the release name to check
101 :return: True if the release name is OK, False if it's bad.
102 """
103 try:
104 if parse:
105 NameParser().parse(name)
106 except InvalidNameException as error:
107 log.debug('{0}', error)
108 return False
109 except InvalidShowException:
110 pass
111
112 # if any of the bad strings are in the name then say no
113 word = contains_at_least_one_word(name, result_filters)
114 if word:
115 log.debug('Unwanted scene release: {0}. Contains unwanted word: {1}.'
116 ' Ignoring it', name, word)
117 return False
118 return True
119
120
121 def determine_release_name(dir_name=None, nzb_name=None):
122 """Determine a release name from an nzb and/or folder name."""
123 if nzb_name is not None:
124 log.info('Using nzb_name for release name.')
125 return nzb_name.rpartition('.')[0]
126
127 if dir_name is None:
128 return None
129
130 # try to get the release name from nzb/nfo
131 file_types = ['*.nzb', '*.nfo']
132
133 for search in file_types:
134
135 reg_expr = re.compile(fnmatch.translate(search), re.IGNORECASE)
136 files = [file_name for file_name in os.listdir(dir_name) if
137 os.path.isfile(os.path.join(dir_name, file_name))]
138
139 results = [f for f in files if reg_expr.search(f)]
140
141 if len(results) == 1:
142 found_file = os.path.basename(results[0])
143 found_file = found_file.rpartition('.')[0]
144 if filter_bad_releases(found_file):
145 log.info('Release name ({0}) found from file ({1})',
146 found_file, results[0])
147 return found_file.rpartition('.')[0]
148
149 # If that fails, we try the folder
150 folder = os.path.basename(dir_name)
151 if filter_bad_releases(folder):
152 # NOTE: Multiple failed downloads will change the folder name.
153 # (e.g., appending #s)
154 # Should we handle that?
155 log.debug('Folder name ({0}) appears to be a valid release name.'
156 ' Using it.', folder)
157 return folder
158
159 return None
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/medusa/show/naming.py b/medusa/show/naming.py
--- a/medusa/show/naming.py
+++ b/medusa/show/naming.py
@@ -77,7 +77,7 @@
if isinstance(words, string_types):
words = words.split(',')
- items = [(re.compile(r'(^|[\W_])%s($|[\W_])' % word.strip(), re.I), word.strip()) for word in words]
+ items = [(re.compile(r'(^|[\W_])%s($|[\W_])' % re.escape(word.strip()), re.I), word.strip()) for word in words]
for regexp, word in items:
if regexp.search(name):
# subs_words = '.dub.' or '.dksub.' or else
| {"golden_diff": "diff --git a/medusa/show/naming.py b/medusa/show/naming.py\n--- a/medusa/show/naming.py\n+++ b/medusa/show/naming.py\n@@ -77,7 +77,7 @@\n \n if isinstance(words, string_types):\n words = words.split(',')\n- items = [(re.compile(r'(^|[\\W_])%s($|[\\W_])' % word.strip(), re.I), word.strip()) for word in words]\n+ items = [(re.compile(r'(^|[\\W_])%s($|[\\W_])' % re.escape(word.strip()), re.I), word.strip()) for word in words]\n for regexp, word in items:\n if regexp.search(name):\n # subs_words = '.dub.' or '.dksub.' or else\n", "issue": "1. Ignore words/filter - does not except \".\" ? 2. Show page is not showing global defined filters/\"words\" \nAfter upgrading from 0.30 to 0.34 - \r\n1. Seems like my previous ignore words that had \".\" in them now the \".\" is interpreted as wild card :( See log below.\r\n\r\n**To Reproduce**\r\n1. define 2.0 as ignore\r\n2. search names that has 2**x**0 (210 in my example below)\r\n3. see log with wrong ignore... \r\n\r\n**Expected behavior**\r\nFilter word should be used as is, no semi- regex interpretation, or add comment to use \"\" if needed, and explain the special symbols.\r\n\r\n**Medusa (please complete the following information):**\r\n\r\n\r\nMedusa Configuration\r\nMedusa Info: \tBranch: master\r\nCommit: 74c3f124fb599ad923e23f46fc320778b8b4217c\r\nVersion: 0.3.4\r\nDatabase: 44.14\r\nPython Version:\t2.7.13 (v2.7.13:a06454b1afa1, Dec 17 2016, 20:42:59) [MSC v.1500 32 bit (Intel)]\r\nSSL Version:\tOpenSSL 1.0.2j 26 Sep 2016\r\nOS:\tWindows-10-10.0.17763\r\nLocale:\ten_US.cp1255\r\n \t \r\n \t \r\nUser:\tRAFI-PC$\r\nProgram Folder:\tD:\\Program Files\\Medusa\\Medusa\r\nConfig File:\tD:\\Program Files\\Medusa\\Data\\config.ini\r\nDatabase File:\tD:\\Program Files\\Medusa\\Data\\main.db\r\nCache Folder:\tD:\\Program Files\\Medusa\\Data\\cache\r\nLog Folder:\tD:\\Program Files\\Medusa\\Data\\Logs\r\nArguments:\t\r\n--nolaunch --port=8081 --datadir=D:\\Program Files\\Medusa\\Data\r\n\r\n**Debug logs (at least 50 lines):**\r\n```\r\n\r\n2019-08-19 16:15:25 INFO\tSEARCHQUEUE-DAILY-SEARCH :: Rarbg :: [74c3f12] Ignoring Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p.AMZN.WEBRip.DDP5.1.x264-NTG[rartv] based on ignored words filter: 2.0\r\n2019-08-19 16:15:25 INFO\tSEARCHQUEUE-DAILY-SEARCH :: Rarbg :: [74c3f12] Torrent meets the file size requirements.file Name:Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p.AMZN.WEBRip.DDP5.1.x264-NTG[rartv] Size:3629018180\r\n2019-08-19 16:15:25 INFO\tSEARCHQUEUE-DAILY-SEARCH :: Rarbg :: [74c3f12] Quality of Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p.AMZN.WEBRip.DDP5.1.x264-NTG[rartv] is 1080p WEB-DL\r\n2019-08-19 16:15:25 INFO\tSEARCHQUEUE-DAILY-SEARCH :: Rarbg :: [74c3f12] Quality of Fear.the.Walking.Dead.S05E10.1080p.WEB.h264-TBS[rartv] is 1080p WEB-DL\r\n2019-08-19 16:15:21 INFO\tSEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of The.Handmaids.Tale.S03E13.Mayday.1080p.HULU.WEBRip.AAC2.0.H264-NTb[TGx] is 1080p WEB-DL\r\n2019-08-19 16:15:21 INFO\tSEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of The.Handmaids.Tale.S03E13.Mayday.1080p.HULU.WEBRip.AAC2.0.H264-NTb is 1080p WEB-DL\r\n2019-08-19 16:15:21 INFO\tSEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of The.Handmaids.Tale.S03E13.Mayday.1080p.HULU.WEB-DL.AAC2.0.H.264-NTb.chs.eng.mp4 is 1080p WEB-DL\r\n2019-08-19 16:15:21 INFO\tSEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of The.Handmaids.Tale.S03E13.1080p.WEB.x264-worldmkv.mkv is 1080p WEB-DL\r\n2019-08-19 16:15:21 
INFO\tSEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of The.Handmaids.Tale.S03E13.Mayday.1080p.HULU.WEBRip.AAC2.0.H264-NTb[rarbg] is 1080p WEB-DL\r\n2019-08-19 16:15:21 INFO\tSEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of The.Handmaids.Tale.S03E13.Mayday.1080p.HULU.WEBRip.AAC2.0.H264-NTb[TGx] is 1080p WEB-DL\r\n2019-08-19 16:15:21 INFO\tSEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Ignoring Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p.AMZN.WEBRip.DDP5.1.x264-NTG[rarbg] based on ignored words filter: 2.0\r\n2019-08-19 16:15:21 INFO\tSEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Torrent meets the file size requirements.file Name:Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p.AMZN.WEBRip.DDP5.1.x264-NTG[rarbg] Size:3629121536\r\n2019-08-19 16:15:21 INFO\tSEARCHQUEUE-DAILY-SEARCH :: Torrentz2 :: [74c3f12] Quality of Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p.AMZN.WEBRip.DDP5.1.x264-NTG[rarbg] is 1080p WEB-DL\r\n\r\n\r\n\r\n```\r\n</details>\r\n\r\n**Additional context**\r\n\r\n2. Show's page is now not showing any global filters/words :(\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Series naming helpers for selecting results.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport fnmatch\nimport logging\nimport os\nimport re\n\nfrom medusa import app\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.name_parser.parser import InvalidNameException, InvalidShowException, NameParser\n\nfrom six import string_types\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nresult_filters = [\n '(sub|nfo)fix',\n '(?<!shomin.)sample',\n '(dvd)?extras',\n]\n\nif hasattr('General', 'ignore_und_subs') and app.IGNORE_UND_SUBS:\n result_filters.append('sub(bed|ed|pack|s)')\n\nif hasattr('General', 'ignored_subs_list') and app.IGNORED_SUBS_LIST:\n result_filters.append('(' + app.IGNORED_SUBS_LIST.replace(',', '|') + ')sub(bed|ed|s)?')\n\n\ndef contains_words(item, words, strict=True):\n \"\"\"\n Yield words that are contained in an item.\n\n :param item: item to search for words\n :param words: iterable of words to search for in item\n :param strict: exclude substring matches\n If strict find exact existence of a word in the item but exclude matches\n where the word is part of a substring. For example `word` would not\n match 'words' or 'word1'. Regex expressions as words can only\n be used in strict mode!\n \"\"\"\n log.debug('Searching {item} for {words}. 
(strict={strict})',\n {'item': item, 'words': words, 'strict': strict})\n\n def _strict(_word):\n # Use a regex to make sure the match is not part of a substring\n pattern = r'(^|[\\W_]){word}($|[\\W_])'.format(word=_word)\n return re.search(pattern, item, re.I)\n\n def _lenient(_word):\n # Use string.__contains__ for a quick lenient test\n return _word in item\n\n # select strict or lenient method for the test\n item_contains = _strict if strict else _lenient\n\n for word in words:\n if item_contains(word):\n yield word\n\n\ndef contains_at_least_one_word(name, words):\n \"\"\"\n Filter out results based on filter_words.\n\n :param name: name to check\n :param words: string of words separated by a ',' or list of words\n :return: False if the name doesn't contain any word of words list, or the found word from the list.\n \"\"\"\n if not (name and words):\n return False\n\n if isinstance(words, string_types):\n words = words.split(',')\n items = [(re.compile(r'(^|[\\W_])%s($|[\\W_])' % word.strip(), re.I), word.strip()) for word in words]\n for regexp, word in items:\n if regexp.search(name):\n # subs_words = '.dub.' or '.dksub.' or else\n subs_word = regexp.search(name).group(0)\n # If word is a regex like 'dub(bed)?' or 'sub(bed|ed|pack|s)'\n # then return just the matched word: 'dub' and not full regex\n if word in result_filters:\n return subs_word.replace('.', '')\n else:\n return word\n\n return False\n\n\ndef filter_bad_releases(name, parse=True):\n \"\"\"\n Filter out non-english and invalid releases by comparing them to the result_filters contents.\n\n :param parse: parse the name\n :param name: the release name to check\n :return: True if the release name is OK, False if it's bad.\n \"\"\"\n try:\n if parse:\n NameParser().parse(name)\n except InvalidNameException as error:\n log.debug('{0}', error)\n return False\n except InvalidShowException:\n pass\n\n # if any of the bad strings are in the name then say no\n word = contains_at_least_one_word(name, result_filters)\n if word:\n log.debug('Unwanted scene release: {0}. 
Contains unwanted word: {1}.'\n ' Ignoring it', name, word)\n return False\n return True\n\n\ndef determine_release_name(dir_name=None, nzb_name=None):\n \"\"\"Determine a release name from an nzb and/or folder name.\"\"\"\n if nzb_name is not None:\n log.info('Using nzb_name for release name.')\n return nzb_name.rpartition('.')[0]\n\n if dir_name is None:\n return None\n\n # try to get the release name from nzb/nfo\n file_types = ['*.nzb', '*.nfo']\n\n for search in file_types:\n\n reg_expr = re.compile(fnmatch.translate(search), re.IGNORECASE)\n files = [file_name for file_name in os.listdir(dir_name) if\n os.path.isfile(os.path.join(dir_name, file_name))]\n\n results = [f for f in files if reg_expr.search(f)]\n\n if len(results) == 1:\n found_file = os.path.basename(results[0])\n found_file = found_file.rpartition('.')[0]\n if filter_bad_releases(found_file):\n log.info('Release name ({0}) found from file ({1})',\n found_file, results[0])\n return found_file.rpartition('.')[0]\n\n # If that fails, we try the folder\n folder = os.path.basename(dir_name)\n if filter_bad_releases(folder):\n # NOTE: Multiple failed downloads will change the folder name.\n # (e.g., appending #s)\n # Should we handle that?\n log.debug('Folder name ({0}) appears to be a valid release name.'\n ' Using it.', folder)\n return folder\n\n return None\n", "path": "medusa/show/naming.py"}], "after_files": [{"content": "# coding=utf-8\n\n\"\"\"Series naming helpers for selecting results.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport fnmatch\nimport logging\nimport os\nimport re\n\nfrom medusa import app\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.name_parser.parser import InvalidNameException, InvalidShowException, NameParser\n\nfrom six import string_types\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nresult_filters = [\n '(sub|nfo)fix',\n '(?<!shomin.)sample',\n '(dvd)?extras',\n]\n\nif hasattr('General', 'ignore_und_subs') and app.IGNORE_UND_SUBS:\n result_filters.append('sub(bed|ed|pack|s)')\n\nif hasattr('General', 'ignored_subs_list') and app.IGNORED_SUBS_LIST:\n result_filters.append('(' + app.IGNORED_SUBS_LIST.replace(',', '|') + ')sub(bed|ed|s)?')\n\n\ndef contains_words(item, words, strict=True):\n \"\"\"\n Yield words that are contained in an item.\n\n :param item: item to search for words\n :param words: iterable of words to search for in item\n :param strict: exclude substring matches\n If strict find exact existence of a word in the item but exclude matches\n where the word is part of a substring. For example `word` would not\n match 'words' or 'word1'. Regex expressions as words can only\n be used in strict mode!\n \"\"\"\n log.debug('Searching {item} for {words}. 
(strict={strict})',\n {'item': item, 'words': words, 'strict': strict})\n\n def _strict(_word):\n # Use a regex to make sure the match is not part of a substring\n pattern = r'(^|[\\W_]){word}($|[\\W_])'.format(word=_word)\n return re.search(pattern, item, re.I)\n\n def _lenient(_word):\n # Use string.__contains__ for a quick lenient test\n return _word in item\n\n # select strict or lenient method for the test\n item_contains = _strict if strict else _lenient\n\n for word in words:\n if item_contains(word):\n yield word\n\n\ndef contains_at_least_one_word(name, words):\n \"\"\"\n Filter out results based on filter_words.\n\n :param name: name to check\n :param words: string of words separated by a ',' or list of words\n :return: False if the name doesn't contain any word of words list, or the found word from the list.\n \"\"\"\n if not (name and words):\n return False\n\n if isinstance(words, string_types):\n words = words.split(',')\n items = [(re.compile(r'(^|[\\W_])%s($|[\\W_])' % re.escape(word.strip()), re.I), word.strip()) for word in words]\n for regexp, word in items:\n if regexp.search(name):\n # subs_words = '.dub.' or '.dksub.' or else\n subs_word = regexp.search(name).group(0)\n # If word is a regex like 'dub(bed)?' or 'sub(bed|ed|pack|s)'\n # then return just the matched word: 'dub' and not full regex\n if word in result_filters:\n return subs_word.replace('.', '')\n else:\n return word\n\n return False\n\n\ndef filter_bad_releases(name, parse=True):\n \"\"\"\n Filter out non-english and invalid releases by comparing them to the result_filters contents.\n\n :param parse: parse the name\n :param name: the release name to check\n :return: True if the release name is OK, False if it's bad.\n \"\"\"\n try:\n if parse:\n NameParser().parse(name)\n except InvalidNameException as error:\n log.debug('{0}', error)\n return False\n except InvalidShowException:\n pass\n\n # if any of the bad strings are in the name then say no\n word = contains_at_least_one_word(name, result_filters)\n if word:\n log.debug('Unwanted scene release: {0}. Contains unwanted word: {1}.'\n ' Ignoring it', name, word)\n return False\n return True\n\n\ndef determine_release_name(dir_name=None, nzb_name=None):\n \"\"\"Determine a release name from an nzb and/or folder name.\"\"\"\n if nzb_name is not None:\n log.info('Using nzb_name for release name.')\n return nzb_name.rpartition('.')[0]\n\n if dir_name is None:\n return None\n\n # try to get the release name from nzb/nfo\n file_types = ['*.nzb', '*.nfo']\n\n for search in file_types:\n\n reg_expr = re.compile(fnmatch.translate(search), re.IGNORECASE)\n files = [file_name for file_name in os.listdir(dir_name) if\n os.path.isfile(os.path.join(dir_name, file_name))]\n\n results = [f for f in files if reg_expr.search(f)]\n\n if len(results) == 1:\n found_file = os.path.basename(results[0])\n found_file = found_file.rpartition('.')[0]\n if filter_bad_releases(found_file):\n log.info('Release name ({0}) found from file ({1})',\n found_file, results[0])\n return found_file.rpartition('.')[0]\n\n # If that fails, we try the folder\n folder = os.path.basename(dir_name)\n if filter_bad_releases(folder):\n # NOTE: Multiple failed downloads will change the folder name.\n # (e.g., appending #s)\n # Should we handle that?\n log.debug('Folder name ({0}) appears to be a valid release name.'\n ' Using it.', folder)\n return folder\n\n return None\n", "path": "medusa/show/naming.py"}]} | 3,695 | 178 |
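The escaping fix in the row above is easy to reproduce in isolation: with the unescaped pattern, the `.` inside an ignore word such as `2.0` acts as a regex wildcard, which is why it matches the `210` in the release names from the log. A minimal sketch using only the standard library (the release string is copied from the log; the helper name is just for illustration):

```python
import re

def matches(name, word, escape=False):
    # Same boundary-anchored pattern that contains_at_least_one_word() builds.
    token = re.escape(word.strip()) if escape else word.strip()
    pattern = r'(^|[\W_])%s($|[\W_])' % token
    return bool(re.search(pattern, name, re.I))

release = "Fear.the.Walking.Dead.S05E10.210.Words.Per.Minute.1080p"

# Unescaped: the '.' in '2.0' matches any character, so '2.0' hits '210'.
print(matches(release, "2.0"))               # True  (the false positive from the log)
# Escaped, as in the patch: '.' is literal and '210' no longer matches.
print(matches(release, "2.0", escape=True))  # False
# A genuine occurrence of the ignore word still matches after escaping.
print(matches("Some.Show.2.0.1080p", "2.0", escape=True))  # True
```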
gh_patches_debug_15651 | rasdani/github-patches | git_diff | explosion__spaCy-1389 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lemmatizer exceptions: `lemma_strings` get sorted anyway
Lemmatization exceptions have been working inconsistently, even when they are added directly in `corpora/en/wordnet/dict/verb.exc`
The minimal test case of `coping` at https://github.com/explosion/spaCy/issues/389 reveals that, at
https://github.com/explosion/spaCy/blob/master/spacy/lemmatizer.py#L94, the lemmatizer's list of potential forms (where the thing in lemmatizer.exceptions is item [0]) is cast to a `set` (and so loses ordering); then in https://github.com/explosion/spaCy/blob/master/spacy/morphology.pyx#L149, that `set` gets sorted. So lemmatizer exceptions only work if they also come first alphabetically!
I've implemented the fix for this, but I need this issue in order to submit the PR!
One question, though, for @honnibal: this can be fixed either as I did it locally -- return the whole list, with exceptions and then anything that comes back from the lemmatizer's `rules` -- or skip the rules altogether if we used an exception. I think it's more useful downstream if we keep all lemma candidates, even if we're not using them in the default pipeline. But it also seems only destructive to do `sorted(set())` on them!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spacy/lemmatizer.py`
Content:
```
1 # coding: utf8
2 from __future__ import unicode_literals
3
4 from .symbols import POS, NOUN, VERB, ADJ, PUNCT
5 from .symbols import VerbForm_inf, VerbForm_none, Number_sing, Degree_pos
6
7
8 class Lemmatizer(object):
9 @classmethod
10 def load(cls, path, index=None, exc=None, rules=None):
11 return cls(index or {}, exc or {}, rules or {})
12
13 def __init__(self, index, exceptions, rules):
14 self.index = index
15 self.exc = exceptions
16 self.rules = rules
17
18 def __call__(self, string, univ_pos, morphology=None):
19 if univ_pos == NOUN:
20 univ_pos = 'noun'
21 elif univ_pos == VERB:
22 univ_pos = 'verb'
23 elif univ_pos == ADJ:
24 univ_pos = 'adj'
25 elif univ_pos == PUNCT:
26 univ_pos = 'punct'
27 # See Issue #435 for example of where this logic is requied.
28 if self.is_base_form(univ_pos, morphology):
29 return set([string.lower()])
30 lemmas = lemmatize(string, self.index.get(univ_pos, {}),
31 self.exc.get(univ_pos, {}),
32 self.rules.get(univ_pos, []))
33 return lemmas
34
35 def is_base_form(self, univ_pos, morphology=None):
36 """
37 Check whether we're dealing with an uninflected paradigm, so we can
38 avoid lemmatization entirely.
39 """
40 morphology = {} if morphology is None else morphology
41 others = [key for key in morphology if key not in (POS, 'number', 'pos', 'verbform')]
42 true_morph_key = morphology.get('morph', 0)
43 if univ_pos == 'noun' and morphology.get('Number') == 'sing':
44 return True
45 elif univ_pos == 'verb' and morphology.get('VerbForm') == 'inf':
46 return True
47 elif univ_pos == 'adj' and morphology.get('Degree') == 'pos':
48 return True
49 elif VerbForm_inf in morphology:
50 return True
51 elif VerbForm_none in morphology:
52 return True
53 elif Number_sing in morphology:
54 return True
55 elif Degree_pos in morphology:
56 return True
57 else:
58 return False
59
60 def noun(self, string, morphology=None):
61 return self(string, 'noun', morphology)
62
63 def verb(self, string, morphology=None):
64 return self(string, 'verb', morphology)
65
66 def adj(self, string, morphology=None):
67 return self(string, 'adj', morphology)
68
69 def punct(self, string, morphology=None):
70 return self(string, 'punct', morphology)
71
72
73 def lemmatize(string, index, exceptions, rules):
74 string = string.lower()
75 forms = []
76 # TODO: Is this correct? See discussion in Issue #435.
77 #if string in index:
78 # forms.append(string)
79 forms.extend(exceptions.get(string, []))
80 oov_forms = []
81 for old, new in rules:
82 if string.endswith(old):
83 form = string[:len(string) - len(old)] + new
84 if not form:
85 pass
86 elif form in index or not form.isalpha():
87 forms.append(form)
88 else:
89 oov_forms.append(form)
90 if not forms:
91 forms.extend(oov_forms)
92 if not forms:
93 forms.append(string)
94 return set(forms)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/spacy/lemmatizer.py b/spacy/lemmatizer.py
--- a/spacy/lemmatizer.py
+++ b/spacy/lemmatizer.py
@@ -78,15 +78,16 @@
# forms.append(string)
forms.extend(exceptions.get(string, []))
oov_forms = []
- for old, new in rules:
- if string.endswith(old):
- form = string[:len(string) - len(old)] + new
- if not form:
- pass
- elif form in index or not form.isalpha():
- forms.append(form)
- else:
- oov_forms.append(form)
+ if not forms:
+ for old, new in rules:
+ if string.endswith(old):
+ form = string[:len(string) - len(old)] + new
+ if not form:
+ pass
+ elif form in index or not form.isalpha():
+ forms.append(form)
+ else:
+ oov_forms.append(form)
if not forms:
forms.extend(oov_forms)
if not forms:
| {"golden_diff": "diff --git a/spacy/lemmatizer.py b/spacy/lemmatizer.py\n--- a/spacy/lemmatizer.py\n+++ b/spacy/lemmatizer.py\n@@ -78,15 +78,16 @@\n # forms.append(string)\n forms.extend(exceptions.get(string, []))\n oov_forms = []\n- for old, new in rules:\n- if string.endswith(old):\n- form = string[:len(string) - len(old)] + new\n- if not form:\n- pass\n- elif form in index or not form.isalpha():\n- forms.append(form)\n- else:\n- oov_forms.append(form)\n+ if not forms:\n+ for old, new in rules:\n+ if string.endswith(old):\n+ form = string[:len(string) - len(old)] + new\n+ if not form:\n+ pass\n+ elif form in index or not form.isalpha():\n+ forms.append(form)\n+ else:\n+ oov_forms.append(form)\n if not forms:\n forms.extend(oov_forms)\n if not forms:\n", "issue": "Lemmatizer exceptions: `lemma_strings` get sorted anyway\nLemmatization exceptions have been working inconsistently, even when they are added directly in `corpora/en/wordnet/dict/verb.exc`\r\n\r\nThe minimal test case of `coping` at https://github.com/explosion/spaCy/issues/389 reveals that, at \r\nhttps://github.com/explosion/spaCy/blob/master/spacy/lemmatizer.py#L94, the lemmatizer's list of potential forms (where the thing in lemmatizer.exceptions is item [0]) is cast to a `set` (and so loses ordering); then in https://github.com/explosion/spaCy/blob/master/spacy/morphology.pyx#L149, that `set` gets sorted. So lemmatizer exceptions only work if they also come first alphabetically!\r\n\r\nI've implemented the fix for this, but I need this issue in order to submit the PR!\r\n\r\nOne question, though, for @honnibal: this can be fixed either as I did it locally -- return the whole list, with exceptions and then anything that comes back from the lemmatizer's `rules` -- or skip the rules altogether if we used an exception. I think it's more useful downstream if we keep all lemma candidates, even if we're not using them in the default pipeline. 
But it also seems only destructive to do `sorted(set())` on them!\n", "before_files": [{"content": "# coding: utf8\nfrom __future__ import unicode_literals\n\nfrom .symbols import POS, NOUN, VERB, ADJ, PUNCT\nfrom .symbols import VerbForm_inf, VerbForm_none, Number_sing, Degree_pos\n\n\nclass Lemmatizer(object):\n @classmethod\n def load(cls, path, index=None, exc=None, rules=None):\n return cls(index or {}, exc or {}, rules or {})\n\n def __init__(self, index, exceptions, rules):\n self.index = index\n self.exc = exceptions\n self.rules = rules\n\n def __call__(self, string, univ_pos, morphology=None):\n if univ_pos == NOUN:\n univ_pos = 'noun'\n elif univ_pos == VERB:\n univ_pos = 'verb'\n elif univ_pos == ADJ:\n univ_pos = 'adj'\n elif univ_pos == PUNCT:\n univ_pos = 'punct'\n # See Issue #435 for example of where this logic is requied.\n if self.is_base_form(univ_pos, morphology):\n return set([string.lower()])\n lemmas = lemmatize(string, self.index.get(univ_pos, {}),\n self.exc.get(univ_pos, {}),\n self.rules.get(univ_pos, []))\n return lemmas\n\n def is_base_form(self, univ_pos, morphology=None):\n \"\"\"\n Check whether we're dealing with an uninflected paradigm, so we can\n avoid lemmatization entirely.\n \"\"\"\n morphology = {} if morphology is None else morphology\n others = [key for key in morphology if key not in (POS, 'number', 'pos', 'verbform')]\n true_morph_key = morphology.get('morph', 0)\n if univ_pos == 'noun' and morphology.get('Number') == 'sing':\n return True\n elif univ_pos == 'verb' and morphology.get('VerbForm') == 'inf':\n return True\n elif univ_pos == 'adj' and morphology.get('Degree') == 'pos':\n return True\n elif VerbForm_inf in morphology:\n return True\n elif VerbForm_none in morphology:\n return True\n elif Number_sing in morphology:\n return True\n elif Degree_pos in morphology:\n return True\n else:\n return False\n\n def noun(self, string, morphology=None):\n return self(string, 'noun', morphology)\n\n def verb(self, string, morphology=None):\n return self(string, 'verb', morphology)\n\n def adj(self, string, morphology=None):\n return self(string, 'adj', morphology)\n\n def punct(self, string, morphology=None):\n return self(string, 'punct', morphology)\n\n\ndef lemmatize(string, index, exceptions, rules):\n string = string.lower()\n forms = []\n # TODO: Is this correct? 
See discussion in Issue #435.\n #if string in index:\n # forms.append(string)\n forms.extend(exceptions.get(string, []))\n oov_forms = []\n for old, new in rules:\n if string.endswith(old):\n form = string[:len(string) - len(old)] + new\n if not form:\n pass\n elif form in index or not form.isalpha():\n forms.append(form)\n else:\n oov_forms.append(form)\n if not forms:\n forms.extend(oov_forms)\n if not forms:\n forms.append(string)\n return set(forms)\n", "path": "spacy/lemmatizer.py"}], "after_files": [{"content": "# coding: utf8\nfrom __future__ import unicode_literals\n\nfrom .symbols import POS, NOUN, VERB, ADJ, PUNCT\nfrom .symbols import VerbForm_inf, VerbForm_none, Number_sing, Degree_pos\n\n\nclass Lemmatizer(object):\n @classmethod\n def load(cls, path, index=None, exc=None, rules=None):\n return cls(index or {}, exc or {}, rules or {})\n\n def __init__(self, index, exceptions, rules):\n self.index = index\n self.exc = exceptions\n self.rules = rules\n\n def __call__(self, string, univ_pos, morphology=None):\n if univ_pos == NOUN:\n univ_pos = 'noun'\n elif univ_pos == VERB:\n univ_pos = 'verb'\n elif univ_pos == ADJ:\n univ_pos = 'adj'\n elif univ_pos == PUNCT:\n univ_pos = 'punct'\n # See Issue #435 for example of where this logic is requied.\n if self.is_base_form(univ_pos, morphology):\n return set([string.lower()])\n lemmas = lemmatize(string, self.index.get(univ_pos, {}),\n self.exc.get(univ_pos, {}),\n self.rules.get(univ_pos, []))\n return lemmas\n\n def is_base_form(self, univ_pos, morphology=None):\n \"\"\"\n Check whether we're dealing with an uninflected paradigm, so we can\n avoid lemmatization entirely.\n \"\"\"\n morphology = {} if morphology is None else morphology\n others = [key for key in morphology if key not in (POS, 'number', 'pos', 'verbform')]\n true_morph_key = morphology.get('morph', 0)\n if univ_pos == 'noun' and morphology.get('Number') == 'sing':\n return True\n elif univ_pos == 'verb' and morphology.get('VerbForm') == 'inf':\n return True\n elif univ_pos == 'adj' and morphology.get('Degree') == 'pos':\n return True\n elif VerbForm_inf in morphology:\n return True\n elif VerbForm_none in morphology:\n return True\n elif Number_sing in morphology:\n return True\n elif Degree_pos in morphology:\n return True\n else:\n return False\n\n def noun(self, string, morphology=None):\n return self(string, 'noun', morphology)\n\n def verb(self, string, morphology=None):\n return self(string, 'verb', morphology)\n\n def adj(self, string, morphology=None):\n return self(string, 'adj', morphology)\n\n def punct(self, string, morphology=None):\n return self(string, 'punct', morphology)\n\n\ndef lemmatize(string, index, exceptions, rules):\n string = string.lower()\n forms = []\n # TODO: Is this correct? See discussion in Issue #435.\n #if string in index:\n # forms.append(string)\n forms.extend(exceptions.get(string, []))\n oov_forms = []\n if not forms:\n for old, new in rules:\n if string.endswith(old):\n form = string[:len(string) - len(old)] + new\n if not form:\n pass\n elif form in index or not form.isalpha():\n forms.append(form)\n else:\n oov_forms.append(form)\n if not forms:\n forms.extend(oov_forms)\n if not forms:\n forms.append(string)\n return set(forms)\n", "path": "spacy/lemmatizer.py"}]} | 1,504 | 244 |
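The root cause described in the issue above is purely an ordering problem: the candidate lemmas are cast to a `set` and later sorted, so an exception from the `.exc` files only survives if it also happens to sort first. A small plain-Python sketch (the strings `cope`/`cop` are assumed values for the `coping` example, not read from spaCy's data files):

```python
exception_forms = ["cope"]   # what the exceptions table supplies for "coping"
rule_forms = ["cop"]         # an in-vocabulary form the "-ing" suffix rules could also yield

# Old behaviour: exception and rule output are merged, cast to a set, and the
# caller sorts the set -- the exception only wins if it sorts first.
candidates = sorted(set(exception_forms + rule_forms))
print(candidates[0])   # 'cop'  -> the exception is silently overridden

# Patched behaviour: suffix rules are skipped once an exception matched, so the
# exception is the only candidate and survives the later sorted(set(...)).
candidates = sorted(set(exception_forms))
print(candidates[0])   # 'cope'
```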
gh_patches_debug_51225 | rasdani/github-patches | git_diff | python-pillow__Pillow-7555 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[10.1.0 regression] Palette images save as blank PDFs
Minimal example (you can use [this tiny PNG](https://github.com/python-pillow/Pillow/assets/1119169/d8d45152-7734-4fe3-a2d3-fb49839a0893) as the input image):
```python
from PIL import Image
image = Image.open('test.png')
image = image.convert('P')
image.save('test.pdf')
```
Output PDF with Pillow 10.0.1:

Output PDF with Pillow 10.1.0:

Issue faced with Python 3.11.6 on Ubuntu 22.04 and Debian 12 (bookworm). I also had the same issue in Docker environments, so I could make a Docker image if needed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/PIL/PdfImagePlugin.py`
Content:
```
1 #
2 # The Python Imaging Library.
3 # $Id$
4 #
5 # PDF (Acrobat) file handling
6 #
7 # History:
8 # 1996-07-16 fl Created
9 # 1997-01-18 fl Fixed header
10 # 2004-02-21 fl Fixes for 1/L/CMYK images, etc.
11 # 2004-02-24 fl Fixes for 1 and P images.
12 #
13 # Copyright (c) 1997-2004 by Secret Labs AB. All rights reserved.
14 # Copyright (c) 1996-1997 by Fredrik Lundh.
15 #
16 # See the README file for information on usage and redistribution.
17 #
18
19 ##
20 # Image plugin for PDF images (output only).
21 ##
22
23 import io
24 import math
25 import os
26 import time
27
28 from . import Image, ImageFile, ImageSequence, PdfParser, __version__, features
29
30 #
31 # --------------------------------------------------------------------
32
33 # object ids:
34 # 1. catalogue
35 # 2. pages
36 # 3. image
37 # 4. page
38 # 5. page contents
39
40
41 def _save_all(im, fp, filename):
42 _save(im, fp, filename, save_all=True)
43
44
45 ##
46 # (Internal) Image save plugin for the PDF format.
47
48
49 def _write_image(im, filename, existing_pdf, image_refs):
50 # FIXME: Should replace ASCIIHexDecode with RunLengthDecode
51 # (packbits) or LZWDecode (tiff/lzw compression). Note that
52 # PDF 1.2 also supports Flatedecode (zip compression).
53
54 params = None
55 decode = None
56
57 #
58 # Get image characteristics
59
60 width, height = im.size
61
62 dict_obj = {"BitsPerComponent": 8}
63 if im.mode == "1":
64 if features.check("libtiff"):
65 filter = "CCITTFaxDecode"
66 dict_obj["BitsPerComponent"] = 1
67 params = PdfParser.PdfArray(
68 [
69 PdfParser.PdfDict(
70 {
71 "K": -1,
72 "BlackIs1": True,
73 "Columns": width,
74 "Rows": height,
75 }
76 )
77 ]
78 )
79 else:
80 filter = "DCTDecode"
81 dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceGray")
82 procset = "ImageB" # grayscale
83 elif im.mode == "L":
84 filter = "DCTDecode"
85 # params = f"<< /Predictor 15 /Columns {width-2} >>"
86 dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceGray")
87 procset = "ImageB" # grayscale
88 elif im.mode == "LA":
89 filter = "JPXDecode"
90 # params = f"<< /Predictor 15 /Columns {width-2} >>"
91 procset = "ImageB" # grayscale
92 dict_obj["SMaskInData"] = 1
93 elif im.mode == "P":
94 filter = "ASCIIHexDecode"
95 palette = im.getpalette()
96 dict_obj["ColorSpace"] = [
97 PdfParser.PdfName("Indexed"),
98 PdfParser.PdfName("DeviceRGB"),
99 255,
100 PdfParser.PdfBinary(palette),
101 ]
102 procset = "ImageI" # indexed color
103
104 if "transparency" in im.info:
105 smask = im.convert("LA").getchannel("A")
106 smask.encoderinfo = {}
107
108 image_ref = _write_image(smask, filename, existing_pdf, image_refs)[0]
109 dict_obj["SMask"] = image_ref
110 elif im.mode == "RGB":
111 filter = "DCTDecode"
112 dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceRGB")
113 procset = "ImageC" # color images
114 elif im.mode == "RGBA":
115 filter = "JPXDecode"
116 procset = "ImageC" # color images
117 dict_obj["SMaskInData"] = 1
118 elif im.mode == "CMYK":
119 filter = "DCTDecode"
120 dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceCMYK")
121 procset = "ImageC" # color images
122 decode = [1, 0, 1, 0, 1, 0, 1, 0]
123 else:
124 msg = f"cannot save mode {im.mode}"
125 raise ValueError(msg)
126
127 #
128 # image
129
130 op = io.BytesIO()
131
132 if filter == "ASCIIHexDecode":
133 ImageFile._save(im, op, [("hex", (0, 0) + im.size, 0, im.mode)])
134 elif filter == "CCITTFaxDecode":
135 im.save(
136 op,
137 "TIFF",
138 compression="group4",
139 # use a single strip
140 strip_size=math.ceil(width / 8) * height,
141 )
142 elif filter == "DCTDecode":
143 Image.SAVE["JPEG"](im, op, filename)
144 elif filter == "JPXDecode":
145 del dict_obj["BitsPerComponent"]
146 Image.SAVE["JPEG2000"](im, op, filename)
147 else:
148 msg = f"unsupported PDF filter ({filter})"
149 raise ValueError(msg)
150
151 stream = op.getvalue()
152 if filter == "CCITTFaxDecode":
153 stream = stream[8:]
154 filter = PdfParser.PdfArray([PdfParser.PdfName(filter)])
155 else:
156 filter = PdfParser.PdfName(filter)
157
158 image_ref = image_refs.pop(0)
159 existing_pdf.write_obj(
160 image_ref,
161 stream=stream,
162 Type=PdfParser.PdfName("XObject"),
163 Subtype=PdfParser.PdfName("Image"),
164 Width=width, # * 72.0 / x_resolution,
165 Height=height, # * 72.0 / y_resolution,
166 Filter=filter,
167 Decode=decode,
168 DecodeParms=params,
169 **dict_obj,
170 )
171
172 return image_ref, procset
173
174
175 def _save(im, fp, filename, save_all=False):
176 is_appending = im.encoderinfo.get("append", False)
177 if is_appending:
178 existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="r+b")
179 else:
180 existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="w+b")
181
182 dpi = im.encoderinfo.get("dpi")
183 if dpi:
184 x_resolution = dpi[0]
185 y_resolution = dpi[1]
186 else:
187 x_resolution = y_resolution = im.encoderinfo.get("resolution", 72.0)
188
189 info = {
190 "title": None
191 if is_appending
192 else os.path.splitext(os.path.basename(filename))[0],
193 "author": None,
194 "subject": None,
195 "keywords": None,
196 "creator": None,
197 "producer": None,
198 "creationDate": None if is_appending else time.gmtime(),
199 "modDate": None if is_appending else time.gmtime(),
200 }
201 for k, default in info.items():
202 v = im.encoderinfo.get(k) if k in im.encoderinfo else default
203 if v:
204 existing_pdf.info[k[0].upper() + k[1:]] = v
205
206 #
207 # make sure image data is available
208 im.load()
209
210 existing_pdf.start_writing()
211 existing_pdf.write_header()
212 existing_pdf.write_comment(f"created by Pillow {__version__} PDF driver")
213
214 #
215 # pages
216 ims = [im]
217 if save_all:
218 append_images = im.encoderinfo.get("append_images", [])
219 for append_im in append_images:
220 append_im.encoderinfo = im.encoderinfo.copy()
221 ims.append(append_im)
222 number_of_pages = 0
223 image_refs = []
224 page_refs = []
225 contents_refs = []
226 for im in ims:
227 im_number_of_pages = 1
228 if save_all:
229 try:
230 im_number_of_pages = im.n_frames
231 except AttributeError:
232 # Image format does not have n_frames.
233 # It is a single frame image
234 pass
235 number_of_pages += im_number_of_pages
236 for i in range(im_number_of_pages):
237 image_refs.append(existing_pdf.next_object_id(0))
238 if im.mode == "P" and "transparency" in im.info:
239 image_refs.append(existing_pdf.next_object_id(0))
240
241 page_refs.append(existing_pdf.next_object_id(0))
242 contents_refs.append(existing_pdf.next_object_id(0))
243 existing_pdf.pages.append(page_refs[-1])
244
245 #
246 # catalog and list of pages
247 existing_pdf.write_catalog()
248
249 page_number = 0
250 for im_sequence in ims:
251 im_pages = ImageSequence.Iterator(im_sequence) if save_all else [im_sequence]
252 for im in im_pages:
253 image_ref, procset = _write_image(im, filename, existing_pdf, image_refs)
254
255 #
256 # page
257
258 existing_pdf.write_page(
259 page_refs[page_number],
260 Resources=PdfParser.PdfDict(
261 ProcSet=[PdfParser.PdfName("PDF"), PdfParser.PdfName(procset)],
262 XObject=PdfParser.PdfDict(image=image_ref),
263 ),
264 MediaBox=[
265 0,
266 0,
267 im.width * 72.0 / x_resolution,
268 im.height * 72.0 / y_resolution,
269 ],
270 Contents=contents_refs[page_number],
271 )
272
273 #
274 # page contents
275
276 page_contents = b"q %f 0 0 %f 0 0 cm /image Do Q\n" % (
277 im.width * 72.0 / x_resolution,
278 im.height * 72.0 / y_resolution,
279 )
280
281 existing_pdf.write_obj(contents_refs[page_number], stream=page_contents)
282
283 page_number += 1
284
285 #
286 # trailer
287 existing_pdf.write_xref_and_trailer()
288 if hasattr(fp, "flush"):
289 fp.flush()
290 existing_pdf.close()
291
292
293 #
294 # --------------------------------------------------------------------
295
296
297 Image.register_save("PDF", _save)
298 Image.register_save_all("PDF", _save_all)
299
300 Image.register_extension("PDF", ".pdf")
301
302 Image.register_mime("PDF", "application/pdf")
303
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/PIL/PdfImagePlugin.py b/src/PIL/PdfImagePlugin.py
--- a/src/PIL/PdfImagePlugin.py
+++ b/src/PIL/PdfImagePlugin.py
@@ -96,7 +96,7 @@
dict_obj["ColorSpace"] = [
PdfParser.PdfName("Indexed"),
PdfParser.PdfName("DeviceRGB"),
- 255,
+ len(palette) // 3 - 1,
PdfParser.PdfBinary(palette),
]
procset = "ImageI" # indexed color
| {"golden_diff": "diff --git a/src/PIL/PdfImagePlugin.py b/src/PIL/PdfImagePlugin.py\n--- a/src/PIL/PdfImagePlugin.py\n+++ b/src/PIL/PdfImagePlugin.py\n@@ -96,7 +96,7 @@\n dict_obj[\"ColorSpace\"] = [\n PdfParser.PdfName(\"Indexed\"),\n PdfParser.PdfName(\"DeviceRGB\"),\n- 255,\n+ len(palette) // 3 - 1,\n PdfParser.PdfBinary(palette),\n ]\n procset = \"ImageI\" # indexed color\n", "issue": "[10.1.0 regression] Palette images save as blank PDFs\nMinimal example (you can use [this tiny PNG](https://github.com/python-pillow/Pillow/assets/1119169/d8d45152-7734-4fe3-a2d3-fb49839a0893) for example):\r\n\r\n```python\r\nfrom PIL import Image\r\n\r\nimage = Image.open('test.png')\r\nimage = image.convert('P')\r\nimage.save('test.pdf')\r\n```\r\n\r\nOutput PDF with Pillow 10.0.1:\r\n\r\n\r\nOutput PDF with Pillow 10.1.0:\r\n\r\n\r\nIssue faced with Python 3.11.6 on Ubuntu 22.04 and Debian 12 (bookworm). I also had the same issue in Docker environments, so I could make a Docker image if needed.\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# PDF (Acrobat) file handling\n#\n# History:\n# 1996-07-16 fl Created\n# 1997-01-18 fl Fixed header\n# 2004-02-21 fl Fixes for 1/L/CMYK images, etc.\n# 2004-02-24 fl Fixes for 1 and P images.\n#\n# Copyright (c) 1997-2004 by Secret Labs AB. All rights reserved.\n# Copyright (c) 1996-1997 by Fredrik Lundh.\n#\n# See the README file for information on usage and redistribution.\n#\n\n##\n# Image plugin for PDF images (output only).\n##\n\nimport io\nimport math\nimport os\nimport time\n\nfrom . import Image, ImageFile, ImageSequence, PdfParser, __version__, features\n\n#\n# --------------------------------------------------------------------\n\n# object ids:\n# 1. catalogue\n# 2. pages\n# 3. image\n# 4. page\n# 5. page contents\n\n\ndef _save_all(im, fp, filename):\n _save(im, fp, filename, save_all=True)\n\n\n##\n# (Internal) Image save plugin for the PDF format.\n\n\ndef _write_image(im, filename, existing_pdf, image_refs):\n # FIXME: Should replace ASCIIHexDecode with RunLengthDecode\n # (packbits) or LZWDecode (tiff/lzw compression). 
Note that\n # PDF 1.2 also supports Flatedecode (zip compression).\n\n params = None\n decode = None\n\n #\n # Get image characteristics\n\n width, height = im.size\n\n dict_obj = {\"BitsPerComponent\": 8}\n if im.mode == \"1\":\n if features.check(\"libtiff\"):\n filter = \"CCITTFaxDecode\"\n dict_obj[\"BitsPerComponent\"] = 1\n params = PdfParser.PdfArray(\n [\n PdfParser.PdfDict(\n {\n \"K\": -1,\n \"BlackIs1\": True,\n \"Columns\": width,\n \"Rows\": height,\n }\n )\n ]\n )\n else:\n filter = \"DCTDecode\"\n dict_obj[\"ColorSpace\"] = PdfParser.PdfName(\"DeviceGray\")\n procset = \"ImageB\" # grayscale\n elif im.mode == \"L\":\n filter = \"DCTDecode\"\n # params = f\"<< /Predictor 15 /Columns {width-2} >>\"\n dict_obj[\"ColorSpace\"] = PdfParser.PdfName(\"DeviceGray\")\n procset = \"ImageB\" # grayscale\n elif im.mode == \"LA\":\n filter = \"JPXDecode\"\n # params = f\"<< /Predictor 15 /Columns {width-2} >>\"\n procset = \"ImageB\" # grayscale\n dict_obj[\"SMaskInData\"] = 1\n elif im.mode == \"P\":\n filter = \"ASCIIHexDecode\"\n palette = im.getpalette()\n dict_obj[\"ColorSpace\"] = [\n PdfParser.PdfName(\"Indexed\"),\n PdfParser.PdfName(\"DeviceRGB\"),\n 255,\n PdfParser.PdfBinary(palette),\n ]\n procset = \"ImageI\" # indexed color\n\n if \"transparency\" in im.info:\n smask = im.convert(\"LA\").getchannel(\"A\")\n smask.encoderinfo = {}\n\n image_ref = _write_image(smask, filename, existing_pdf, image_refs)[0]\n dict_obj[\"SMask\"] = image_ref\n elif im.mode == \"RGB\":\n filter = \"DCTDecode\"\n dict_obj[\"ColorSpace\"] = PdfParser.PdfName(\"DeviceRGB\")\n procset = \"ImageC\" # color images\n elif im.mode == \"RGBA\":\n filter = \"JPXDecode\"\n procset = \"ImageC\" # color images\n dict_obj[\"SMaskInData\"] = 1\n elif im.mode == \"CMYK\":\n filter = \"DCTDecode\"\n dict_obj[\"ColorSpace\"] = PdfParser.PdfName(\"DeviceCMYK\")\n procset = \"ImageC\" # color images\n decode = [1, 0, 1, 0, 1, 0, 1, 0]\n else:\n msg = f\"cannot save mode {im.mode}\"\n raise ValueError(msg)\n\n #\n # image\n\n op = io.BytesIO()\n\n if filter == \"ASCIIHexDecode\":\n ImageFile._save(im, op, [(\"hex\", (0, 0) + im.size, 0, im.mode)])\n elif filter == \"CCITTFaxDecode\":\n im.save(\n op,\n \"TIFF\",\n compression=\"group4\",\n # use a single strip\n strip_size=math.ceil(width / 8) * height,\n )\n elif filter == \"DCTDecode\":\n Image.SAVE[\"JPEG\"](im, op, filename)\n elif filter == \"JPXDecode\":\n del dict_obj[\"BitsPerComponent\"]\n Image.SAVE[\"JPEG2000\"](im, op, filename)\n else:\n msg = f\"unsupported PDF filter ({filter})\"\n raise ValueError(msg)\n\n stream = op.getvalue()\n if filter == \"CCITTFaxDecode\":\n stream = stream[8:]\n filter = PdfParser.PdfArray([PdfParser.PdfName(filter)])\n else:\n filter = PdfParser.PdfName(filter)\n\n image_ref = image_refs.pop(0)\n existing_pdf.write_obj(\n image_ref,\n stream=stream,\n Type=PdfParser.PdfName(\"XObject\"),\n Subtype=PdfParser.PdfName(\"Image\"),\n Width=width, # * 72.0 / x_resolution,\n Height=height, # * 72.0 / y_resolution,\n Filter=filter,\n Decode=decode,\n DecodeParms=params,\n **dict_obj,\n )\n\n return image_ref, procset\n\n\ndef _save(im, fp, filename, save_all=False):\n is_appending = im.encoderinfo.get(\"append\", False)\n if is_appending:\n existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode=\"r+b\")\n else:\n existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode=\"w+b\")\n\n dpi = im.encoderinfo.get(\"dpi\")\n if dpi:\n x_resolution = dpi[0]\n y_resolution = dpi[1]\n else:\n x_resolution = y_resolution = 
im.encoderinfo.get(\"resolution\", 72.0)\n\n info = {\n \"title\": None\n if is_appending\n else os.path.splitext(os.path.basename(filename))[0],\n \"author\": None,\n \"subject\": None,\n \"keywords\": None,\n \"creator\": None,\n \"producer\": None,\n \"creationDate\": None if is_appending else time.gmtime(),\n \"modDate\": None if is_appending else time.gmtime(),\n }\n for k, default in info.items():\n v = im.encoderinfo.get(k) if k in im.encoderinfo else default\n if v:\n existing_pdf.info[k[0].upper() + k[1:]] = v\n\n #\n # make sure image data is available\n im.load()\n\n existing_pdf.start_writing()\n existing_pdf.write_header()\n existing_pdf.write_comment(f\"created by Pillow {__version__} PDF driver\")\n\n #\n # pages\n ims = [im]\n if save_all:\n append_images = im.encoderinfo.get(\"append_images\", [])\n for append_im in append_images:\n append_im.encoderinfo = im.encoderinfo.copy()\n ims.append(append_im)\n number_of_pages = 0\n image_refs = []\n page_refs = []\n contents_refs = []\n for im in ims:\n im_number_of_pages = 1\n if save_all:\n try:\n im_number_of_pages = im.n_frames\n except AttributeError:\n # Image format does not have n_frames.\n # It is a single frame image\n pass\n number_of_pages += im_number_of_pages\n for i in range(im_number_of_pages):\n image_refs.append(existing_pdf.next_object_id(0))\n if im.mode == \"P\" and \"transparency\" in im.info:\n image_refs.append(existing_pdf.next_object_id(0))\n\n page_refs.append(existing_pdf.next_object_id(0))\n contents_refs.append(existing_pdf.next_object_id(0))\n existing_pdf.pages.append(page_refs[-1])\n\n #\n # catalog and list of pages\n existing_pdf.write_catalog()\n\n page_number = 0\n for im_sequence in ims:\n im_pages = ImageSequence.Iterator(im_sequence) if save_all else [im_sequence]\n for im in im_pages:\n image_ref, procset = _write_image(im, filename, existing_pdf, image_refs)\n\n #\n # page\n\n existing_pdf.write_page(\n page_refs[page_number],\n Resources=PdfParser.PdfDict(\n ProcSet=[PdfParser.PdfName(\"PDF\"), PdfParser.PdfName(procset)],\n XObject=PdfParser.PdfDict(image=image_ref),\n ),\n MediaBox=[\n 0,\n 0,\n im.width * 72.0 / x_resolution,\n im.height * 72.0 / y_resolution,\n ],\n Contents=contents_refs[page_number],\n )\n\n #\n # page contents\n\n page_contents = b\"q %f 0 0 %f 0 0 cm /image Do Q\\n\" % (\n im.width * 72.0 / x_resolution,\n im.height * 72.0 / y_resolution,\n )\n\n existing_pdf.write_obj(contents_refs[page_number], stream=page_contents)\n\n page_number += 1\n\n #\n # trailer\n existing_pdf.write_xref_and_trailer()\n if hasattr(fp, \"flush\"):\n fp.flush()\n existing_pdf.close()\n\n\n#\n# --------------------------------------------------------------------\n\n\nImage.register_save(\"PDF\", _save)\nImage.register_save_all(\"PDF\", _save_all)\n\nImage.register_extension(\"PDF\", \".pdf\")\n\nImage.register_mime(\"PDF\", \"application/pdf\")\n", "path": "src/PIL/PdfImagePlugin.py"}], "after_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# PDF (Acrobat) file handling\n#\n# History:\n# 1996-07-16 fl Created\n# 1997-01-18 fl Fixed header\n# 2004-02-21 fl Fixes for 1/L/CMYK images, etc.\n# 2004-02-24 fl Fixes for 1 and P images.\n#\n# Copyright (c) 1997-2004 by Secret Labs AB. All rights reserved.\n# Copyright (c) 1996-1997 by Fredrik Lundh.\n#\n# See the README file for information on usage and redistribution.\n#\n\n##\n# Image plugin for PDF images (output only).\n##\n\nimport io\nimport math\nimport os\nimport time\n\nfrom . 
import Image, ImageFile, ImageSequence, PdfParser, __version__, features\n\n#\n# --------------------------------------------------------------------\n\n# object ids:\n# 1. catalogue\n# 2. pages\n# 3. image\n# 4. page\n# 5. page contents\n\n\ndef _save_all(im, fp, filename):\n _save(im, fp, filename, save_all=True)\n\n\n##\n# (Internal) Image save plugin for the PDF format.\n\n\ndef _write_image(im, filename, existing_pdf, image_refs):\n # FIXME: Should replace ASCIIHexDecode with RunLengthDecode\n # (packbits) or LZWDecode (tiff/lzw compression). Note that\n # PDF 1.2 also supports Flatedecode (zip compression).\n\n params = None\n decode = None\n\n #\n # Get image characteristics\n\n width, height = im.size\n\n dict_obj = {\"BitsPerComponent\": 8}\n if im.mode == \"1\":\n if features.check(\"libtiff\"):\n filter = \"CCITTFaxDecode\"\n dict_obj[\"BitsPerComponent\"] = 1\n params = PdfParser.PdfArray(\n [\n PdfParser.PdfDict(\n {\n \"K\": -1,\n \"BlackIs1\": True,\n \"Columns\": width,\n \"Rows\": height,\n }\n )\n ]\n )\n else:\n filter = \"DCTDecode\"\n dict_obj[\"ColorSpace\"] = PdfParser.PdfName(\"DeviceGray\")\n procset = \"ImageB\" # grayscale\n elif im.mode == \"L\":\n filter = \"DCTDecode\"\n # params = f\"<< /Predictor 15 /Columns {width-2} >>\"\n dict_obj[\"ColorSpace\"] = PdfParser.PdfName(\"DeviceGray\")\n procset = \"ImageB\" # grayscale\n elif im.mode == \"LA\":\n filter = \"JPXDecode\"\n # params = f\"<< /Predictor 15 /Columns {width-2} >>\"\n procset = \"ImageB\" # grayscale\n dict_obj[\"SMaskInData\"] = 1\n elif im.mode == \"P\":\n filter = \"ASCIIHexDecode\"\n palette = im.getpalette()\n dict_obj[\"ColorSpace\"] = [\n PdfParser.PdfName(\"Indexed\"),\n PdfParser.PdfName(\"DeviceRGB\"),\n len(palette) // 3 - 1,\n PdfParser.PdfBinary(palette),\n ]\n procset = \"ImageI\" # indexed color\n\n if \"transparency\" in im.info:\n smask = im.convert(\"LA\").getchannel(\"A\")\n smask.encoderinfo = {}\n\n image_ref = _write_image(smask, filename, existing_pdf, image_refs)[0]\n dict_obj[\"SMask\"] = image_ref\n elif im.mode == \"RGB\":\n filter = \"DCTDecode\"\n dict_obj[\"ColorSpace\"] = PdfParser.PdfName(\"DeviceRGB\")\n procset = \"ImageC\" # color images\n elif im.mode == \"RGBA\":\n filter = \"JPXDecode\"\n procset = \"ImageC\" # color images\n dict_obj[\"SMaskInData\"] = 1\n elif im.mode == \"CMYK\":\n filter = \"DCTDecode\"\n dict_obj[\"ColorSpace\"] = PdfParser.PdfName(\"DeviceCMYK\")\n procset = \"ImageC\" # color images\n decode = [1, 0, 1, 0, 1, 0, 1, 0]\n else:\n msg = f\"cannot save mode {im.mode}\"\n raise ValueError(msg)\n\n #\n # image\n\n op = io.BytesIO()\n\n if filter == \"ASCIIHexDecode\":\n ImageFile._save(im, op, [(\"hex\", (0, 0) + im.size, 0, im.mode)])\n elif filter == \"CCITTFaxDecode\":\n im.save(\n op,\n \"TIFF\",\n compression=\"group4\",\n # use a single strip\n strip_size=math.ceil(width / 8) * height,\n )\n elif filter == \"DCTDecode\":\n Image.SAVE[\"JPEG\"](im, op, filename)\n elif filter == \"JPXDecode\":\n del dict_obj[\"BitsPerComponent\"]\n Image.SAVE[\"JPEG2000\"](im, op, filename)\n else:\n msg = f\"unsupported PDF filter ({filter})\"\n raise ValueError(msg)\n\n stream = op.getvalue()\n if filter == \"CCITTFaxDecode\":\n stream = stream[8:]\n filter = PdfParser.PdfArray([PdfParser.PdfName(filter)])\n else:\n filter = PdfParser.PdfName(filter)\n\n image_ref = image_refs.pop(0)\n existing_pdf.write_obj(\n image_ref,\n stream=stream,\n Type=PdfParser.PdfName(\"XObject\"),\n Subtype=PdfParser.PdfName(\"Image\"),\n Width=width, # * 72.0 / 
x_resolution,\n Height=height, # * 72.0 / y_resolution,\n Filter=filter,\n Decode=decode,\n DecodeParms=params,\n **dict_obj,\n )\n\n return image_ref, procset\n\n\ndef _save(im, fp, filename, save_all=False):\n is_appending = im.encoderinfo.get(\"append\", False)\n if is_appending:\n existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode=\"r+b\")\n else:\n existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode=\"w+b\")\n\n dpi = im.encoderinfo.get(\"dpi\")\n if dpi:\n x_resolution = dpi[0]\n y_resolution = dpi[1]\n else:\n x_resolution = y_resolution = im.encoderinfo.get(\"resolution\", 72.0)\n\n info = {\n \"title\": None\n if is_appending\n else os.path.splitext(os.path.basename(filename))[0],\n \"author\": None,\n \"subject\": None,\n \"keywords\": None,\n \"creator\": None,\n \"producer\": None,\n \"creationDate\": None if is_appending else time.gmtime(),\n \"modDate\": None if is_appending else time.gmtime(),\n }\n for k, default in info.items():\n v = im.encoderinfo.get(k) if k in im.encoderinfo else default\n if v:\n existing_pdf.info[k[0].upper() + k[1:]] = v\n\n #\n # make sure image data is available\n im.load()\n\n existing_pdf.start_writing()\n existing_pdf.write_header()\n existing_pdf.write_comment(f\"created by Pillow {__version__} PDF driver\")\n\n #\n # pages\n ims = [im]\n if save_all:\n append_images = im.encoderinfo.get(\"append_images\", [])\n for append_im in append_images:\n append_im.encoderinfo = im.encoderinfo.copy()\n ims.append(append_im)\n number_of_pages = 0\n image_refs = []\n page_refs = []\n contents_refs = []\n for im in ims:\n im_number_of_pages = 1\n if save_all:\n try:\n im_number_of_pages = im.n_frames\n except AttributeError:\n # Image format does not have n_frames.\n # It is a single frame image\n pass\n number_of_pages += im_number_of_pages\n for i in range(im_number_of_pages):\n image_refs.append(existing_pdf.next_object_id(0))\n if im.mode == \"P\" and \"transparency\" in im.info:\n image_refs.append(existing_pdf.next_object_id(0))\n\n page_refs.append(existing_pdf.next_object_id(0))\n contents_refs.append(existing_pdf.next_object_id(0))\n existing_pdf.pages.append(page_refs[-1])\n\n #\n # catalog and list of pages\n existing_pdf.write_catalog()\n\n page_number = 0\n for im_sequence in ims:\n im_pages = ImageSequence.Iterator(im_sequence) if save_all else [im_sequence]\n for im in im_pages:\n image_ref, procset = _write_image(im, filename, existing_pdf, image_refs)\n\n #\n # page\n\n existing_pdf.write_page(\n page_refs[page_number],\n Resources=PdfParser.PdfDict(\n ProcSet=[PdfParser.PdfName(\"PDF\"), PdfParser.PdfName(procset)],\n XObject=PdfParser.PdfDict(image=image_ref),\n ),\n MediaBox=[\n 0,\n 0,\n im.width * 72.0 / x_resolution,\n im.height * 72.0 / y_resolution,\n ],\n Contents=contents_refs[page_number],\n )\n\n #\n # page contents\n\n page_contents = b\"q %f 0 0 %f 0 0 cm /image Do Q\\n\" % (\n im.width * 72.0 / x_resolution,\n im.height * 72.0 / y_resolution,\n )\n\n existing_pdf.write_obj(contents_refs[page_number], stream=page_contents)\n\n page_number += 1\n\n #\n # trailer\n existing_pdf.write_xref_and_trailer()\n if hasattr(fp, \"flush\"):\n fp.flush()\n existing_pdf.close()\n\n\n#\n# --------------------------------------------------------------------\n\n\nImage.register_save(\"PDF\", _save)\nImage.register_save_all(\"PDF\", _save_all)\n\nImage.register_extension(\"PDF\", \".pdf\")\n\nImage.register_mime(\"PDF\", \"application/pdf\")\n", "path": "src/PIL/PdfImagePlugin.py"}]} | 3,634 | 128 |
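The one-line change in the row above matters because a `P`-mode image is not guaranteed to carry a full 256-entry palette, while the hard-coded `255` told the PDF `/Indexed` colour space to expect 256 entries. A rough sketch of the arithmetic in plain Python (the palette values are invented for illustration):

```python
# A small palette: 3 RGB entries flattened to 9 bytes, in the flat RGB layout getpalette() uses.
palette = [0, 0, 0, 255, 255, 255, 0, 128, 0]

# /Indexed colour spaces declare "hival", the highest valid index,
# i.e. number_of_entries - 1 -- exactly what the patch now computes.
hival = len(palette) // 3 - 1
print(hival)                 # 2

# The previous hard-coded value declared indices 0..255 valid even though only
# len(palette) == 9 lookup bytes were written, a mismatch that can leave
# viewers rendering the page blank, as reported in the issue.
print(255, len(palette))     # 255 9
```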
gh_patches_debug_7553 | rasdani/github-patches | git_diff | CTFd__CTFd-1336 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Files not Downloadable
**Environment**:
- CTFd Version/Commit: 2.3.3
- Operating System: Windows
- Web Browser and Version: Chrome 80.0.3987.132
**What happened?**
When building a challenge that has a file, the file is not downloadable.
**What did you expect to happen?**
A file should be downloadable from the frontend.
**How to reproduce your issue**
Create a fresh CTFd instance. Run serve.py. Create a challenge and upload a file. Navigate to the challenge on the front end. The file link is 404.
**Any associated stack traces or error logs**
```
CTFd-Base> python .\serve.py
* Loaded module, <module 'CTFd.plugins.challenges' from 'C:\Users\StormCTF\Downloads\CTFd-Base\CTFd\plugins\challenges\__init__.pyc'>
* Loaded module, <module 'CTFd.plugins.dynamic_challenges' from 'C:\Users\StormCTF\Downloads\CTFd-Base\CTFd\plugins\dynamic_challenges\__init__.pyc'>
* Loaded module, <module 'CTFd.plugins.flags' from 'C:\Users\StormCTF\Downloads\CTFd-Base\CTFd\plugins\flags\__init__.pyc'>
* Serving Flask app "CTFd" (lazy loading)
* Environment: development
* Debug mode: on
* Restarting with stat
* Loaded module, <module 'CTFd.plugins.challenges' from 'C:\Users\StormCTF\Downloads\CTFd-Base\CTFd\plugins\challenges\__init__.pyc'>
* Loaded module, <module 'CTFd.plugins.dynamic_challenges' from 'C:\Users\StormCTF\Downloads\CTFd-Base\CTFd\plugins\dynamic_challenges\__init__.pyc'>
* Loaded module, <module 'CTFd.plugins.flags' from 'C:\Users\StormCTF\Downloads\CTFd-Base\CTFd\plugins\flags\__init__.pyc'>
* Debugger is active!
* Debugger PIN: 310-671-378
* Running on http://127.0.0.1:4000/ (Press CTRL+C to quit)
127.0.0.1 - - [19/Apr/2020 12:40:42] "GET /challenges HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:43] "GET /themes/core/static/css/challenge-board.dev.css?d=a5654bce HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:43] "GET /themes/core/static/css/main.dev.css?d=a5654bce HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:43] "GET /themes/core/static/css/core.dev.css?d=a5654bce HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:43] "GET /themes/core/static/js/vendor.bundle.dev.js?d=a5654bce HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:43] "GET /themes/core/static/css/fonts.dev.css?d=a5654bce HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:43] "GET /themes/core/static/js/core.dev.js?d=a5654bce HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:43] "GET /themes/core/static/js/helpers.dev.js?d=a5654bce HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:43] "GET /themes/core/static/js/pages/challenges.dev.js?d=a5654bce HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:43] "GET /themes/core/static/img/favicon.ico?d=a5654bce HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:43] "GET /api/v1/teams/me/solves HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:43] "GET /api/v1/challenges HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:43] "GET /api/v1/teams/me/solves HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:46] "GET /plugins/challenges/assets/view.js?_=1587314443544 HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:46] "GET /api/v1/challenges/1/solves HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:46] "GET /api/v1/challenges/1 HTTP/1.1" 200 -
127.0.0.1 - - [19/Apr/2020 12:40:50] "GET /files/e57e22cdfa601592e5239c2d358994d6%5COrange-Icon-Black-BG.png?token=.eJyrVipJTcyNz0xRsjLSUSotTi2CsdMyc1LBbMNaAObrC-o.Xpx_Dg.vcuDUI8hJiIgPLExAPan7WVg4FU HTTP/1.1" 404 -
127.0.0.1 - - [19/Apr/2020 12:40:50] "GET /themes/core/static/js/pages/main.dev.js?d=a5654bce HTTP/1.1" 200 -```
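The give-away is in the 404 line above: `%5C` is a percent-encoded backslash, so the stored upload path was joined with the Windows path separator instead of `/`. A quick standalone check (plain Python, not CTFd code) that just decodes the failing path segment:

```python
# Decode the path segment from the failing request: %5C comes back as a
# backslash, i.e. the hash directory and the filename were joined with the
# Windows separator when the file was uploaded.
from urllib.parse import unquote

print(unquote("e57e22cdfa601592e5239c2d358994d6%5COrange-Icon-Black-BG.png"))
# -> e57e22cdfa601592e5239c2d358994d6\Orange-Icon-Black-BG.png
```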
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/utils/uploads/uploaders.py`
Content:
```
1 import os
2 import string
3 from shutil import copyfileobj
4
5 import boto3
6 from flask import current_app, redirect, send_file
7 from flask.helpers import safe_join
8 from werkzeug.utils import secure_filename
9
10 from CTFd.utils import get_app_config
11 from CTFd.utils.encoding import hexencode
12
13
14 class BaseUploader(object):
15 def __init__(self):
16 raise NotImplementedError
17
18 def store(self, fileobj, filename):
19 raise NotImplementedError
20
21 def upload(self, file_obj, filename):
22 raise NotImplementedError
23
24 def download(self, filename):
25 raise NotImplementedError
26
27 def delete(self, filename):
28 raise NotImplementedError
29
30 def sync(self):
31 raise NotImplementedError
32
33
34 class FilesystemUploader(BaseUploader):
35 def __init__(self, base_path=None):
36 super(BaseUploader, self).__init__()
37 self.base_path = base_path or current_app.config.get("UPLOAD_FOLDER")
38
39 def store(self, fileobj, filename):
40 location = os.path.join(self.base_path, filename)
41 directory = os.path.dirname(location)
42
43 if not os.path.exists(directory):
44 os.makedirs(directory)
45
46 with open(location, "wb") as dst:
47 copyfileobj(fileobj, dst, 16384)
48
49 return filename
50
51 def upload(self, file_obj, filename):
52 if len(filename) == 0:
53 raise Exception("Empty filenames cannot be used")
54
55 filename = secure_filename(filename)
56 md5hash = hexencode(os.urandom(16))
57 file_path = os.path.join(md5hash, filename)
58
59 return self.store(file_obj, file_path)
60
61 def download(self, filename):
62 return send_file(safe_join(self.base_path, filename), as_attachment=True)
63
64 def delete(self, filename):
65 if os.path.exists(os.path.join(self.base_path, filename)):
66 os.unlink(os.path.join(self.base_path, filename))
67 return True
68 return False
69
70 def sync(self):
71 pass
72
73
74 class S3Uploader(BaseUploader):
75 def __init__(self):
76 super(BaseUploader, self).__init__()
77 self.s3 = self._get_s3_connection()
78 self.bucket = get_app_config("AWS_S3_BUCKET")
79
80 def _get_s3_connection(self):
81 access_key = get_app_config("AWS_ACCESS_KEY_ID")
82 secret_key = get_app_config("AWS_SECRET_ACCESS_KEY")
83 endpoint = get_app_config("AWS_S3_ENDPOINT_URL")
84 client = boto3.client(
85 "s3",
86 aws_access_key_id=access_key,
87 aws_secret_access_key=secret_key,
88 endpoint_url=endpoint,
89 )
90 return client
91
92 def _clean_filename(self, c):
93 if c in string.ascii_letters + string.digits + "-" + "_" + ".":
94 return True
95
96 def store(self, fileobj, filename):
97 self.s3.upload_fileobj(fileobj, self.bucket, filename)
98 return filename
99
100 def upload(self, file_obj, filename):
101 filename = filter(
102 self._clean_filename, secure_filename(filename).replace(" ", "_")
103 )
104 filename = "".join(filename)
105 if len(filename) <= 0:
106 return False
107
108 md5hash = hexencode(os.urandom(16))
109
110 dst = md5hash + "/" + filename
111 self.s3.upload_fileobj(file_obj, self.bucket, dst)
112 return dst
113
114 def download(self, filename):
115 key = filename
116 filename = filename.split("/").pop()
117 url = self.s3.generate_presigned_url(
118 "get_object",
119 Params={
120 "Bucket": self.bucket,
121 "Key": key,
122 "ResponseContentDisposition": "attachment; filename={}".format(
123 filename
124 ),
125 },
126 )
127 return redirect(url)
128
129 def delete(self, filename):
130 self.s3.delete_object(Bucket=self.bucket, Key=filename)
131 return True
132
133 def sync(self):
134 local_folder = current_app.config.get("UPLOAD_FOLDER")
135 # If the bucket is empty then Contents will not be in the response
136 bucket_list = self.s3.list_objects(Bucket=self.bucket).get("Contents", [])
137
138 for s3_key in bucket_list:
139 s3_object = s3_key["Key"]
140 # We don't want to download any directories
141 if s3_object.endswith("/") is False:
142 local_path = os.path.join(local_folder, s3_object)
143 directory = os.path.dirname(local_path)
144 if not os.path.exists(directory):
145 os.makedirs(directory)
146
147 self.s3.download_file(self.bucket, s3_object, local_path)
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/utils/uploads/uploaders.py b/CTFd/utils/uploads/uploaders.py
--- a/CTFd/utils/uploads/uploaders.py
+++ b/CTFd/utils/uploads/uploaders.py
@@ -1,4 +1,5 @@
import os
+import posixpath
import string
from shutil import copyfileobj
@@ -54,7 +55,7 @@
filename = secure_filename(filename)
md5hash = hexencode(os.urandom(16))
- file_path = os.path.join(md5hash, filename)
+ file_path = posixpath.join(md5hash, filename)
return self.store(file_obj, file_path)
| {"golden_diff": "diff --git a/CTFd/utils/uploads/uploaders.py b/CTFd/utils/uploads/uploaders.py\n--- a/CTFd/utils/uploads/uploaders.py\n+++ b/CTFd/utils/uploads/uploaders.py\n@@ -1,4 +1,5 @@\n import os\n+import posixpath\n import string\n from shutil import copyfileobj\n \n@@ -54,7 +55,7 @@\n \n filename = secure_filename(filename)\n md5hash = hexencode(os.urandom(16))\n- file_path = os.path.join(md5hash, filename)\n+ file_path = posixpath.join(md5hash, filename)\n \n return self.store(file_obj, file_path)\n", "issue": "Files not Downloadable\n**Environment**:\r\n\r\n - CTFd Version/Commit: 2.3.3\r\n - Operating System: Windows\r\n - Web Browser and Version: Chrome 80.0.3987.132\r\n\r\n**What happened?**\r\nWhen building a challenge that has a file, the file is not downloadable.\r\n\r\n**What did you expect to happen?**\r\nA file should be downloadable from the frontend.\r\n\r\n**How to reproduce your issue**\r\nCreate a fresh CTFd instance. Run serve.py. Create a challenge and upload a file. Navigate to the challenge on the front end. The file link is 404.\r\n\r\n**Any associated stack traces or error logs**\r\n```\r\nCTFd-Base> python .\\serve.py\r\n * Loaded module, <module 'CTFd.plugins.challenges' from 'C:\\Users\\StormCTF\\Downloads\\CTFd-Base\\CTFd\\plugins\\challenges\\__init__.pyc'>\r\n * Loaded module, <module 'CTFd.plugins.dynamic_challenges' from 'C:\\Users\\StormCTF\\Downloads\\CTFd-Base\\CTFd\\plugins\\dynamic_challenges\\__init__.pyc'>\r\n * Loaded module, <module 'CTFd.plugins.flags' from 'C:\\Users\\StormCTF\\Downloads\\CTFd-Base\\CTFd\\plugins\\flags\\__init__.pyc'>\r\n * Serving Flask app \"CTFd\" (lazy loading)\r\n * Environment: development\r\n * Debug mode: on\r\n * Restarting with stat\r\n * Loaded module, <module 'CTFd.plugins.challenges' from 'C:\\Users\\StormCTF\\Downloads\\CTFd-Base\\CTFd\\plugins\\challenges\\__init__.pyc'>\r\n * Loaded module, <module 'CTFd.plugins.dynamic_challenges' from 'C:\\Users\\StormCTF\\Downloads\\CTFd-Base\\CTFd\\plugins\\dynamic_challenges\\__init__.pyc'>\r\n * Loaded module, <module 'CTFd.plugins.flags' from 'C:\\Users\\StormCTF\\Downloads\\CTFd-Base\\CTFd\\plugins\\flags\\__init__.pyc'>\r\n * Debugger is active!\r\n * Debugger PIN: 310-671-378\r\n * Running on http://127.0.0.1:4000/ (Press CTRL+C to quit)\r\n127.0.0.1 - - [19/Apr/2020 12:40:42] \"GET /challenges HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:43] \"GET /themes/core/static/css/challenge-board.dev.css?d=a5654bce HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:43] \"GET /themes/core/static/css/main.dev.css?d=a5654bce HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:43] \"GET /themes/core/static/css/core.dev.css?d=a5654bce HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:43] \"GET /themes/core/static/js/vendor.bundle.dev.js?d=a5654bce HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:43] \"GET /themes/core/static/css/fonts.dev.css?d=a5654bce HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:43] \"GET /themes/core/static/js/core.dev.js?d=a5654bce HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:43] \"GET /themes/core/static/js/helpers.dev.js?d=a5654bce HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:43] \"GET /themes/core/static/js/pages/challenges.dev.js?d=a5654bce HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:43] \"GET /themes/core/static/img/favicon.ico?d=a5654bce HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:43] \"GET /api/v1/teams/me/solves HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:43] \"GET 
/api/v1/challenges HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:43] \"GET /api/v1/teams/me/solves HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:46] \"GET /plugins/challenges/assets/view.js?_=1587314443544 HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:46] \"GET /api/v1/challenges/1/solves HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:46] \"GET /api/v1/challenges/1 HTTP/1.1\" 200 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:50] \"GET /files/e57e22cdfa601592e5239c2d358994d6%5COrange-Icon-Black-BG.png?token=.eJyrVipJTcyNz0xRsjLSUSotTi2CsdMyc1LBbMNaAObrC-o.Xpx_Dg.vcuDUI8hJiIgPLExAPan7WVg4FU HTTP/1.1\" 404 -\r\n127.0.0.1 - - [19/Apr/2020 12:40:50] \"GET /themes/core/static/js/pages/main.dev.js?d=a5654bce HTTP/1.1\" 200 -```\r\n\n", "before_files": [{"content": "import os\nimport string\nfrom shutil import copyfileobj\n\nimport boto3\nfrom flask import current_app, redirect, send_file\nfrom flask.helpers import safe_join\nfrom werkzeug.utils import secure_filename\n\nfrom CTFd.utils import get_app_config\nfrom CTFd.utils.encoding import hexencode\n\n\nclass BaseUploader(object):\n def __init__(self):\n raise NotImplementedError\n\n def store(self, fileobj, filename):\n raise NotImplementedError\n\n def upload(self, file_obj, filename):\n raise NotImplementedError\n\n def download(self, filename):\n raise NotImplementedError\n\n def delete(self, filename):\n raise NotImplementedError\n\n def sync(self):\n raise NotImplementedError\n\n\nclass FilesystemUploader(BaseUploader):\n def __init__(self, base_path=None):\n super(BaseUploader, self).__init__()\n self.base_path = base_path or current_app.config.get(\"UPLOAD_FOLDER\")\n\n def store(self, fileobj, filename):\n location = os.path.join(self.base_path, filename)\n directory = os.path.dirname(location)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with open(location, \"wb\") as dst:\n copyfileobj(fileobj, dst, 16384)\n\n return filename\n\n def upload(self, file_obj, filename):\n if len(filename) == 0:\n raise Exception(\"Empty filenames cannot be used\")\n\n filename = secure_filename(filename)\n md5hash = hexencode(os.urandom(16))\n file_path = os.path.join(md5hash, filename)\n\n return self.store(file_obj, file_path)\n\n def download(self, filename):\n return send_file(safe_join(self.base_path, filename), as_attachment=True)\n\n def delete(self, filename):\n if os.path.exists(os.path.join(self.base_path, filename)):\n os.unlink(os.path.join(self.base_path, filename))\n return True\n return False\n\n def sync(self):\n pass\n\n\nclass S3Uploader(BaseUploader):\n def __init__(self):\n super(BaseUploader, self).__init__()\n self.s3 = self._get_s3_connection()\n self.bucket = get_app_config(\"AWS_S3_BUCKET\")\n\n def _get_s3_connection(self):\n access_key = get_app_config(\"AWS_ACCESS_KEY_ID\")\n secret_key = get_app_config(\"AWS_SECRET_ACCESS_KEY\")\n endpoint = get_app_config(\"AWS_S3_ENDPOINT_URL\")\n client = boto3.client(\n \"s3\",\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n endpoint_url=endpoint,\n )\n return client\n\n def _clean_filename(self, c):\n if c in string.ascii_letters + string.digits + \"-\" + \"_\" + \".\":\n return True\n\n def store(self, fileobj, filename):\n self.s3.upload_fileobj(fileobj, self.bucket, filename)\n return filename\n\n def upload(self, file_obj, filename):\n filename = filter(\n self._clean_filename, secure_filename(filename).replace(\" \", \"_\")\n )\n filename = \"\".join(filename)\n if len(filename) <= 0:\n return False\n\n md5hash = 
hexencode(os.urandom(16))\n\n dst = md5hash + \"/\" + filename\n self.s3.upload_fileobj(file_obj, self.bucket, dst)\n return dst\n\n def download(self, filename):\n key = filename\n filename = filename.split(\"/\").pop()\n url = self.s3.generate_presigned_url(\n \"get_object\",\n Params={\n \"Bucket\": self.bucket,\n \"Key\": key,\n \"ResponseContentDisposition\": \"attachment; filename={}\".format(\n filename\n ),\n },\n )\n return redirect(url)\n\n def delete(self, filename):\n self.s3.delete_object(Bucket=self.bucket, Key=filename)\n return True\n\n def sync(self):\n local_folder = current_app.config.get(\"UPLOAD_FOLDER\")\n # If the bucket is empty then Contents will not be in the response\n bucket_list = self.s3.list_objects(Bucket=self.bucket).get(\"Contents\", [])\n\n for s3_key in bucket_list:\n s3_object = s3_key[\"Key\"]\n # We don't want to download any directories\n if s3_object.endswith(\"/\") is False:\n local_path = os.path.join(local_folder, s3_object)\n directory = os.path.dirname(local_path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n self.s3.download_file(self.bucket, s3_object, local_path)\n", "path": "CTFd/utils/uploads/uploaders.py"}], "after_files": [{"content": "import os\nimport posixpath\nimport string\nfrom shutil import copyfileobj\n\nimport boto3\nfrom flask import current_app, redirect, send_file\nfrom flask.helpers import safe_join\nfrom werkzeug.utils import secure_filename\n\nfrom CTFd.utils import get_app_config\nfrom CTFd.utils.encoding import hexencode\n\n\nclass BaseUploader(object):\n def __init__(self):\n raise NotImplementedError\n\n def store(self, fileobj, filename):\n raise NotImplementedError\n\n def upload(self, file_obj, filename):\n raise NotImplementedError\n\n def download(self, filename):\n raise NotImplementedError\n\n def delete(self, filename):\n raise NotImplementedError\n\n def sync(self):\n raise NotImplementedError\n\n\nclass FilesystemUploader(BaseUploader):\n def __init__(self, base_path=None):\n super(BaseUploader, self).__init__()\n self.base_path = base_path or current_app.config.get(\"UPLOAD_FOLDER\")\n\n def store(self, fileobj, filename):\n location = os.path.join(self.base_path, filename)\n directory = os.path.dirname(location)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with open(location, \"wb\") as dst:\n copyfileobj(fileobj, dst, 16384)\n\n return filename\n\n def upload(self, file_obj, filename):\n if len(filename) == 0:\n raise Exception(\"Empty filenames cannot be used\")\n\n filename = secure_filename(filename)\n md5hash = hexencode(os.urandom(16))\n file_path = posixpath.join(md5hash, filename)\n\n return self.store(file_obj, file_path)\n\n def download(self, filename):\n return send_file(safe_join(self.base_path, filename), as_attachment=True)\n\n def delete(self, filename):\n if os.path.exists(os.path.join(self.base_path, filename)):\n os.unlink(os.path.join(self.base_path, filename))\n return True\n return False\n\n def sync(self):\n pass\n\n\nclass S3Uploader(BaseUploader):\n def __init__(self):\n super(BaseUploader, self).__init__()\n self.s3 = self._get_s3_connection()\n self.bucket = get_app_config(\"AWS_S3_BUCKET\")\n\n def _get_s3_connection(self):\n access_key = get_app_config(\"AWS_ACCESS_KEY_ID\")\n secret_key = get_app_config(\"AWS_SECRET_ACCESS_KEY\")\n endpoint = get_app_config(\"AWS_S3_ENDPOINT_URL\")\n client = boto3.client(\n \"s3\",\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n endpoint_url=endpoint,\n )\n return client\n\n def 
_clean_filename(self, c):\n if c in string.ascii_letters + string.digits + \"-\" + \"_\" + \".\":\n return True\n\n def store(self, fileobj, filename):\n self.s3.upload_fileobj(fileobj, self.bucket, filename)\n return filename\n\n def upload(self, file_obj, filename):\n filename = filter(\n self._clean_filename, secure_filename(filename).replace(\" \", \"_\")\n )\n filename = \"\".join(filename)\n if len(filename) <= 0:\n return False\n\n md5hash = hexencode(os.urandom(16))\n\n dst = md5hash + \"/\" + filename\n self.s3.upload_fileobj(file_obj, self.bucket, dst)\n return dst\n\n def download(self, filename):\n key = filename\n filename = filename.split(\"/\").pop()\n url = self.s3.generate_presigned_url(\n \"get_object\",\n Params={\n \"Bucket\": self.bucket,\n \"Key\": key,\n \"ResponseContentDisposition\": \"attachment; filename={}\".format(\n filename\n ),\n },\n )\n return redirect(url)\n\n def delete(self, filename):\n self.s3.delete_object(Bucket=self.bucket, Key=filename)\n return True\n\n def sync(self):\n local_folder = current_app.config.get(\"UPLOAD_FOLDER\")\n # If the bucket is empty then Contents will not be in the response\n bucket_list = self.s3.list_objects(Bucket=self.bucket).get(\"Contents\", [])\n\n for s3_key in bucket_list:\n s3_object = s3_key[\"Key\"]\n # We don't want to download any directories\n if s3_object.endswith(\"/\") is False:\n local_path = os.path.join(local_folder, s3_object)\n directory = os.path.dirname(local_path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n self.s3.download_file(self.bucket, s3_object, local_path)\n", "path": "CTFd/utils/uploads/uploaders.py"}]} | 3,235 | 145 |
gh_patches_debug_8108 | rasdani/github-patches | git_diff | common-workflow-language__cwltool-792 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cwltool fails to run with an input of record and an InlineJavascriptRequirement
`test.cwl`
```
cwlVersion: v1.0
class: CommandLineTool
requirements:
- class: InlineJavascriptRequirement
baseCommand: ['echo']
inputs:
- id: input
type:
type: record
fields: []
outputs: []
```
fails with:
```
/Users/th10/checkouts/cwltool/venv/bin/cwltool 1.0.20180611202326
Resolved 'test.cwl' to 'file:///Users/th10/checkouts/cwltool/test.cwl'
I'm sorry, I couldn't load this CWL file. The error was:
Traceback (most recent call last):
File "/Users/th10/checkouts/cwltool/cwltool/main.py", line 527, in main
job_script_provider, make_tool_kwds)
File "/Users/th10/checkouts/cwltool/cwltool/load_tool.py", line 351, in make_tool
job_script_provider=job_script_provider, **kwargs)
File "/Users/th10/checkouts/cwltool/cwltool/workflow.py", line 51, in default_make_tool
force_docker_pull, job_script_provider, **kwargs)
File "/Users/th10/checkouts/cwltool/cwltool/command_line_tool.py", line 224, in __init__
force_docker_pull, job_script_provider, **kwargs)
File "/Users/th10/checkouts/cwltool/cwltool/process.py", line 568, in __init__
validate_js_expressions(cast(CommentedMap, toolpath_object), self.doc_schema.names[toolpath_object["class"]], validate_js_options)
File "/Users/th10/checkouts/cwltool/cwltool/validate_js.py", line 181, in validate_js_expressions
expressions = get_expressions(tool, schema)
File "/Users/th10/checkouts/cwltool/cwltool/validate_js.py", line 76, in get_expressions
SourceLine(tool, schema_field.name)
File "/Users/th10/checkouts/cwltool/cwltool/validate_js.py", line 62, in get_expressions
map(lambda x: get_expressions(x[1], schema.items, SourceLine(tool, x[0])), enumerate(tool)) # type: ignore # https://github.com/python/mypy/issues/4679
File "/Users/th10/checkouts/cwltool/cwltool/validate_js.py", line 62, in <lambda>
map(lambda x: get_expressions(x[1], schema.items, SourceLine(tool, x[0])), enumerate(tool)) # type: ignore # https://github.com/python/mypy/issues/4679
File "/Users/th10/checkouts/cwltool/cwltool/validate_js.py", line 76, in get_expressions
SourceLine(tool, schema_field.name)
File "/Users/th10/checkouts/cwltool/cwltool/validate_js.py", line 55, in get_expressions
assert valid_schema is not None
AssertionError
```
as https://github.com/common-workflow-language/cwltool/blob/886a6ac41c685f20d39e352f9c657e59f3312265/cwltool/validate_js.py#L51-L52 is validating with strict mode while earlier parts of the code set the "name" attribute of records and arrays. This will also produce an error when someone is not strictly validating a CWL script, where only a CWL warning should appear. A quick fix for this would be to get rid of the `strict=True` parameter.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cwltool/validate_js.py`
Content:
```
1 import copy
2 import itertools
3 import json
4 import logging
5 from collections import namedtuple
6 from typing import (Any, Dict, List, # pylint: disable=unused-import
7 Optional, Text, Tuple, Union)
8
9 import avro.schema # always import after schema_salad, never before
10 from pkg_resources import resource_stream
11 from ruamel.yaml.comments import CommentedMap # pylint: disable=unused-import
12 from schema_salad.sourceline import SourceLine
13 from schema_salad.validate import (Schema, # pylint: disable=unused-import
14 ValidationException, validate_ex)
15 from six import string_types
16
17 from .expression import scanner as scan_expression
18 from .loghandler import _logger
19 from .sandboxjs import code_fragment_to_js, exec_js_process
20 from .utils import json_dumps
21
22 def is_expression(tool, schema):
23 # type: (Union[CommentedMap, Any], Optional[Schema]) -> bool
24 return isinstance(schema, avro.schema.EnumSchema) \
25 and schema.name == "Expression" and isinstance(tool, string_types)
26
27 class SuppressLog(logging.Filter):
28 def __init__(self, name): # type: (Text) -> None
29 name = str(name)
30 super(SuppressLog, self).__init__(name)
31
32 def filter(self, record):
33 return False
34
35
36 _logger_validation_warnings = logging.getLogger("cwltool.validation_warnings")
37 _logger_validation_warnings.addFilter(SuppressLog("cwltool.validation_warnings"))
38
39 def get_expressions(tool, # type: Union[CommentedMap, Any]
40 schema, # type: Optional[avro.schema.Schema]
41 source_line=None # type: Optional[SourceLine]
42 ): # type: (...) -> List[Tuple[Text, Optional[SourceLine]]]
43 if is_expression(tool, schema):
44 return [(tool, source_line)]
45 elif isinstance(schema, avro.schema.UnionSchema):
46 valid_schema = None
47
48 for possible_schema in schema.schemas:
49 if is_expression(tool, possible_schema):
50 return [(tool, source_line)]
51 elif validate_ex(possible_schema, tool, strict=True, raise_ex=False,
52 logger=_logger_validation_warnings):
53 valid_schema = possible_schema
54
55 return get_expressions(tool, valid_schema, source_line)
56 elif isinstance(schema, avro.schema.ArraySchema):
57 if not isinstance(tool, list):
58 return []
59
60 return list(itertools.chain(*
61 map(lambda x: get_expressions(x[1], schema.items, SourceLine(tool, x[0])), enumerate(tool)) # type: ignore # https://github.com/python/mypy/issues/4679
62 ))
63
64 elif isinstance(schema, avro.schema.RecordSchema):
65 if not isinstance(tool, Dict):
66 return []
67
68 expression_nodes = []
69
70 for schema_field in schema.fields:
71 if schema_field.name in tool:
72 expression_nodes.extend(get_expressions(
73 tool[schema_field.name],
74 schema_field.type,
75 SourceLine(tool, schema_field.name)
76 ))
77
78 return expression_nodes
79 else:
80 return []
81
82
83 JSHintJSReturn = namedtuple("jshint_return", ["errors", "globals"])
84
85 def jshint_js(js_text, globals=None, options=None):
86 # type: (Text, List[Text], Dict) -> Tuple[List[Text], List[Text]]
87 if globals is None:
88 globals = []
89 if options is None:
90 options = {
91 "includewarnings": [
92 "W117", # <VARIABLE> not defined
93 "W104", "W119" # using ES6 features
94 ],
95 "strict": "implied",
96 "esversion": 5
97 }
98
99 with resource_stream(__name__, "jshint/jshint.js") as file:
100 # NOTE: we need a global variable for lodash (which jshint depends on)
101 jshint_functions_text = "var global = this;" + file.read().decode('utf-8')
102
103 with resource_stream(__name__, "jshint/jshint_wrapper.js") as file:
104 # NOTE: we need to assign to ob, as the expression {validateJS: validateJS} as an expression
105 # is interpreted as a block with a label `validateJS`
106 jshint_functions_text += "\n" + file.read().decode('utf-8') + "\nvar ob = {validateJS: validateJS}; ob"
107
108 returncode, stdout, stderr = exec_js_process(
109 "validateJS(%s)" % json_dumps({
110 "code": js_text,
111 "options": options,
112 "globals": globals
113 }),
114 timeout=30,
115 context=jshint_functions_text
116 )
117
118 def dump_jshint_error():
119 # type: () -> None
120 raise RuntimeError("jshint failed to run succesfully\nreturncode: %d\nstdout: \"%s\"\nstderr: \"%s\"" % (
121 returncode,
122 stdout,
123 stderr
124 ))
125
126 if returncode == -1:
127 _logger.warn("jshint process timed out")
128
129 if returncode != 0:
130 dump_jshint_error()
131
132 try:
133 jshint_json = json.loads(stdout)
134 except ValueError:
135 dump_jshint_error()
136
137 jshint_errors = [] # type: List[Text]
138
139 js_text_lines = js_text.split("\n")
140
141 for jshint_error_obj in jshint_json.get("errors", []):
142 text = u"JSHINT: " + js_text_lines[jshint_error_obj["line"] - 1] + "\n"
143 text += u"JSHINT: " + " " * (jshint_error_obj["character"] - 1) + "^\n"
144 text += u"JSHINT: %s: %s" % (jshint_error_obj["code"], jshint_error_obj["reason"])
145 jshint_errors.append(text)
146
147 return JSHintJSReturn(jshint_errors, jshint_json.get("globals", []))
148
149
150 def print_js_hint_messages(js_hint_messages, source_line):
151 # type: (List[Text], Optional[SourceLine]) -> None
152 if source_line:
153 for js_hint_message in js_hint_messages:
154 _logger.warn(source_line.makeError(js_hint_message))
155
156 def validate_js_expressions(tool, schema, jshint_options=None):
157 # type: (CommentedMap, Schema, Dict) -> None
158
159 if tool.get("requirements") is None:
160 return
161
162 requirements = tool["requirements"]
163
164 default_globals = [u"self", u"inputs", u"runtime", u"console"]
165
166 for i, prop in enumerate(reversed(requirements)):
167 if prop["class"] == "InlineJavascriptRequirement":
168 expression_lib = prop.get("expressionLib", [])
169 break
170 else:
171 return
172
173 js_globals = copy.deepcopy(default_globals)
174
175 for i, expression_lib_line in enumerate(expression_lib):
176 expression_lib_line_errors, expression_lib_line_globals = jshint_js(expression_lib_line, js_globals, jshint_options)
177 js_globals.extend(expression_lib_line_globals)
178 print_js_hint_messages(expression_lib_line_errors, SourceLine(expression_lib, i))
179
180 expressions = get_expressions(tool, schema)
181
182 for expression, source_line in expressions:
183 unscanned_str = expression.strip()
184 scan_slice = scan_expression(unscanned_str)
185
186 while scan_slice:
187 if unscanned_str[scan_slice[0]] == '$':
188 code_fragment = unscanned_str[scan_slice[0] + 1:scan_slice[1]]
189 code_fragment_js = code_fragment_to_js(code_fragment, "")
190 expression_errors, _ = jshint_js(code_fragment_js, js_globals, jshint_options)
191 print_js_hint_messages(expression_errors, source_line)
192
193 unscanned_str = unscanned_str[scan_slice[1]:]
194 scan_slice = scan_expression(unscanned_str)
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cwltool/validate_js.py b/cwltool/validate_js.py
--- a/cwltool/validate_js.py
+++ b/cwltool/validate_js.py
@@ -48,7 +48,7 @@
for possible_schema in schema.schemas:
if is_expression(tool, possible_schema):
return [(tool, source_line)]
- elif validate_ex(possible_schema, tool, strict=True, raise_ex=False,
+ elif validate_ex(possible_schema, tool, raise_ex=False,
logger=_logger_validation_warnings):
valid_schema = possible_schema
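The recorded fix drops `strict=True` from the union-member probe. A hedged sketch (not cwltool code) of the pattern this line implements: when a value is checked against a union, the code probes each member schema and recurses into the first one the value validates against. That probe only has to answer "which member does this value belong to", so it should be lenient; with strict validation, record values carrying generated attributes such as `name` match no member, `valid_schema` stays `None`, and the assertion from the traceback fires.

```python
# Hedged sketch of the member-selection pattern; `validate` stands in for
# schema_salad's validate_ex.  The probe is deliberately lenient so that extra
# generated attributes (like "name") do not disqualify an otherwise-valid value.
def pick_union_member(value, member_schemas, validate):
    for member in member_schemas:
        if validate(member, value, strict=False):  # tolerate unknown/extra fields
            return member
    return None  # no member matched; callers must not assume one was found
```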
| {"golden_diff": "diff --git a/cwltool/validate_js.py b/cwltool/validate_js.py\n--- a/cwltool/validate_js.py\n+++ b/cwltool/validate_js.py\n@@ -48,7 +48,7 @@\n for possible_schema in schema.schemas:\n if is_expression(tool, possible_schema):\n return [(tool, source_line)]\n- elif validate_ex(possible_schema, tool, strict=True, raise_ex=False,\n+ elif validate_ex(possible_schema, tool, raise_ex=False,\n logger=_logger_validation_warnings):\n valid_schema = possible_schema\n", "issue": "cwltool fails to run with an input of record and an InlineJavascriptRequirement\n`test.cwl`\r\n```\r\ncwlVersion: v1.0\r\nclass: CommandLineTool\r\nrequirements:\r\n - class: InlineJavascriptRequirement\r\nbaseCommand: ['echo']\r\n\r\ninputs:\r\n - id: input\r\n type:\r\n type: record\r\n fields: []\r\n\r\noutputs: []\r\n```\r\nfails with:\r\n```\r\n/Users/th10/checkouts/cwltool/venv/bin/cwltool 1.0.20180611202326\r\nResolved 'test.cwl' to 'file:///Users/th10/checkouts/cwltool/test.cwl'\r\nI'm sorry, I couldn't load this CWL file. The error was:\r\nTraceback (most recent call last):\r\n File \"/Users/th10/checkouts/cwltool/cwltool/main.py\", line 527, in main\r\n job_script_provider, make_tool_kwds)\r\n File \"/Users/th10/checkouts/cwltool/cwltool/load_tool.py\", line 351, in make_tool\r\n job_script_provider=job_script_provider, **kwargs)\r\n File \"/Users/th10/checkouts/cwltool/cwltool/workflow.py\", line 51, in default_make_tool\r\n force_docker_pull, job_script_provider, **kwargs)\r\n File \"/Users/th10/checkouts/cwltool/cwltool/command_line_tool.py\", line 224, in __init__\r\n force_docker_pull, job_script_provider, **kwargs)\r\n File \"/Users/th10/checkouts/cwltool/cwltool/process.py\", line 568, in __init__\r\n validate_js_expressions(cast(CommentedMap, toolpath_object), self.doc_schema.names[toolpath_object[\"class\"]], validate_js_options)\r\n File \"/Users/th10/checkouts/cwltool/cwltool/validate_js.py\", line 181, in validate_js_expressions\r\n expressions = get_expressions(tool, schema)\r\n File \"/Users/th10/checkouts/cwltool/cwltool/validate_js.py\", line 76, in get_expressions\r\n SourceLine(tool, schema_field.name)\r\n File \"/Users/th10/checkouts/cwltool/cwltool/validate_js.py\", line 62, in get_expressions\r\n map(lambda x: get_expressions(x[1], schema.items, SourceLine(tool, x[0])), enumerate(tool)) # type: ignore # https://github.com/python/mypy/issues/4679\r\n File \"/Users/th10/checkouts/cwltool/cwltool/validate_js.py\", line 62, in <lambda>\r\n map(lambda x: get_expressions(x[1], schema.items, SourceLine(tool, x[0])), enumerate(tool)) # type: ignore # https://github.com/python/mypy/issues/4679\r\n File \"/Users/th10/checkouts/cwltool/cwltool/validate_js.py\", line 76, in get_expressions\r\n SourceLine(tool, schema_field.name)\r\n File \"/Users/th10/checkouts/cwltool/cwltool/validate_js.py\", line 55, in get_expressions\r\n assert valid_schema is not None\r\nAssertionError\r\n```\r\nas https://github.com/common-workflow-language/cwltool/blob/886a6ac41c685f20d39e352f9c657e59f3312265/cwltool/validate_js.py#L51-L52 is parsing using strict validation and earlier parts of the code are setting the \"name\" attribute of records and arrays. This will also produce an error when someone is not strict validating a CWL script and a CWL warning would appear. 
A quick fix for this would be to get rid of the `strict=True` parameter.\n", "before_files": [{"content": "import copy\nimport itertools\nimport json\nimport logging\nfrom collections import namedtuple\nfrom typing import (Any, Dict, List, # pylint: disable=unused-import\n Optional, Text, Tuple, Union)\n\nimport avro.schema # always import after schema_salad, never before\nfrom pkg_resources import resource_stream\nfrom ruamel.yaml.comments import CommentedMap # pylint: disable=unused-import\nfrom schema_salad.sourceline import SourceLine\nfrom schema_salad.validate import (Schema, # pylint: disable=unused-import\n ValidationException, validate_ex)\nfrom six import string_types\n\nfrom .expression import scanner as scan_expression\nfrom .loghandler import _logger\nfrom .sandboxjs import code_fragment_to_js, exec_js_process\nfrom .utils import json_dumps\n\ndef is_expression(tool, schema):\n # type: (Union[CommentedMap, Any], Optional[Schema]) -> bool\n return isinstance(schema, avro.schema.EnumSchema) \\\n and schema.name == \"Expression\" and isinstance(tool, string_types)\n\nclass SuppressLog(logging.Filter):\n def __init__(self, name): # type: (Text) -> None\n name = str(name)\n super(SuppressLog, self).__init__(name)\n\n def filter(self, record):\n return False\n\n\n_logger_validation_warnings = logging.getLogger(\"cwltool.validation_warnings\")\n_logger_validation_warnings.addFilter(SuppressLog(\"cwltool.validation_warnings\"))\n\ndef get_expressions(tool, # type: Union[CommentedMap, Any]\n schema, # type: Optional[avro.schema.Schema]\n source_line=None # type: Optional[SourceLine]\n ): # type: (...) -> List[Tuple[Text, Optional[SourceLine]]]\n if is_expression(tool, schema):\n return [(tool, source_line)]\n elif isinstance(schema, avro.schema.UnionSchema):\n valid_schema = None\n\n for possible_schema in schema.schemas:\n if is_expression(tool, possible_schema):\n return [(tool, source_line)]\n elif validate_ex(possible_schema, tool, strict=True, raise_ex=False,\n logger=_logger_validation_warnings):\n valid_schema = possible_schema\n\n return get_expressions(tool, valid_schema, source_line)\n elif isinstance(schema, avro.schema.ArraySchema):\n if not isinstance(tool, list):\n return []\n\n return list(itertools.chain(*\n map(lambda x: get_expressions(x[1], schema.items, SourceLine(tool, x[0])), enumerate(tool)) # type: ignore # https://github.com/python/mypy/issues/4679\n ))\n\n elif isinstance(schema, avro.schema.RecordSchema):\n if not isinstance(tool, Dict):\n return []\n\n expression_nodes = []\n\n for schema_field in schema.fields:\n if schema_field.name in tool:\n expression_nodes.extend(get_expressions(\n tool[schema_field.name],\n schema_field.type,\n SourceLine(tool, schema_field.name)\n ))\n\n return expression_nodes\n else:\n return []\n\n\nJSHintJSReturn = namedtuple(\"jshint_return\", [\"errors\", \"globals\"])\n\ndef jshint_js(js_text, globals=None, options=None):\n # type: (Text, List[Text], Dict) -> Tuple[List[Text], List[Text]]\n if globals is None:\n globals = []\n if options is None:\n options = {\n \"includewarnings\": [\n \"W117\", # <VARIABLE> not defined\n \"W104\", \"W119\" # using ES6 features\n ],\n \"strict\": \"implied\",\n \"esversion\": 5\n }\n\n with resource_stream(__name__, \"jshint/jshint.js\") as file:\n # NOTE: we need a global variable for lodash (which jshint depends on)\n jshint_functions_text = \"var global = this;\" + file.read().decode('utf-8')\n\n with resource_stream(__name__, \"jshint/jshint_wrapper.js\") as file:\n # NOTE: we need to 
assign to ob, as the expression {validateJS: validateJS} as an expression\n # is interpreted as a block with a label `validateJS`\n jshint_functions_text += \"\\n\" + file.read().decode('utf-8') + \"\\nvar ob = {validateJS: validateJS}; ob\"\n\n returncode, stdout, stderr = exec_js_process(\n \"validateJS(%s)\" % json_dumps({\n \"code\": js_text,\n \"options\": options,\n \"globals\": globals\n }),\n timeout=30,\n context=jshint_functions_text\n )\n\n def dump_jshint_error():\n # type: () -> None\n raise RuntimeError(\"jshint failed to run succesfully\\nreturncode: %d\\nstdout: \\\"%s\\\"\\nstderr: \\\"%s\\\"\" % (\n returncode,\n stdout,\n stderr\n ))\n\n if returncode == -1:\n _logger.warn(\"jshint process timed out\")\n\n if returncode != 0:\n dump_jshint_error()\n\n try:\n jshint_json = json.loads(stdout)\n except ValueError:\n dump_jshint_error()\n\n jshint_errors = [] # type: List[Text]\n\n js_text_lines = js_text.split(\"\\n\")\n\n for jshint_error_obj in jshint_json.get(\"errors\", []):\n text = u\"JSHINT: \" + js_text_lines[jshint_error_obj[\"line\"] - 1] + \"\\n\"\n text += u\"JSHINT: \" + \" \" * (jshint_error_obj[\"character\"] - 1) + \"^\\n\"\n text += u\"JSHINT: %s: %s\" % (jshint_error_obj[\"code\"], jshint_error_obj[\"reason\"])\n jshint_errors.append(text)\n\n return JSHintJSReturn(jshint_errors, jshint_json.get(\"globals\", []))\n\n\ndef print_js_hint_messages(js_hint_messages, source_line):\n # type: (List[Text], Optional[SourceLine]) -> None\n if source_line:\n for js_hint_message in js_hint_messages:\n _logger.warn(source_line.makeError(js_hint_message))\n\ndef validate_js_expressions(tool, schema, jshint_options=None):\n # type: (CommentedMap, Schema, Dict) -> None\n\n if tool.get(\"requirements\") is None:\n return\n\n requirements = tool[\"requirements\"]\n\n default_globals = [u\"self\", u\"inputs\", u\"runtime\", u\"console\"]\n\n for i, prop in enumerate(reversed(requirements)):\n if prop[\"class\"] == \"InlineJavascriptRequirement\":\n expression_lib = prop.get(\"expressionLib\", [])\n break\n else:\n return\n\n js_globals = copy.deepcopy(default_globals)\n\n for i, expression_lib_line in enumerate(expression_lib):\n expression_lib_line_errors, expression_lib_line_globals = jshint_js(expression_lib_line, js_globals, jshint_options)\n js_globals.extend(expression_lib_line_globals)\n print_js_hint_messages(expression_lib_line_errors, SourceLine(expression_lib, i))\n\n expressions = get_expressions(tool, schema)\n\n for expression, source_line in expressions:\n unscanned_str = expression.strip()\n scan_slice = scan_expression(unscanned_str)\n\n while scan_slice:\n if unscanned_str[scan_slice[0]] == '$':\n code_fragment = unscanned_str[scan_slice[0] + 1:scan_slice[1]]\n code_fragment_js = code_fragment_to_js(code_fragment, \"\")\n expression_errors, _ = jshint_js(code_fragment_js, js_globals, jshint_options)\n print_js_hint_messages(expression_errors, source_line)\n\n unscanned_str = unscanned_str[scan_slice[1]:]\n scan_slice = scan_expression(unscanned_str)\n", "path": "cwltool/validate_js.py"}], "after_files": [{"content": "import copy\nimport itertools\nimport json\nimport logging\nfrom collections import namedtuple\nfrom typing import (Any, Dict, List, # pylint: disable=unused-import\n Optional, Text, Tuple, Union)\n\nimport avro.schema # always import after schema_salad, never before\nfrom pkg_resources import resource_stream\nfrom ruamel.yaml.comments import CommentedMap # pylint: disable=unused-import\nfrom schema_salad.sourceline import SourceLine\nfrom 
schema_salad.validate import (Schema, # pylint: disable=unused-import\n ValidationException, validate_ex)\nfrom six import string_types\n\nfrom .expression import scanner as scan_expression\nfrom .loghandler import _logger\nfrom .sandboxjs import code_fragment_to_js, exec_js_process\nfrom .utils import json_dumps\n\ndef is_expression(tool, schema):\n # type: (Union[CommentedMap, Any], Optional[Schema]) -> bool\n return isinstance(schema, avro.schema.EnumSchema) \\\n and schema.name == \"Expression\" and isinstance(tool, string_types)\n\nclass SuppressLog(logging.Filter):\n def __init__(self, name): # type: (Text) -> None\n name = str(name)\n super(SuppressLog, self).__init__(name)\n\n def filter(self, record):\n return False\n\n\n_logger_validation_warnings = logging.getLogger(\"cwltool.validation_warnings\")\n_logger_validation_warnings.addFilter(SuppressLog(\"cwltool.validation_warnings\"))\n\ndef get_expressions(tool, # type: Union[CommentedMap, Any]\n schema, # type: Optional[avro.schema.Schema]\n source_line=None # type: Optional[SourceLine]\n ): # type: (...) -> List[Tuple[Text, Optional[SourceLine]]]\n if is_expression(tool, schema):\n return [(tool, source_line)]\n elif isinstance(schema, avro.schema.UnionSchema):\n valid_schema = None\n\n for possible_schema in schema.schemas:\n if is_expression(tool, possible_schema):\n return [(tool, source_line)]\n elif validate_ex(possible_schema, tool, raise_ex=False,\n logger=_logger_validation_warnings):\n valid_schema = possible_schema\n\n return get_expressions(tool, valid_schema, source_line)\n elif isinstance(schema, avro.schema.ArraySchema):\n if not isinstance(tool, list):\n return []\n\n return list(itertools.chain(*\n map(lambda x: get_expressions(x[1], schema.items, SourceLine(tool, x[0])), enumerate(tool)) # type: ignore # https://github.com/python/mypy/issues/4679\n ))\n\n elif isinstance(schema, avro.schema.RecordSchema):\n if not isinstance(tool, Dict):\n return []\n\n expression_nodes = []\n\n for schema_field in schema.fields:\n if schema_field.name in tool:\n expression_nodes.extend(get_expressions(\n tool[schema_field.name],\n schema_field.type,\n SourceLine(tool, schema_field.name)\n ))\n\n return expression_nodes\n else:\n return []\n\n\nJSHintJSReturn = namedtuple(\"jshint_return\", [\"errors\", \"globals\"])\n\ndef jshint_js(js_text, globals=None, options=None):\n # type: (Text, List[Text], Dict) -> Tuple[List[Text], List[Text]]\n if globals is None:\n globals = []\n if options is None:\n options = {\n \"includewarnings\": [\n \"W117\", # <VARIABLE> not defined\n \"W104\", \"W119\" # using ES6 features\n ],\n \"strict\": \"implied\",\n \"esversion\": 5\n }\n\n with resource_stream(__name__, \"jshint/jshint.js\") as file:\n # NOTE: we need a global variable for lodash (which jshint depends on)\n jshint_functions_text = \"var global = this;\" + file.read().decode('utf-8')\n\n with resource_stream(__name__, \"jshint/jshint_wrapper.js\") as file:\n # NOTE: we need to assign to ob, as the expression {validateJS: validateJS} as an expression\n # is interpreted as a block with a label `validateJS`\n jshint_functions_text += \"\\n\" + file.read().decode('utf-8') + \"\\nvar ob = {validateJS: validateJS}; ob\"\n\n returncode, stdout, stderr = exec_js_process(\n \"validateJS(%s)\" % json_dumps({\n \"code\": js_text,\n \"options\": options,\n \"globals\": globals\n }),\n timeout=30,\n context=jshint_functions_text\n )\n\n def dump_jshint_error():\n # type: () -> None\n raise RuntimeError(\"jshint failed to run 
succesfully\\nreturncode: %d\\nstdout: \\\"%s\\\"\\nstderr: \\\"%s\\\"\" % (\n returncode,\n stdout,\n stderr\n ))\n\n if returncode == -1:\n _logger.warn(\"jshint process timed out\")\n\n if returncode != 0:\n dump_jshint_error()\n\n try:\n jshint_json = json.loads(stdout)\n except ValueError:\n dump_jshint_error()\n\n jshint_errors = [] # type: List[Text]\n\n js_text_lines = js_text.split(\"\\n\")\n\n for jshint_error_obj in jshint_json.get(\"errors\", []):\n text = u\"JSHINT: \" + js_text_lines[jshint_error_obj[\"line\"] - 1] + \"\\n\"\n text += u\"JSHINT: \" + \" \" * (jshint_error_obj[\"character\"] - 1) + \"^\\n\"\n text += u\"JSHINT: %s: %s\" % (jshint_error_obj[\"code\"], jshint_error_obj[\"reason\"])\n jshint_errors.append(text)\n\n return JSHintJSReturn(jshint_errors, jshint_json.get(\"globals\", []))\n\n\ndef print_js_hint_messages(js_hint_messages, source_line):\n # type: (List[Text], Optional[SourceLine]) -> None\n if source_line:\n for js_hint_message in js_hint_messages:\n _logger.warn(source_line.makeError(js_hint_message))\n\ndef validate_js_expressions(tool, schema, jshint_options=None):\n # type: (CommentedMap, Schema, Dict) -> None\n\n if tool.get(\"requirements\") is None:\n return\n\n requirements = tool[\"requirements\"]\n\n default_globals = [u\"self\", u\"inputs\", u\"runtime\", u\"console\"]\n\n for i, prop in enumerate(reversed(requirements)):\n if prop[\"class\"] == \"InlineJavascriptRequirement\":\n expression_lib = prop.get(\"expressionLib\", [])\n break\n else:\n return\n\n js_globals = copy.deepcopy(default_globals)\n\n for i, expression_lib_line in enumerate(expression_lib):\n expression_lib_line_errors, expression_lib_line_globals = jshint_js(expression_lib_line, js_globals, jshint_options)\n js_globals.extend(expression_lib_line_globals)\n print_js_hint_messages(expression_lib_line_errors, SourceLine(expression_lib, i))\n\n expressions = get_expressions(tool, schema)\n\n for expression, source_line in expressions:\n unscanned_str = expression.strip()\n scan_slice = scan_expression(unscanned_str)\n\n while scan_slice:\n if unscanned_str[scan_slice[0]] == '$':\n code_fragment = unscanned_str[scan_slice[0] + 1:scan_slice[1]]\n code_fragment_js = code_fragment_to_js(code_fragment, \"\")\n expression_errors, _ = jshint_js(code_fragment_js, js_globals, jshint_options)\n print_js_hint_messages(expression_errors, source_line)\n\n unscanned_str = unscanned_str[scan_slice[1]:]\n scan_slice = scan_expression(unscanned_str)\n", "path": "cwltool/validate_js.py"}]} | 3,272 | 125 |
gh_patches_debug_41749 | rasdani/github-patches | git_diff | AnalogJ__lexicon-368 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rackspace provider not getting token from the identity API
_Update: solved! See my comment below._
I'm just starting out with lexicon and trying to get it working with Rackspace Cloud DNS. Requests are failing with a `requests.exceptions.HTTPError: 401 Client Error: Resource not found for validate token request` error. With debugging on, it appears lexicon is not requesting a token from `https://identity.api.rackspacecloud.com/v2.0/tokens` before it continues to submit a request to `https://dns.api.rackspacecloud.com/v1.0/…`.
Here's the command I'm running:
lexicon rackspace --log_level DEBUG --auth-username myusername --auth-api-key XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX list example.com A
And here's the output:
Arguments: Namespace(action='list', auth_account=None, auth_api_key='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', auth_token=None, auth_username='myusername', content=None, delegated=None, domain='example.com', identifier=None, log_level='DEBUG', name=None, output='TABLE', priority=None, provider_name='rackspace', sleep_time=1, ttl=None, type='A')
Starting new HTTPS connection (1): dns.api.rackspacecloud.com:443
https://dns.api.rackspacecloud.com:443 "GET /v1.0/None/domains?name=example.com HTTP/1.1" 401 0
Traceback (most recent call last):
File "/Users/q/src/dns-lexicon/env/bin/lexicon", line 10, in <module>
sys.exit(main())
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/cli.py", line 117, in main
results = client.execute()
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/client.py", line 64, in execute
self.provider.authenticate()
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/providers/base.py", line 69, in authenticate
return self._authenticate()
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/providers/rackspace.py", line 66, in _authenticate
'name': self.domain
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/providers/base.py", line 142, in _get
return self._request('GET', url, query_params=query_params)
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/providers/rackspace.py", line 190, in _request
response.raise_for_status()
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/requests/models.py", line 940, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 401 Client Error: Resource not found for validate token request for url: https://dns.api.rackspacecloud.com/v1.0/None/domains?name=example.com
Of course, this request will fail if no token has been received from the `identity.api.rackspacecloud.com` API endpoint.
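For reference, here is a standalone sketch of the identity call that should be happening first. The endpoint, payload shape and token path are copied from the provider's `_authenticate`/`_auth_request` code further down; the credentials are placeholders:

```python
# Standalone check (not a fix): request a token from the Rackspace identity API
# with the same payload the provider builds, to rule out a credentials problem.
import json
import requests

resp = requests.post(
    "https://identity.api.rackspacecloud.com/v2.0/tokens",
    data=json.dumps({"auth": {"RAX-KSKEY:apiKeyCredentials": {
        "username": "myusername",
        "apiKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
    }}}),
    headers={"Content-Type": "application/json"},
)
resp.raise_for_status()
print(resp.json()["access"]["token"]["id"])
```

If this prints a token, the username/API key pair is fine, and the problem is likely in how lexicon passes the token and account number to the DNS API (note the `None` account segment in the failing URL above).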
I'm happy to help troubleshoot, but I thought I'd ask first if I'm using the correct parameters for the rackspace provider.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lexicon/providers/rackspace.py`
Content:
```
1 """Rackspace provider implementation"""
2 from __future__ import absolute_import
3 import json
4 import logging
5 import time
6
7 import requests
8 from lexicon.providers.base import Provider as BaseProvider
9
10
11 LOGGER = logging.getLogger(__name__)
12
13 NAMESERVER_DOMAINS = ['rackspacecloud.com']
14
15
16 def _async_request_completed(payload):
17 """Looks into an async response payload to see if the requested job has finished."""
18 if payload['status'] == 'COMPLETED':
19 return True
20 if payload['status'] == 'ERROR':
21 return True
22 return False
23
24
25 def provider_parser(subparser):
26 """Configure provider parser for Rackspace"""
27 subparser.add_argument(
28 "--auth-account", help="specify account number for authentication")
29 subparser.add_argument(
30 "--auth-username",
31 help="specify username for authentication. Only used if --auth-token is empty.")
32 subparser.add_argument(
33 "--auth-api-key",
34 help="specify api key for authentication. Only used if --auth-token is empty.")
35 subparser.add_argument(
36 "--auth-token",
37 help=("specify token for authentication. "
38 "If empty, the username and api key will be used to create a token."))
39 subparser.add_argument("--sleep-time", type=float, default=1,
40 help="number of seconds to wait between update requests.")
41
42
43 class Provider(BaseProvider):
44 """Provider class for Rackspace"""
45 def __init__(self, config):
46 super(Provider, self).__init__(config)
47 self.domain_id = None
48 self.api_endpoint = 'https://dns.api.rackspacecloud.com/v1.0'
49 self.auth_api_endpoint = 'https://identity.api.rackspacecloud.com/v2.0'
50 self._auth_token = None
51
52 def _authenticate(self):
53 self._auth_token = self._get_provider_option('auth_token')
54 if not self._auth_token:
55 auth_response = self._auth_request('POST', '/tokens', {
56 'auth': {
57 'RAX-KSKEY:apiKeyCredentials': {
58 'username': self._get_provider_option('auth_username'),
59 'apiKey': self._get_provider_option('auth_api_key')
60 }
61 }
62 })
63 self._auth_token = auth_response['access']['token']['id']
64
65 payload = self._get('/domains', {
66 'name': self.domain
67 })
68
69 if not payload['domains']:
70 raise Exception('No domain found')
71 if len(payload['domains']) > 1:
72 raise Exception('Too many domains found. This should not happen')
73
74 self.domain_id = payload['domains'][0]['id']
75
76 # Create record. If record already exists with the same content, do nothing'
77
78 def _create_record(self, rtype, name, content):
79 data = {'records': [
80 {'type': rtype, 'name': self._full_name(name), 'data': content}]}
81 if self._get_lexicon_option('ttl'):
82 data['records'][0]['ttl'] = self._get_lexicon_option('ttl')
83
84 try:
85 payload = self._post_and_wait(
86 '/domains/{0}/records'.format(self.domain_id), data)
87 except Exception as error:
88 if str(error).startswith('Record is a duplicate of another record'):
89 return self._update_record(None, rtype, name, content)
90 raise error
91
92 success = len(payload['records']) > 0
93 LOGGER.debug('create_record: %s', success)
94 return success
95
96 # List all records. Return an empty list if no records found
97 # type, name and content are used to filter records.
98 # If possible filter during the query, otherwise filter after response is received.
99 def _list_records(self, rtype=None, name=None, content=None):
100 params = {'per_page': 100}
101 if rtype:
102 params['type'] = rtype
103 if name:
104 params['name'] = self._full_name(name)
105 # Sending the data filter to the Rackspace DNS API results in a 503 error
106 # if content:
107 # params['data'] = content
108
109 payload = self._get(
110 '/domains/{0}/records'.format(self.domain_id), params)
111
112 records = list(payload['records'])
113 if content:
114 records = [
115 record for record in records if record['data'] == content]
116 records = [{
117 'type': record['type'],
118 'name': record['name'],
119 'ttl': record['ttl'],
120 'content': record['data'],
121 'id': record['id']
122 } for record in records]
123
124 LOGGER.debug('list_records: %s', records)
125 return records
126
127 # Create or update a record.
128 def _update_record(self, identifier, rtype=None, name=None, content=None):
129 data = {}
130 if rtype:
131 data['type'] = rtype
132 if name:
133 data['name'] = self._full_name(name)
134 if content:
135 data['data'] = content
136 if self._get_lexicon_option('ttl'):
137 data['ttl'] = self._get_lexicon_option('ttl')
138
139 if identifier is None:
140 records = self._list_records(rtype, name)
141 if not records:
142 raise Exception('Unable to find record to modify: ' + name)
143 identifier = records[0]['id']
144
145 self._put_and_wait(
146 '/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)
147
148 # If it didn't raise from the http status code, then we're good
149 LOGGER.debug('update_record: %s', identifier)
150 return True
151
152 # Delete an existing record.
153 # If record does not exist, do nothing.
154 def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
155 delete_record_id = []
156 if not identifier:
157 records = self._list_records(rtype, name, content)
158 delete_record_id = [record['id'] for record in records]
159 else:
160 delete_record_id.append(identifier)
161
162 LOGGER.debug('delete_records: %s', delete_record_id)
163
164 for record_id in delete_record_id:
165 self._delete_and_wait(
166 '/domains/{0}/records/{1}'.format(self.domain_id, record_id)
167 )
168
169 # If it didn't raise from the http status code, then we're good
170 success = True
171 LOGGER.debug('delete_record: %s', success)
172 return success
173
174 # Helpers
175
176 def _request(self, action='GET', url='/', data=None, query_params=None):
177 if data is None:
178 data = {}
179 if query_params is None:
180 query_params = {}
181 full_url = (self.api_endpoint +
182 '/{0}' + url).format(self._get_provider_option('auth_account'))
183 response = requests.request(action, full_url, params=query_params,
184 data=json.dumps(data),
185 headers={
186 'X-Auth-Token': self._get_provider_option('auth_token'),
187 'Content-Type': 'application/json'
188 })
189 # if the request fails for any reason, throw an error.
190 response.raise_for_status()
191 return response.json()
192
193 # Non-GET requests to the Rackspace CloudDNS API are asynchronous
194 def _request_and_wait(self, action='POST', url='/', data=None, query_params=None):
195 result = self._request(action, url, data, query_params)
196
197 sleep_time = self._get_provider_option('sleep_time') or '1'
198 sleep_time = float(sleep_time)
199
200 while not _async_request_completed(result):
201 if sleep_time:
202 time.sleep(sleep_time)
203 result = self._update_response(result)
204
205 if result['status'] == 'ERROR':
206 raise Exception(result['error']['details'])
207
208 if 'response' in result:
209 return result['response']
210 return None
211
212 def _post_and_wait(self, url='/', data=None, query_params=None):
213 return self._request_and_wait('POST', url, data, query_params)
214
215 def _put_and_wait(self, url='/', data=None, query_params=None):
216 return self._request_and_wait('PUT', url, data, query_params)
217
218 def _delete_and_wait(self, url='/', data=None, query_params=None):
219 return self._request_and_wait('DELETE', url, data, query_params)
220
221 def _update_response(self, payload):
222 response = requests.request('GET', payload['callbackUrl'], params={'showDetails': 'true'},
223 data={},
224 headers={
225 'X-Auth-Token': self._get_provider_option('auth_token'),
226 'Content-Type': 'application/json'})
227
228 # if the request fails for any reason, throw an error.
229 response.raise_for_status()
230 return response.json()
231
232 def _auth_request(self, action='GET', url='/', data=None, query_params=None):
233 if data is None:
234 data = {}
235
236 response = requests.request(action, self.auth_api_endpoint + url, params=query_params,
237 data=json.dumps(data),
238 headers={
239 'Content-Type': 'application/json'
240 })
241 # if the request fails for any reason, throw an error.
242 response.raise_for_status()
243 return response.json()
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lexicon/providers/rackspace.py b/lexicon/providers/rackspace.py
--- a/lexicon/providers/rackspace.py
+++ b/lexicon/providers/rackspace.py
@@ -48,6 +48,16 @@
self.api_endpoint = 'https://dns.api.rackspacecloud.com/v1.0'
self.auth_api_endpoint = 'https://identity.api.rackspacecloud.com/v2.0'
self._auth_token = None
+ self._auth_account = None
+
+ def _get_rackspace_option(self, key):
+ private_key = '_' + key
+ result = None
+ if hasattr(self, private_key):
+ result = getattr(self, private_key)
+ if result is None:
+ result = self._get_provider_option(key)
+ return result
def _authenticate(self):
self._auth_token = self._get_provider_option('auth_token')
@@ -61,6 +71,7 @@
}
})
self._auth_token = auth_response['access']['token']['id']
+ self._auth_account = auth_response['access']['token']['tenant']['id']
payload = self._get('/domains', {
'name': self.domain
@@ -178,12 +189,13 @@
data = {}
if query_params is None:
query_params = {}
+ LOGGER.debug('request tenant ID: %s', self._get_rackspace_option('auth_account'))
full_url = (self.api_endpoint +
- '/{0}' + url).format(self._get_provider_option('auth_account'))
+ '/{0}' + url).format(self._get_rackspace_option('auth_account'))
response = requests.request(action, full_url, params=query_params,
data=json.dumps(data),
headers={
- 'X-Auth-Token': self._get_provider_option('auth_token'),
+ 'X-Auth-Token': self._get_rackspace_option('auth_token'),
'Content-Type': 'application/json'
})
# if the request fails for any reason, throw an error.
@@ -194,7 +206,7 @@
def _request_and_wait(self, action='POST', url='/', data=None, query_params=None):
result = self._request(action, url, data, query_params)
- sleep_time = self._get_provider_option('sleep_time') or '1'
+ sleep_time = self._get_rackspace_option('sleep_time') or '1'
sleep_time = float(sleep_time)
while not _async_request_completed(result):
@@ -222,7 +234,7 @@
response = requests.request('GET', payload['callbackUrl'], params={'showDetails': 'true'},
data={},
headers={
- 'X-Auth-Token': self._get_provider_option('auth_token'),
+ 'X-Auth-Token': self._get_rackspace_option('auth_token'),
'Content-Type': 'application/json'})
# if the request fails for any reason, throw an error.
| {"golden_diff": "diff --git a/lexicon/providers/rackspace.py b/lexicon/providers/rackspace.py\n--- a/lexicon/providers/rackspace.py\n+++ b/lexicon/providers/rackspace.py\n@@ -48,6 +48,16 @@\n self.api_endpoint = 'https://dns.api.rackspacecloud.com/v1.0'\n self.auth_api_endpoint = 'https://identity.api.rackspacecloud.com/v2.0'\n self._auth_token = None\n+ self._auth_account = None\n+\n+ def _get_rackspace_option(self, key):\n+ private_key = '_' + key\n+ result = None\n+ if hasattr(self, private_key):\n+ result = getattr(self, private_key)\n+ if result is None:\n+ result = self._get_provider_option(key)\n+ return result\n \n def _authenticate(self):\n self._auth_token = self._get_provider_option('auth_token')\n@@ -61,6 +71,7 @@\n }\n })\n self._auth_token = auth_response['access']['token']['id']\n+ self._auth_account = auth_response['access']['token']['tenant']['id']\n \n payload = self._get('/domains', {\n 'name': self.domain\n@@ -178,12 +189,13 @@\n data = {}\n if query_params is None:\n query_params = {}\n+ LOGGER.debug('request tenant ID: %s', self._get_rackspace_option('auth_account'))\n full_url = (self.api_endpoint +\n- '/{0}' + url).format(self._get_provider_option('auth_account'))\n+ '/{0}' + url).format(self._get_rackspace_option('auth_account'))\n response = requests.request(action, full_url, params=query_params,\n data=json.dumps(data),\n headers={\n- 'X-Auth-Token': self._get_provider_option('auth_token'),\n+ 'X-Auth-Token': self._get_rackspace_option('auth_token'),\n 'Content-Type': 'application/json'\n })\n # if the request fails for any reason, throw an error.\n@@ -194,7 +206,7 @@\n def _request_and_wait(self, action='POST', url='/', data=None, query_params=None):\n result = self._request(action, url, data, query_params)\n \n- sleep_time = self._get_provider_option('sleep_time') or '1'\n+ sleep_time = self._get_rackspace_option('sleep_time') or '1'\n sleep_time = float(sleep_time)\n \n while not _async_request_completed(result):\n@@ -222,7 +234,7 @@\n response = requests.request('GET', payload['callbackUrl'], params={'showDetails': 'true'},\n data={},\n headers={\n- 'X-Auth-Token': self._get_provider_option('auth_token'),\n+ 'X-Auth-Token': self._get_rackspace_option('auth_token'),\n 'Content-Type': 'application/json'})\n \n # if the request fails for any reason, throw an error.\n", "issue": "Rackspace provider not getting token from the identity API\n_Update: solved! See my comment below._\r\n\r\nI'm just starting out with lexicon and trying to get it working with Rackspace Cloud DNS. Requests are failing with a `requests.exceptions.HTTPError: 401 Client Error: Resource not found for validate token request` error. 
With debugging on, it appears lexicon is not requesting a token from `https://identity.api.rackspacecloud.com/v2.0/tokens` before it continues to submit a reqeuset to `https://dns.api.rackspacecloud.com/v1.0/\u2026`.\r\n\r\nHere's the command I'm running:\r\n\r\n lexicon rackspace --log_level DEBUG --auth-username myusername --auth-api-key XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX list example.com A\r\n\r\nAnd here's the output:\r\n\r\n Arguments: Namespace(action='list', auth_account=None, auth_api_key='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', auth_token=None, auth_username='myusername', content=None, delegated=None, domain='example.com', identifier=None, log_level='DEBUG', name=None, output='TABLE', priority=None, provider_name='rackspace', sleep_time=1, ttl=None, type='A')\r\n Starting new HTTPS connection (1): dns.api.rackspacecloud.com:443\r\n https://dns.api.rackspacecloud.com:443 \"GET /v1.0/None/domains?name=example.com HTTP/1.1\" 401 0\r\n Traceback (most recent call last):\r\n File \"/Users/q/src/dns-lexicon/env/bin/lexicon\", line 10, in <module>\r\n sys.exit(main())\r\n File \"/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/cli.py\", line 117, in main\r\n results = client.execute()\r\n File \"/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/client.py\", line 64, in execute\r\n self.provider.authenticate()\r\n File \"/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/providers/base.py\", line 69, in authenticate\r\n return self._authenticate()\r\n File \"/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/providers/rackspace.py\", line 66, in _authenticate\r\n 'name': self.domain\r\n File \"/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/providers/base.py\", line 142, in _get\r\n return self._request('GET', url, query_params=query_params)\r\n File \"/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/providers/rackspace.py\", line 190, in _request\r\n response.raise_for_status()\r\n File \"/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/requests/models.py\", line 940, in raise_for_status\r\n raise HTTPError(http_error_msg, response=self)\r\n requests.exceptions.HTTPError: 401 Client Error: Resource not found for validate token request for url: https://dns.api.rackspacecloud.com/v1.0/None/domains?name=example.com\r\n\r\nOf course, this request will fail if is no token has been received from the `identity.api.rackspacecloud.com` API endpoint.\r\n\r\nI'm happy to help troubleshoot, but I thought I'd ask first if I'm using the correct parameters for the rackspace provider. \n", "before_files": [{"content": "\"\"\"Rackspace provider implementation\"\"\"\nfrom __future__ import absolute_import\nimport json\nimport logging\nimport time\n\nimport requests\nfrom lexicon.providers.base import Provider as BaseProvider\n\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESERVER_DOMAINS = ['rackspacecloud.com']\n\n\ndef _async_request_completed(payload):\n \"\"\"Looks into an async response payload to see if the requested job has finished.\"\"\"\n if payload['status'] == 'COMPLETED':\n return True\n if payload['status'] == 'ERROR':\n return True\n return False\n\n\ndef provider_parser(subparser):\n \"\"\"Configure provider parser for Rackspace\"\"\"\n subparser.add_argument(\n \"--auth-account\", help=\"specify account number for authentication\")\n subparser.add_argument(\n \"--auth-username\",\n help=\"specify username for authentication. 
Only used if --auth-token is empty.\")\n subparser.add_argument(\n \"--auth-api-key\",\n help=\"specify api key for authentication. Only used if --auth-token is empty.\")\n subparser.add_argument(\n \"--auth-token\",\n help=(\"specify token for authentication. \"\n \"If empty, the username and api key will be used to create a token.\"))\n subparser.add_argument(\"--sleep-time\", type=float, default=1,\n help=\"number of seconds to wait between update requests.\")\n\n\nclass Provider(BaseProvider):\n \"\"\"Provider class for Rackspace\"\"\"\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.domain_id = None\n self.api_endpoint = 'https://dns.api.rackspacecloud.com/v1.0'\n self.auth_api_endpoint = 'https://identity.api.rackspacecloud.com/v2.0'\n self._auth_token = None\n\n def _authenticate(self):\n self._auth_token = self._get_provider_option('auth_token')\n if not self._auth_token:\n auth_response = self._auth_request('POST', '/tokens', {\n 'auth': {\n 'RAX-KSKEY:apiKeyCredentials': {\n 'username': self._get_provider_option('auth_username'),\n 'apiKey': self._get_provider_option('auth_api_key')\n }\n }\n })\n self._auth_token = auth_response['access']['token']['id']\n\n payload = self._get('/domains', {\n 'name': self.domain\n })\n\n if not payload['domains']:\n raise Exception('No domain found')\n if len(payload['domains']) > 1:\n raise Exception('Too many domains found. This should not happen')\n\n self.domain_id = payload['domains'][0]['id']\n\n # Create record. If record already exists with the same content, do nothing'\n\n def _create_record(self, rtype, name, content):\n data = {'records': [\n {'type': rtype, 'name': self._full_name(name), 'data': content}]}\n if self._get_lexicon_option('ttl'):\n data['records'][0]['ttl'] = self._get_lexicon_option('ttl')\n\n try:\n payload = self._post_and_wait(\n '/domains/{0}/records'.format(self.domain_id), data)\n except Exception as error:\n if str(error).startswith('Record is a duplicate of another record'):\n return self._update_record(None, rtype, name, content)\n raise error\n\n success = len(payload['records']) > 0\n LOGGER.debug('create_record: %s', success)\n return success\n\n # List all records. 
Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def _list_records(self, rtype=None, name=None, content=None):\n params = {'per_page': 100}\n if rtype:\n params['type'] = rtype\n if name:\n params['name'] = self._full_name(name)\n # Sending the data filter to the Rackspace DNS API results in a 503 error\n # if content:\n # params['data'] = content\n\n payload = self._get(\n '/domains/{0}/records'.format(self.domain_id), params)\n\n records = list(payload['records'])\n if content:\n records = [\n record for record in records if record['data'] == content]\n records = [{\n 'type': record['type'],\n 'name': record['name'],\n 'ttl': record['ttl'],\n 'content': record['data'],\n 'id': record['id']\n } for record in records]\n\n LOGGER.debug('list_records: %s', records)\n return records\n\n # Create or update a record.\n def _update_record(self, identifier, rtype=None, name=None, content=None):\n data = {}\n if rtype:\n data['type'] = rtype\n if name:\n data['name'] = self._full_name(name)\n if content:\n data['data'] = content\n if self._get_lexicon_option('ttl'):\n data['ttl'] = self._get_lexicon_option('ttl')\n\n if identifier is None:\n records = self._list_records(rtype, name)\n if not records:\n raise Exception('Unable to find record to modify: ' + name)\n identifier = records[0]['id']\n\n self._put_and_wait(\n '/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)\n\n # If it didn't raise from the http status code, then we're good\n LOGGER.debug('update_record: %s', identifier)\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n delete_record_id = []\n if not identifier:\n records = self._list_records(rtype, name, content)\n delete_record_id = [record['id'] for record in records]\n else:\n delete_record_id.append(identifier)\n\n LOGGER.debug('delete_records: %s', delete_record_id)\n\n for record_id in delete_record_id:\n self._delete_and_wait(\n '/domains/{0}/records/{1}'.format(self.domain_id, record_id)\n )\n\n # If it didn't raise from the http status code, then we're good\n success = True\n LOGGER.debug('delete_record: %s', success)\n return success\n\n # Helpers\n\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n full_url = (self.api_endpoint +\n '/{0}' + url).format(self._get_provider_option('auth_account'))\n response = requests.request(action, full_url, params=query_params,\n data=json.dumps(data),\n headers={\n 'X-Auth-Token': self._get_provider_option('auth_token'),\n 'Content-Type': 'application/json'\n })\n # if the request fails for any reason, throw an error.\n response.raise_for_status()\n return response.json()\n\n # Non-GET requests to the Rackspace CloudDNS API are asynchronous\n def _request_and_wait(self, action='POST', url='/', data=None, query_params=None):\n result = self._request(action, url, data, query_params)\n\n sleep_time = self._get_provider_option('sleep_time') or '1'\n sleep_time = float(sleep_time)\n\n while not _async_request_completed(result):\n if sleep_time:\n time.sleep(sleep_time)\n result = self._update_response(result)\n\n if result['status'] == 'ERROR':\n raise Exception(result['error']['details'])\n\n if 'response' in result:\n return result['response']\n return None\n\n def 
_post_and_wait(self, url='/', data=None, query_params=None):\n return self._request_and_wait('POST', url, data, query_params)\n\n def _put_and_wait(self, url='/', data=None, query_params=None):\n return self._request_and_wait('PUT', url, data, query_params)\n\n def _delete_and_wait(self, url='/', data=None, query_params=None):\n return self._request_and_wait('DELETE', url, data, query_params)\n\n def _update_response(self, payload):\n response = requests.request('GET', payload['callbackUrl'], params={'showDetails': 'true'},\n data={},\n headers={\n 'X-Auth-Token': self._get_provider_option('auth_token'),\n 'Content-Type': 'application/json'})\n\n # if the request fails for any reason, throw an error.\n response.raise_for_status()\n return response.json()\n\n def _auth_request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n\n response = requests.request(action, self.auth_api_endpoint + url, params=query_params,\n data=json.dumps(data),\n headers={\n 'Content-Type': 'application/json'\n })\n # if the request fails for any reason, throw an error.\n response.raise_for_status()\n return response.json()\n", "path": "lexicon/providers/rackspace.py"}], "after_files": [{"content": "\"\"\"Rackspace provider implementation\"\"\"\nfrom __future__ import absolute_import\nimport json\nimport logging\nimport time\n\nimport requests\nfrom lexicon.providers.base import Provider as BaseProvider\n\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESERVER_DOMAINS = ['rackspacecloud.com']\n\n\ndef _async_request_completed(payload):\n \"\"\"Looks into an async response payload to see if the requested job has finished.\"\"\"\n if payload['status'] == 'COMPLETED':\n return True\n if payload['status'] == 'ERROR':\n return True\n return False\n\n\ndef provider_parser(subparser):\n \"\"\"Configure provider parser for Rackspace\"\"\"\n subparser.add_argument(\n \"--auth-account\", help=\"specify account number for authentication\")\n subparser.add_argument(\n \"--auth-username\",\n help=\"specify username for authentication. Only used if --auth-token is empty.\")\n subparser.add_argument(\n \"--auth-api-key\",\n help=\"specify api key for authentication. Only used if --auth-token is empty.\")\n subparser.add_argument(\n \"--auth-token\",\n help=(\"specify token for authentication. 
\"\n \"If empty, the username and api key will be used to create a token.\"))\n subparser.add_argument(\"--sleep-time\", type=float, default=1,\n help=\"number of seconds to wait between update requests.\")\n\n\nclass Provider(BaseProvider):\n \"\"\"Provider class for Rackspace\"\"\"\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.domain_id = None\n self.api_endpoint = 'https://dns.api.rackspacecloud.com/v1.0'\n self.auth_api_endpoint = 'https://identity.api.rackspacecloud.com/v2.0'\n self._auth_token = None\n self._auth_account = None\n\n def _get_rackspace_option(self, key):\n private_key = '_' + key\n result = None\n if hasattr(self, private_key):\n result = getattr(self, private_key)\n if result is None:\n result = self._get_provider_option(key)\n return result\n\n def _authenticate(self):\n self._auth_token = self._get_provider_option('auth_token')\n if not self._auth_token:\n auth_response = self._auth_request('POST', '/tokens', {\n 'auth': {\n 'RAX-KSKEY:apiKeyCredentials': {\n 'username': self._get_provider_option('auth_username'),\n 'apiKey': self._get_provider_option('auth_api_key')\n }\n }\n })\n self._auth_token = auth_response['access']['token']['id']\n self._auth_account = auth_response['access']['token']['tenant']['id']\n\n payload = self._get('/domains', {\n 'name': self.domain\n })\n\n if not payload['domains']:\n raise Exception('No domain found')\n if len(payload['domains']) > 1:\n raise Exception('Too many domains found. This should not happen')\n\n self.domain_id = payload['domains'][0]['id']\n\n # Create record. If record already exists with the same content, do nothing'\n\n def _create_record(self, rtype, name, content):\n data = {'records': [\n {'type': rtype, 'name': self._full_name(name), 'data': content}]}\n if self._get_lexicon_option('ttl'):\n data['records'][0]['ttl'] = self._get_lexicon_option('ttl')\n\n try:\n payload = self._post_and_wait(\n '/domains/{0}/records'.format(self.domain_id), data)\n except Exception as error:\n if str(error).startswith('Record is a duplicate of another record'):\n return self._update_record(None, rtype, name, content)\n raise error\n\n success = len(payload['records']) > 0\n LOGGER.debug('create_record: %s', success)\n return success\n\n # List all records. 
Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def _list_records(self, rtype=None, name=None, content=None):\n params = {'per_page': 100}\n if rtype:\n params['type'] = rtype\n if name:\n params['name'] = self._full_name(name)\n # Sending the data filter to the Rackspace DNS API results in a 503 error\n # if content:\n # params['data'] = content\n\n payload = self._get(\n '/domains/{0}/records'.format(self.domain_id), params)\n\n records = list(payload['records'])\n if content:\n records = [\n record for record in records if record['data'] == content]\n records = [{\n 'type': record['type'],\n 'name': record['name'],\n 'ttl': record['ttl'],\n 'content': record['data'],\n 'id': record['id']\n } for record in records]\n\n LOGGER.debug('list_records: %s', records)\n return records\n\n # Create or update a record.\n def _update_record(self, identifier, rtype=None, name=None, content=None):\n data = {}\n if rtype:\n data['type'] = rtype\n if name:\n data['name'] = self._full_name(name)\n if content:\n data['data'] = content\n if self._get_lexicon_option('ttl'):\n data['ttl'] = self._get_lexicon_option('ttl')\n\n if identifier is None:\n records = self._list_records(rtype, name)\n if not records:\n raise Exception('Unable to find record to modify: ' + name)\n identifier = records[0]['id']\n\n self._put_and_wait(\n '/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)\n\n # If it didn't raise from the http status code, then we're good\n LOGGER.debug('update_record: %s', identifier)\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n delete_record_id = []\n if not identifier:\n records = self._list_records(rtype, name, content)\n delete_record_id = [record['id'] for record in records]\n else:\n delete_record_id.append(identifier)\n\n LOGGER.debug('delete_records: %s', delete_record_id)\n\n for record_id in delete_record_id:\n self._delete_and_wait(\n '/domains/{0}/records/{1}'.format(self.domain_id, record_id)\n )\n\n # If it didn't raise from the http status code, then we're good\n success = True\n LOGGER.debug('delete_record: %s', success)\n return success\n\n # Helpers\n\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n LOGGER.debug('request tenant ID: %s', self._get_rackspace_option('auth_account'))\n full_url = (self.api_endpoint +\n '/{0}' + url).format(self._get_rackspace_option('auth_account'))\n response = requests.request(action, full_url, params=query_params,\n data=json.dumps(data),\n headers={\n 'X-Auth-Token': self._get_rackspace_option('auth_token'),\n 'Content-Type': 'application/json'\n })\n # if the request fails for any reason, throw an error.\n response.raise_for_status()\n return response.json()\n\n # Non-GET requests to the Rackspace CloudDNS API are asynchronous\n def _request_and_wait(self, action='POST', url='/', data=None, query_params=None):\n result = self._request(action, url, data, query_params)\n\n sleep_time = self._get_rackspace_option('sleep_time') or '1'\n sleep_time = float(sleep_time)\n\n while not _async_request_completed(result):\n if sleep_time:\n time.sleep(sleep_time)\n result = self._update_response(result)\n\n if result['status'] == 'ERROR':\n raise 
Exception(result['error']['details'])\n\n if 'response' in result:\n return result['response']\n return None\n\n def _post_and_wait(self, url='/', data=None, query_params=None):\n return self._request_and_wait('POST', url, data, query_params)\n\n def _put_and_wait(self, url='/', data=None, query_params=None):\n return self._request_and_wait('PUT', url, data, query_params)\n\n def _delete_and_wait(self, url='/', data=None, query_params=None):\n return self._request_and_wait('DELETE', url, data, query_params)\n\n def _update_response(self, payload):\n response = requests.request('GET', payload['callbackUrl'], params={'showDetails': 'true'},\n data={},\n headers={\n 'X-Auth-Token': self._get_rackspace_option('auth_token'),\n 'Content-Type': 'application/json'})\n\n # if the request fails for any reason, throw an error.\n response.raise_for_status()\n return response.json()\n\n def _auth_request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n\n response = requests.request(action, self.auth_api_endpoint + url, params=query_params,\n data=json.dumps(data),\n headers={\n 'Content-Type': 'application/json'\n })\n # if the request fails for any reason, throw an error.\n response.raise_for_status()\n return response.json()\n", "path": "lexicon/providers/rackspace.py"}]} | 3,649 | 673 |
gh_patches_debug_5114 | rasdani/github-patches | git_diff | pre-commit__pre-commit-177 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stashed changes lost if hook fails
I've run into this particular (in my eyes, critical) bug.
If I want to do a partial commit, e.g. I have 2 files but I only add 1 file to the staging area and the staged file will cause a hook to fail, I lose the changes in the 2nd file because pre-commit fails to reroll the patch it stashed before running.
Here's my terminal log and the steps to reproduce:
## Version
$ pre-commit -V
pre-commit 0.3.0
## Commands to reproduce
```
$ cat unstaged.py
"""I am unstaged"""
$ echo "'''I am unstaged, but I have changes'''" > unstaged.py
$ echo "x = 'This is the loooooooooooooooooooooooooooooooooooongest liiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiine eveeeeeeeeer'" > foo.py
$ git status
On branch master
Changes not staged for commit:
(use "git add <file>..." to update what will be committed)
(use "git checkout -- <file>..." to discard changes in working directory)
modified: unstaged.py
modified: foo.py
no changes added to commit (use "git add" and/or "git commit -a")
$ git add foo.py
$ git commit -m "Adding a long line"
[WARNING] Unstaged files detected.
[INFO] Stashing unstaged files to /home/k/.pre-commit/patch1412683352.
Flake8...............................................Failed
hookid: flake8
foo.py:1:80: E501 line too long (112 > 79 characters)
[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...
An unexpected error has occurred: CalledProcessError: Command: [u'git', u'apply', u'/home/k/.pre-commit/patch1412683352']
Return code: 128
Expected return code: 0
Output: (u'', u'fatal: unrecognized input\n')
Check the log at ~/.pre-commit/pre-commit.log
$ git status
On branch master
Changes to be committed:
(use "git reset HEAD <file>..." to unstage)
modified: foo.py
$ echo "x = 'This is a shorter line, its better'" > foo.py
$ git status
On branch master
Changes to be committed:
(use "git reset HEAD <file>..." to unstage)
modified: foo.py
Changes not staged for commit:
(use "git add <file>..." to update what will be committed)
(use "git checkout -- <file>..." to discard changes in working directory)
modified: foo.py
$ git add foo.py
$ git commit -m "Fixed the error I got from the flake8 hook"
Flake8...............................................Passed
[master 78568e8] Fixed the error I got from the flake8 hook
1 file changed, 1 insertion(+), 1 deletion(-)
$ git status
On branch master
nothing to commit, working directory clean
$ cat unstaged.py
"""I am unstaged"""
```
## Log
```
$ cat ~/.pre-commit/pre-commit.log
Traceback (most recent call last):
File "/home/k/.virtualenvs/pre-commit-test/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/k/.virtualenvs/pre-commit-test/local/lib/python2.7/site-packages/pre_commit/main.py", line 108, in main
return run(runner, args)
File "/home/k/.virtualenvs/pre-commit-test/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 151, in run
return _run_hooks(runner, args, write=write, environ=environ)
File "/usr/lib/python2.7/contextlib.py", line 24, in __exit__
self.gen.next()
File "/home/k/.virtualenvs/pre-commit-test/local/lib/python2.7/site-packages/pre_commit/staged_files_only.py", line 55, in staged_files_only
cmd_runner.run(['git', 'apply', patch_filename])
File "/home/k/.virtualenvs/pre-commit-test/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 82, in run
returncode, replaced_cmd, retcode, output=(stdout, stderr),
CalledProcessError: Command: [u'git', u'apply', u'/home/k/.pre-commit/patch1412683352']
Return code: 128
Expected return code: 0
Output: (u'', u'fatal: unrecognized input\n')
```
## .pre-commit-config.yaml
```
$ cat .pre-commit-config.yaml
- repo: [email protected]:pre-commit/pre-commit-hooks
sha: 6343700aa063fe30acc319d2dc84353a35a3d6d0
hooks:
- id: flake8
args: ['--ignore=E712,F821']
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/staged_files_only.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import io
5 import logging
6 import time
7
8 from pre_commit.util import CalledProcessError
9
10
11 logger = logging.getLogger('pre_commit')
12
13
14 @contextlib.contextmanager
15 def staged_files_only(cmd_runner):
16 """Clear any unstaged changes from the git working directory inside this
17 context.
18
19 Args:
20 cmd_runner - PrefixedCommandRunner
21 """
22 # Determine if there are unstaged files
23 retcode, diff_stdout_binary, _ = cmd_runner.run(
24 ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],
25 retcode=None,
26 encoding=None,
27 )
28 if retcode and diff_stdout_binary.strip():
29 patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))
30 logger.warning('Unstaged files detected.')
31 logger.info(
32 'Stashing unstaged files to {0}.'.format(patch_filename),
33 )
34 # Save the current unstaged changes as a patch
35 with io.open(patch_filename, 'wb') as patch_file:
36 patch_file.write(diff_stdout_binary)
37
38 # Clear the working directory of unstaged changes
39 cmd_runner.run(['git', 'checkout', '--', '.'])
40 try:
41 yield
42 finally:
43 # Try to apply the patch we saved
44 try:
45 cmd_runner.run(['git', 'apply', patch_filename])
46 except CalledProcessError:
47 logger.warning(
48 'Stashed changes conflicted with hook auto-fixes... '
49 'Rolling back fixes...'
50 )
51 # We failed to apply the patch, presumably due to fixes made
52 # by hooks.
53 # Roll back the changes made by hooks.
54 cmd_runner.run(['git', 'checkout', '--', '.'])
55 cmd_runner.run(['git', 'apply', patch_filename])
56 logger.info('Restored changes from {0}.'.format(patch_filename))
57 else:
58 # There weren't any staged files so we don't need to do anything
59 # special
60 yield
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -21,7 +21,10 @@
"""
# Determine if there are unstaged files
retcode, diff_stdout_binary, _ = cmd_runner.run(
- ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],
+ [
+ 'git', 'diff', '--ignore-submodules', '--binary', '--exit-code',
+ '--no-color',
+ ],
retcode=None,
encoding=None,
)
| {"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -21,7 +21,10 @@\n \"\"\"\n # Determine if there are unstaged files\n retcode, diff_stdout_binary, _ = cmd_runner.run(\n- ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],\n+ [\n+ 'git', 'diff', '--ignore-submodules', '--binary', '--exit-code',\n+ '--no-color',\n+ ],\n retcode=None,\n encoding=None,\n )\n", "issue": "Stashed changes lost if hook fails\nI've run into this particular (in my eyes, critical) bug.\nIf I want to do a partial commit, e.g. I have 2 files but I only add 1 file to the staging area and the staged file will cause a hook to fail, I loose the changes in the 2nd file because pre-commit fails to reroll the patch it stashed before running.\n\nHere's my terminal log and the steps to reproduce:\n## Version\n\n$ pre-commit -V\npre-commit 0.3.0\n## Commands to reproduce\n\n```\n$ cat unstaged.py\n\"\"\"I am unstaged\"\"\"\n$ echo \"'''I am unstaged, but I have changes'''\" > unstaged.py\n$ echo \"x = 'This is the loooooooooooooooooooooooooooooooooooongest liiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiine eveeeeeeeeer'\" > foo.py \n$ git status\nOn branch master\nChanges not staged for commit:\n (use \"git add <file>...\" to update what will be committed)\n (use \"git checkout -- <file>...\" to discard changes in working directory)\n\n modified: unstaged.py\n modified: foo.py\n\nno changes added to commit (use \"git add\" and/or \"git commit -a\")\n$ git add foo.py \n$ git commit -m \"Adding a long line\"\n[WARNING] Unstaged files detected.\n[INFO] Stashing unstaged files to /home/k/.pre-commit/patch1412683352.\nFlake8...............................................Failed\nhookid: flake8\n\nfoo.py:1:80: E501 line too long (112 > 79 characters)\n\n[WARNING] Stashed changes conflicted with hook auto-fixes... 
Rolling back fixes...\nAn unexpected error has occurred: CalledProcessError: Command: [u'git', u'apply', u'/home/k/.pre-commit/patch1412683352']\nReturn code: 128\nExpected return code: 0\nOutput: (u'', u'fatal: unrecognized input\\n')\n\nCheck the log at ~/.pre-commit/pre-commit.log\n$ git status\nOn branch master\nChanges to be committed:\n (use \"git reset HEAD <file>...\" to unstage)\n\n modified: foo.py\n\n$ echo \"x = 'This is a shorter line, its better'\" > foo.py\n$ git status\nOn branch master\nChanges to be committed:\n (use \"git reset HEAD <file>...\" to unstage)\n\n modified: foo.py\n\nChanges not staged for commit:\n (use \"git add <file>...\" to update what will be committed)\n (use \"git checkout -- <file>...\" to discard changes in working directory)\n\n modified: foo.py\n\n$ git add foo.py \n$ git commit -m \"Fixed the error I got from the flake8 hook\"\nFlake8...............................................Passed\n[master 78568e8] Fixed the error I got from the flake8 hook\n 1 file changed, 1 insertion(+), 1 deletion(-)\n$ git status\nOn branch master\nnothing to commit, working directory clean\n$ cat unstaged.py\n\"\"\"I am unstaged\"\"\"\n```\n## Log\n\n```\n$ cat ~/.pre-commit/pre-commit.log\nTraceback (most recent call last):\n File \"/home/k/.virtualenvs/pre-commit-test/local/lib/python2.7/site-packages/pre_commit/error_handler.py\", line 34, in error_handler\n yield\n File \"/home/k/.virtualenvs/pre-commit-test/local/lib/python2.7/site-packages/pre_commit/main.py\", line 108, in main\n return run(runner, args)\n File \"/home/k/.virtualenvs/pre-commit-test/local/lib/python2.7/site-packages/pre_commit/commands/run.py\", line 151, in run\n return _run_hooks(runner, args, write=write, environ=environ)\n File \"/usr/lib/python2.7/contextlib.py\", line 24, in __exit__\n self.gen.next()\n File \"/home/k/.virtualenvs/pre-commit-test/local/lib/python2.7/site-packages/pre_commit/staged_files_only.py\", line 55, in staged_files_only\n cmd_runner.run(['git', 'apply', patch_filename])\n File \"/home/k/.virtualenvs/pre-commit-test/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py\", line 82, in run\n returncode, replaced_cmd, retcode, output=(stdout, stderr),\nCalledProcessError: Command: [u'git', u'apply', u'/home/k/.pre-commit/patch1412683352']\nReturn code: 128\nExpected return code: 0\nOutput: (u'', u'fatal: unrecognized input\\n')\n```\n## .pre-commit-config.yaml\n\n```\n$ cat .pre-commit-config.yaml \n- repo: [email protected]:pre-commit/pre-commit-hooks\n sha: 6343700aa063fe30acc319d2dc84353a35a3d6d0\n hooks:\n - id: flake8\n args: ['--ignore=E712,F821']\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport time\n\nfrom pre_commit.util import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\[email protected]\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n retcode, diff_stdout_binary, _ = cmd_runner.run(\n ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged 
changes as a patch\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(['git', 'checkout', '--', '.'])\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n cmd_runner.run(['git', 'apply', patch_filename])\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...'\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n cmd_runner.run(['git', 'apply', patch_filename])\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport time\n\nfrom pre_commit.util import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\[email protected]\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n retcode, diff_stdout_binary, _ = cmd_runner.run(\n [\n 'git', 'diff', '--ignore-submodules', '--binary', '--exit-code',\n '--no-color',\n ],\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(['git', 'checkout', '--', '.'])\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n cmd_runner.run(['git', 'apply', patch_filename])\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...'\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n cmd_runner.run(['git', 'apply', patch_filename])\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}]} | 1,965 | 142 |
gh_patches_debug_1628 | rasdani/github-patches | git_diff | apache__tvm-12178 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exercise TVM under minimal configuration in CI
We have seen a couple bugs due to microTVM being presumed-ON in config.cmake. Namely, you get python errors importing TVM right now when USE_MICRO is OFF. We should have a regression test that verifies basic functionality with everything (or nearly everything) OFF.
Context: apache/tvm#9617
And another micro-related issue of the same kind, which i don't have handy right now.
cc @gigiblender
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ci/jenkins/generate.py`
Content:
```
1 #!/usr/bin/env python3
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 import jinja2
19 import argparse
20 import difflib
21 import re
22 import datetime
23 import textwrap
24
25 from pathlib import Path
26
27
28 REPO_ROOT = Path(__file__).resolve().parent.parent.parent
29 JENKINSFILE_TEMPLATE = REPO_ROOT / "ci" / "jenkins" / "Jenkinsfile.j2"
30 JENKINSFILE = REPO_ROOT / "Jenkinsfile"
31
32
33 data = {
34 "images": [
35 {
36 "name": "ci_arm",
37 "platform": "ARM",
38 },
39 {
40 "name": "ci_cpu",
41 "platform": "CPU",
42 },
43 {
44 "name": "ci_gpu",
45 "platform": "CPU",
46 },
47 {
48 "name": "ci_hexagon",
49 "platform": "CPU",
50 },
51 {
52 "name": "ci_i386",
53 "platform": "CPU",
54 },
55 {
56 "name": "ci_lint",
57 "platform": "CPU",
58 },
59 {
60 "name": "ci_cortexm",
61 "platform": "CPU",
62 },
63 {
64 "name": "ci_wasm",
65 "platform": "CPU",
66 },
67 ]
68 }
69
70
71 def lines_without_generated_tag(content):
72 return [
73 line for line in content.splitlines(keepends=True) if not line.startswith("// Generated at")
74 ]
75
76
77 if __name__ == "__main__":
78 help = "Regenerate Jenkinsfile from template"
79 parser = argparse.ArgumentParser(description=help)
80 parser.add_argument("--check", action="store_true", help="just verify the output didn't change")
81 args = parser.parse_args()
82
83 with open(JENKINSFILE) as f:
84 content = f.read()
85
86 data["generated_time"] = datetime.datetime.now().isoformat()
87
88 environment = jinja2.Environment(
89 loader=jinja2.FileSystemLoader(REPO_ROOT),
90 undefined=jinja2.StrictUndefined,
91 lstrip_blocks=True,
92 trim_blocks=True,
93 keep_trailing_newline=True,
94 )
95 template = environment.get_template(str(JENKINSFILE_TEMPLATE.relative_to(REPO_ROOT)))
96 new_content = template.render(**data)
97
98 diff = "".join(
99 difflib.unified_diff(
100 lines_without_generated_tag(content), lines_without_generated_tag(new_content)
101 )
102 )
103 if args.check:
104 if not diff:
105 print("Success, the newly generated Jenkinsfile matched the one on disk")
106 exit(0)
107 else:
108 print(
109 textwrap.dedent(
110 """
111 Newly generated Jenkinsfile did not match the one on disk! If you have made
112 edits to the Jenkinsfile, move them to 'jenkins/Jenkinsfile.j2' and
113 regenerate the Jenkinsfile from the template with
114
115 python3 -m pip install -r jenkins/requirements.txt
116 python3 jenkins/generate.py
117
118 Diffed changes:
119 """
120 ).strip()
121 )
122 print(diff)
123 exit(1)
124 else:
125 with open(JENKINSFILE, "w") as f:
126 f.write(new_content)
127 if not diff:
128 print(f"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, no changes made")
129 else:
130 print(f"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, changes:")
131 print(diff)
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ci/jenkins/generate.py b/ci/jenkins/generate.py
--- a/ci/jenkins/generate.py
+++ b/ci/jenkins/generate.py
@@ -40,6 +40,10 @@
"name": "ci_cpu",
"platform": "CPU",
},
+ {
+ "name": "ci_minimal",
+ "platform": "CPU",
+ },
{
"name": "ci_gpu",
"platform": "CPU",
| {"golden_diff": "diff --git a/ci/jenkins/generate.py b/ci/jenkins/generate.py\n--- a/ci/jenkins/generate.py\n+++ b/ci/jenkins/generate.py\n@@ -40,6 +40,10 @@\n \"name\": \"ci_cpu\",\n \"platform\": \"CPU\",\n },\n+ {\n+ \"name\": \"ci_minimal\",\n+ \"platform\": \"CPU\",\n+ },\n {\n \"name\": \"ci_gpu\",\n \"platform\": \"CPU\",\n", "issue": "Exercise TVM under minimal configuration in CI\nWe have seen a couple bugs due to microTVM being presumed-ON in config.cmake. Namely, you get python errors importing TVM right now when USE_MICRO is OFF. We should have a regression test that verifies basic functionality with everything (or nearly everything) OFF.\r\n\r\nContext: apache/tvm#9617\r\nAnd another micro-related issue of the same kind, which i don't have handy right now.\r\n\r\ncc @gigiblender \n", "before_files": [{"content": "#!/usr/bin/env python3\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport jinja2\nimport argparse\nimport difflib\nimport re\nimport datetime\nimport textwrap\n\nfrom pathlib import Path\n\n\nREPO_ROOT = Path(__file__).resolve().parent.parent.parent\nJENKINSFILE_TEMPLATE = REPO_ROOT / \"ci\" / \"jenkins\" / \"Jenkinsfile.j2\"\nJENKINSFILE = REPO_ROOT / \"Jenkinsfile\"\n\n\ndata = {\n \"images\": [\n {\n \"name\": \"ci_arm\",\n \"platform\": \"ARM\",\n },\n {\n \"name\": \"ci_cpu\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_gpu\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_hexagon\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_i386\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_lint\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_cortexm\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_wasm\",\n \"platform\": \"CPU\",\n },\n ]\n}\n\n\ndef lines_without_generated_tag(content):\n return [\n line for line in content.splitlines(keepends=True) if not line.startswith(\"// Generated at\")\n ]\n\n\nif __name__ == \"__main__\":\n help = \"Regenerate Jenkinsfile from template\"\n parser = argparse.ArgumentParser(description=help)\n parser.add_argument(\"--check\", action=\"store_true\", help=\"just verify the output didn't change\")\n args = parser.parse_args()\n\n with open(JENKINSFILE) as f:\n content = f.read()\n\n data[\"generated_time\"] = datetime.datetime.now().isoformat()\n\n environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(REPO_ROOT),\n undefined=jinja2.StrictUndefined,\n lstrip_blocks=True,\n trim_blocks=True,\n keep_trailing_newline=True,\n )\n template = environment.get_template(str(JENKINSFILE_TEMPLATE.relative_to(REPO_ROOT)))\n new_content = template.render(**data)\n\n diff = \"\".join(\n difflib.unified_diff(\n lines_without_generated_tag(content), lines_without_generated_tag(new_content)\n )\n )\n if args.check:\n if not diff:\n print(\"Success, the newly 
generated Jenkinsfile matched the one on disk\")\n exit(0)\n else:\n print(\n textwrap.dedent(\n \"\"\"\n Newly generated Jenkinsfile did not match the one on disk! If you have made\n edits to the Jenkinsfile, move them to 'jenkins/Jenkinsfile.j2' and\n regenerate the Jenkinsfile from the template with\n\n python3 -m pip install -r jenkins/requirements.txt\n python3 jenkins/generate.py\n\n Diffed changes:\n \"\"\"\n ).strip()\n )\n print(diff)\n exit(1)\n else:\n with open(JENKINSFILE, \"w\") as f:\n f.write(new_content)\n if not diff:\n print(f\"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, no changes made\")\n else:\n print(f\"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, changes:\")\n print(diff)\n", "path": "ci/jenkins/generate.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport jinja2\nimport argparse\nimport difflib\nimport re\nimport datetime\nimport textwrap\n\nfrom pathlib import Path\n\n\nREPO_ROOT = Path(__file__).resolve().parent.parent.parent\nJENKINSFILE_TEMPLATE = REPO_ROOT / \"ci\" / \"jenkins\" / \"Jenkinsfile.j2\"\nJENKINSFILE = REPO_ROOT / \"Jenkinsfile\"\n\n\ndata = {\n \"images\": [\n {\n \"name\": \"ci_arm\",\n \"platform\": \"ARM\",\n },\n {\n \"name\": \"ci_cpu\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_minimal\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_gpu\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_hexagon\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_i386\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_lint\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_cortexm\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_wasm\",\n \"platform\": \"CPU\",\n },\n ]\n}\n\n\ndef lines_without_generated_tag(content):\n return [\n line for line in content.splitlines(keepends=True) if not line.startswith(\"// Generated at\")\n ]\n\n\nif __name__ == \"__main__\":\n help = \"Regenerate Jenkinsfile from template\"\n parser = argparse.ArgumentParser(description=help)\n parser.add_argument(\"--check\", action=\"store_true\", help=\"just verify the output didn't change\")\n args = parser.parse_args()\n\n with open(JENKINSFILE) as f:\n content = f.read()\n\n data[\"generated_time\"] = datetime.datetime.now().isoformat()\n\n environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(REPO_ROOT),\n undefined=jinja2.StrictUndefined,\n lstrip_blocks=True,\n trim_blocks=True,\n keep_trailing_newline=True,\n )\n template = environment.get_template(str(JENKINSFILE_TEMPLATE.relative_to(REPO_ROOT)))\n new_content = template.render(**data)\n\n diff = \"\".join(\n difflib.unified_diff(\n lines_without_generated_tag(content), lines_without_generated_tag(new_content)\n )\n )\n if args.check:\n if not diff:\n print(\"Success, 
the newly generated Jenkinsfile matched the one on disk\")\n exit(0)\n else:\n print(\n textwrap.dedent(\n \"\"\"\n Newly generated Jenkinsfile did not match the one on disk! If you have made\n edits to the Jenkinsfile, move them to 'jenkins/Jenkinsfile.j2' and\n regenerate the Jenkinsfile from the template with\n\n python3 -m pip install -r jenkins/requirements.txt\n python3 jenkins/generate.py\n\n Diffed changes:\n \"\"\"\n ).strip()\n )\n print(diff)\n exit(1)\n else:\n with open(JENKINSFILE, \"w\") as f:\n f.write(new_content)\n if not diff:\n print(f\"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, no changes made\")\n else:\n print(f\"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, changes:\")\n print(diff)\n", "path": "ci/jenkins/generate.py"}]} | 1,556 | 111 |
gh_patches_debug_19824 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-899 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make private project setting more clear
Currently the checkbox is kind of hidden and it is not instantly clear what is meant by "Access to Project". The setting should be more obvious and its consequences more clear.
Opin has another way of showing the setting which we could/should adopt
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/dashboard2/forms.py`
Content:
```
1 from django import forms
2 from django.contrib.auth import get_user_model
3 from django.forms import inlineformset_factory
4 from django.utils.translation import ugettext_lazy as _
5
6 from adhocracy4.categories import models as category_models
7 from adhocracy4.forms.fields import DateTimeField
8 from adhocracy4.maps import models as map_models
9 from adhocracy4.modules import models as module_models
10 from adhocracy4.phases import models as phase_models
11 from adhocracy4.projects import models as project_models
12 from meinberlin.apps.maps.widgets import MapChoosePolygonWithPresetWidget
13
14 from . import signals
15 from .components.forms import ModuleDashboardForm
16 from .components.forms import ModuleDashboardFormSet
17 from .components.forms import ProjectDashboardForm
18
19 User = get_user_model()
20
21
22 class ProjectCreateForm(forms.ModelForm):
23
24 class Meta:
25 model = project_models.Project
26 fields = ['name', 'description', 'image', 'image_copyright']
27
28 def __init__(self, organisation, creator,
29 *args, **kwargs):
30 super().__init__(*args, **kwargs)
31 self.organisation = organisation
32 self.creator = creator
33
34 def save(self, commit=True):
35 project = super().save(commit=False)
36
37 project.organisation = self.organisation
38 project.creator = self.creator
39
40 if commit:
41 project.save()
42 if hasattr(self, 'save_m2m'):
43 self.save_m2m()
44
45 return project
46
47
48 class ProjectBasicForm(ProjectDashboardForm):
49
50 class Meta:
51 model = project_models.Project
52 fields = ['name', 'description', 'image', 'image_copyright',
53 'tile_image', 'tile_image_copyright',
54 'is_archived', 'is_public']
55 required_for_project_publish = ['name', 'description']
56
57 def __init__(self, *args, **kwargs):
58 super().__init__(*args, **kwargs)
59 instance = kwargs.get('instance', None)
60 self._project_was_archived = instance and instance.is_archived
61
62 def save(self, commit=True):
63 project = super().save(commit)
64 if not self._project_was_archived and project.is_archived:
65 signals.project_archived.send(sender=None, project=project)
66 return project
67
68
69 class ProjectInformationForm(ProjectDashboardForm):
70
71 class Meta:
72 model = project_models.Project
73 fields = ['information']
74 required_for_project_publish = ['information']
75
76
77 class ProjectResultForm(ProjectDashboardForm):
78
79 class Meta:
80 model = project_models.Project
81 fields = ['result']
82 required_for_project_publish = []
83
84
85 class ModuleBasicForm(ModuleDashboardForm):
86
87 class Meta:
88 model = module_models.Module
89 fields = ['name', 'description']
90 required_for_project_publish = '__all__'
91
92
93 class PhaseForm(forms.ModelForm):
94 end_date = DateTimeField(
95 time_format='%H:%M',
96 required=False,
97 require_all_fields=False,
98 label=(_('End date'), _('End time'))
99 )
100 start_date = DateTimeField(
101 time_format='%H:%M',
102 required=False,
103 require_all_fields=False,
104 label=(_('Start date'), _('Start time'))
105 )
106
107 class Meta:
108 model = phase_models.Phase
109 fields = ['name', 'description', 'start_date', 'end_date',
110 'type', # required for get_phase_name in the tpl
111 ]
112 required_for_project_publish = ['name', 'description', 'start_date',
113 'end_date']
114 widgets = {
115 'type': forms.HiddenInput(),
116 'weight': forms.HiddenInput()
117 }
118
119
120 PhaseFormSet = inlineformset_factory(module_models.Module,
121 phase_models.Phase,
122 form=PhaseForm,
123 formset=ModuleDashboardFormSet,
124 extra=0,
125 can_delete=False,
126 )
127
128
129 class AreaSettingsForm(ModuleDashboardForm):
130
131 def __init__(self, *args, **kwargs):
132 self.module = kwargs['instance']
133 kwargs['instance'] = self.module.settings_instance
134 super().__init__(*args, **kwargs)
135
136 def save(self, commit=True):
137 super().save(commit)
138 return self.module
139
140 def get_project(self):
141 return self.module.project
142
143 class Meta:
144 model = map_models.AreaSettings
145 fields = ['polygon']
146 required_for_project_publish = ['polygon']
147 # widgets = map_models.AreaSettings.widgets()
148 widgets = {'polygon': MapChoosePolygonWithPresetWidget}
149
150
151 class CategoryForm(forms.ModelForm):
152 name = forms.CharField(widget=forms.TextInput(attrs={
153 'placeholder': _('Category')}
154 ))
155
156 @property
157 def media(self):
158 media = super().media
159 media.add_js(['js/formset.js'])
160 return media
161
162 class Meta:
163 model = category_models.Category
164 fields = ['name']
165
166
167 CategoryFormSet = inlineformset_factory(module_models.Module,
168 category_models.Category,
169 form=CategoryForm,
170 formset=ModuleDashboardFormSet,
171 extra=0,
172 )
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/dashboard2/forms.py b/meinberlin/apps/dashboard2/forms.py
--- a/meinberlin/apps/dashboard2/forms.py
+++ b/meinberlin/apps/dashboard2/forms.py
@@ -1,5 +1,6 @@
from django import forms
from django.contrib.auth import get_user_model
+from django.forms import RadioSelect
from django.forms import inlineformset_factory
from django.utils.translation import ugettext_lazy as _
@@ -53,6 +54,14 @@
'tile_image', 'tile_image_copyright',
'is_archived', 'is_public']
required_for_project_publish = ['name', 'description']
+ widgets = {
+ 'is_public': RadioSelect(
+ choices=[
+ (True, _('All users can participate (public).')),
+ (False, _('Only invited users can participate (private).'))
+ ]
+ ),
+ }
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
| {"golden_diff": "diff --git a/meinberlin/apps/dashboard2/forms.py b/meinberlin/apps/dashboard2/forms.py\n--- a/meinberlin/apps/dashboard2/forms.py\n+++ b/meinberlin/apps/dashboard2/forms.py\n@@ -1,5 +1,6 @@\n from django import forms\n from django.contrib.auth import get_user_model\n+from django.forms import RadioSelect\n from django.forms import inlineformset_factory\n from django.utils.translation import ugettext_lazy as _\n \n@@ -53,6 +54,14 @@\n 'tile_image', 'tile_image_copyright',\n 'is_archived', 'is_public']\n required_for_project_publish = ['name', 'description']\n+ widgets = {\n+ 'is_public': RadioSelect(\n+ choices=[\n+ (True, _('All users can participate (public).')),\n+ (False, _('Only invited users can participate (private).'))\n+ ]\n+ ),\n+ }\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n", "issue": "Make private project setting more clear\nCurrently the checkbox is kind of hidden and it is not instantly clear what is meant by \"Access to Project\". The setting should be more obvious and it's consequences more clear.\r\n\r\nOpin has another way of showing the setting which we could/should adopt\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.forms import inlineformset_factory\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.categories import models as category_models\nfrom adhocracy4.forms.fields import DateTimeField\nfrom adhocracy4.maps import models as map_models\nfrom adhocracy4.modules import models as module_models\nfrom adhocracy4.phases import models as phase_models\nfrom adhocracy4.projects import models as project_models\nfrom meinberlin.apps.maps.widgets import MapChoosePolygonWithPresetWidget\n\nfrom . 
import signals\nfrom .components.forms import ModuleDashboardForm\nfrom .components.forms import ModuleDashboardFormSet\nfrom .components.forms import ProjectDashboardForm\n\nUser = get_user_model()\n\n\nclass ProjectCreateForm(forms.ModelForm):\n\n class Meta:\n model = project_models.Project\n fields = ['name', 'description', 'image', 'image_copyright']\n\n def __init__(self, organisation, creator,\n *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.organisation = organisation\n self.creator = creator\n\n def save(self, commit=True):\n project = super().save(commit=False)\n\n project.organisation = self.organisation\n project.creator = self.creator\n\n if commit:\n project.save()\n if hasattr(self, 'save_m2m'):\n self.save_m2m()\n\n return project\n\n\nclass ProjectBasicForm(ProjectDashboardForm):\n\n class Meta:\n model = project_models.Project\n fields = ['name', 'description', 'image', 'image_copyright',\n 'tile_image', 'tile_image_copyright',\n 'is_archived', 'is_public']\n required_for_project_publish = ['name', 'description']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n instance = kwargs.get('instance', None)\n self._project_was_archived = instance and instance.is_archived\n\n def save(self, commit=True):\n project = super().save(commit)\n if not self._project_was_archived and project.is_archived:\n signals.project_archived.send(sender=None, project=project)\n return project\n\n\nclass ProjectInformationForm(ProjectDashboardForm):\n\n class Meta:\n model = project_models.Project\n fields = ['information']\n required_for_project_publish = ['information']\n\n\nclass ProjectResultForm(ProjectDashboardForm):\n\n class Meta:\n model = project_models.Project\n fields = ['result']\n required_for_project_publish = []\n\n\nclass ModuleBasicForm(ModuleDashboardForm):\n\n class Meta:\n model = module_models.Module\n fields = ['name', 'description']\n required_for_project_publish = '__all__'\n\n\nclass PhaseForm(forms.ModelForm):\n end_date = DateTimeField(\n time_format='%H:%M',\n required=False,\n require_all_fields=False,\n label=(_('End date'), _('End time'))\n )\n start_date = DateTimeField(\n time_format='%H:%M',\n required=False,\n require_all_fields=False,\n label=(_('Start date'), _('Start time'))\n )\n\n class Meta:\n model = phase_models.Phase\n fields = ['name', 'description', 'start_date', 'end_date',\n 'type', # required for get_phase_name in the tpl\n ]\n required_for_project_publish = ['name', 'description', 'start_date',\n 'end_date']\n widgets = {\n 'type': forms.HiddenInput(),\n 'weight': forms.HiddenInput()\n }\n\n\nPhaseFormSet = inlineformset_factory(module_models.Module,\n phase_models.Phase,\n form=PhaseForm,\n formset=ModuleDashboardFormSet,\n extra=0,\n can_delete=False,\n )\n\n\nclass AreaSettingsForm(ModuleDashboardForm):\n\n def __init__(self, *args, **kwargs):\n self.module = kwargs['instance']\n kwargs['instance'] = self.module.settings_instance\n super().__init__(*args, **kwargs)\n\n def save(self, commit=True):\n super().save(commit)\n return self.module\n\n def get_project(self):\n return self.module.project\n\n class Meta:\n model = map_models.AreaSettings\n fields = ['polygon']\n required_for_project_publish = ['polygon']\n # widgets = map_models.AreaSettings.widgets()\n widgets = {'polygon': MapChoosePolygonWithPresetWidget}\n\n\nclass CategoryForm(forms.ModelForm):\n name = forms.CharField(widget=forms.TextInput(attrs={\n 'placeholder': _('Category')}\n ))\n\n @property\n def media(self):\n media = 
super().media\n media.add_js(['js/formset.js'])\n return media\n\n class Meta:\n model = category_models.Category\n fields = ['name']\n\n\nCategoryFormSet = inlineformset_factory(module_models.Module,\n category_models.Category,\n form=CategoryForm,\n formset=ModuleDashboardFormSet,\n extra=0,\n )\n", "path": "meinberlin/apps/dashboard2/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.forms import RadioSelect\nfrom django.forms import inlineformset_factory\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.categories import models as category_models\nfrom adhocracy4.forms.fields import DateTimeField\nfrom adhocracy4.maps import models as map_models\nfrom adhocracy4.modules import models as module_models\nfrom adhocracy4.phases import models as phase_models\nfrom adhocracy4.projects import models as project_models\nfrom meinberlin.apps.maps.widgets import MapChoosePolygonWithPresetWidget\n\nfrom . import signals\nfrom .components.forms import ModuleDashboardForm\nfrom .components.forms import ModuleDashboardFormSet\nfrom .components.forms import ProjectDashboardForm\n\nUser = get_user_model()\n\n\nclass ProjectCreateForm(forms.ModelForm):\n\n class Meta:\n model = project_models.Project\n fields = ['name', 'description', 'image', 'image_copyright']\n\n def __init__(self, organisation, creator,\n *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.organisation = organisation\n self.creator = creator\n\n def save(self, commit=True):\n project = super().save(commit=False)\n\n project.organisation = self.organisation\n project.creator = self.creator\n\n if commit:\n project.save()\n if hasattr(self, 'save_m2m'):\n self.save_m2m()\n\n return project\n\n\nclass ProjectBasicForm(ProjectDashboardForm):\n\n class Meta:\n model = project_models.Project\n fields = ['name', 'description', 'image', 'image_copyright',\n 'tile_image', 'tile_image_copyright',\n 'is_archived', 'is_public']\n required_for_project_publish = ['name', 'description']\n widgets = {\n 'is_public': RadioSelect(\n choices=[\n (True, _('All users can participate (public).')),\n (False, _('Only invited users can participate (private).'))\n ]\n ),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n instance = kwargs.get('instance', None)\n self._project_was_archived = instance and instance.is_archived\n\n def save(self, commit=True):\n project = super().save(commit)\n if not self._project_was_archived and project.is_archived:\n signals.project_archived.send(sender=None, project=project)\n return project\n\n\nclass ProjectInformationForm(ProjectDashboardForm):\n\n class Meta:\n model = project_models.Project\n fields = ['information']\n required_for_project_publish = ['information']\n\n\nclass ProjectResultForm(ProjectDashboardForm):\n\n class Meta:\n model = project_models.Project\n fields = ['result']\n required_for_project_publish = []\n\n\nclass ModuleBasicForm(ModuleDashboardForm):\n\n class Meta:\n model = module_models.Module\n fields = ['name', 'description']\n required_for_project_publish = '__all__'\n\n\nclass PhaseForm(forms.ModelForm):\n end_date = DateTimeField(\n time_format='%H:%M',\n required=False,\n require_all_fields=False,\n label=(_('End date'), _('End time'))\n )\n start_date = DateTimeField(\n time_format='%H:%M',\n required=False,\n require_all_fields=False,\n label=(_('Start date'), _('Start time'))\n )\n\n class Meta:\n model = phase_models.Phase\n fields = ['name', 
'description', 'start_date', 'end_date',\n 'type', # required for get_phase_name in the tpl\n ]\n required_for_project_publish = ['name', 'description', 'start_date',\n 'end_date']\n widgets = {\n 'type': forms.HiddenInput(),\n 'weight': forms.HiddenInput()\n }\n\n\nPhaseFormSet = inlineformset_factory(module_models.Module,\n phase_models.Phase,\n form=PhaseForm,\n formset=ModuleDashboardFormSet,\n extra=0,\n can_delete=False,\n )\n\n\nclass AreaSettingsForm(ModuleDashboardForm):\n\n def __init__(self, *args, **kwargs):\n self.module = kwargs['instance']\n kwargs['instance'] = self.module.settings_instance\n super().__init__(*args, **kwargs)\n\n def save(self, commit=True):\n super().save(commit)\n return self.module\n\n def get_project(self):\n return self.module.project\n\n class Meta:\n model = map_models.AreaSettings\n fields = ['polygon']\n required_for_project_publish = ['polygon']\n # widgets = map_models.AreaSettings.widgets()\n widgets = {'polygon': MapChoosePolygonWithPresetWidget}\n\n\nclass CategoryForm(forms.ModelForm):\n name = forms.CharField(widget=forms.TextInput(attrs={\n 'placeholder': _('Category')}\n ))\n\n @property\n def media(self):\n media = super().media\n media.add_js(['js/formset.js'])\n return media\n\n class Meta:\n model = category_models.Category\n fields = ['name']\n\n\nCategoryFormSet = inlineformset_factory(module_models.Module,\n category_models.Category,\n form=CategoryForm,\n formset=ModuleDashboardFormSet,\n extra=0,\n )\n", "path": "meinberlin/apps/dashboard2/forms.py"}]} | 1,801 | 226 |
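
A minimal standalone sketch of the widget change recorded in the row above (illustrative only, not part of the dataset row): it mirrors the `RadioSelect` choices from the golden diff so the boolean `is_public` flag is rendered as two labelled radio buttons instead of a bare checkbox. The form name and the minimal `settings.configure()` call are assumptions made so the snippet runs outside the meinberlin project.

```python
# Illustrative sketch only: a plain Django form mirroring the golden diff's RadioSelect
# choices for the boolean `is_public` flag (labels copied from the patch).
import django
from django.conf import settings

if not settings.configured:
    settings.configure(USE_I18N=False)  # minimal config so the form works standalone
    django.setup()

from django import forms


class ProjectAccessForm(forms.Form):
    # required=False lets the "private" (False) choice validate instead of failing as empty.
    is_public = forms.BooleanField(
        required=False,
        widget=forms.RadioSelect(
            choices=[
                (True, 'All users can participate (public).'),
                (False, 'Only invited users can participate (private).'),
            ]
        ),
    )


# The string "False" posted by the radio widget is coerced back to Python False.
form = ProjectAccessForm(data={'is_public': 'False'})
assert form.is_valid() and form.cleaned_data['is_public'] is False
```
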
gh_patches_debug_26793 | rasdani/github-patches | git_diff | pulp__pulpcore-5196 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Overwriting existing packages in backend storage can lead to caching issues
If an existing package is re-added to pulp, the default behavior will overwrite the existing file in backing storage. This is typically fine.
- If using Azure Blobstore, the timestamp of the blob is updated (Last-Modified time and ETag).
- Conversely, some CDNs (notably Azure Front Door) use the Last-Modified time as a signal that a file in the origin has been updated.
- This can lead to poor cache behavior, and in some cases, incomplete downloads as the CDN attempts to resolve disparate content.
- If we set `AZURE_OVERWRITE_FILES` to `false`, this partially mitigates the issue (Last-Modified/ETag are unmodified). However, it results in duplicate copies being written to storage (with a suffix to differentiate them from the original).
- We should have an option that does "nothing" if the uploaded file already exists (don't overwrite, and don't write a new copy).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/plugin/viewsets/content.py`
Content:
```
1 from drf_spectacular.utils import extend_schema
2
3 from django.db import DatabaseError
4 from django.db.utils import IntegrityError
5
6 from pulpcore.app import tasks
7 from pulpcore.plugin.serializers import (
8 ArtifactSerializer,
9 AsyncOperationResponseSerializer,
10 )
11 from pulpcore.plugin.models import Artifact, PulpTemporaryFile
12 from pulpcore.plugin.tasking import dispatch
13 from pulpcore.plugin.viewsets import (
14 ContentViewSet,
15 OperationPostponedResponse,
16 )
17
18
19 class DefaultDeferredContextMixin:
20 """A mixin that provides a method for retrieving the default deferred context."""
21
22 def get_deferred_context(self, request):
23 """
24 Supply context for deferred validation.
25
26 When overwriting this method, it must return a dict, that is JSON serializable by
27 and does _not_ contain 'request' as a key.
28 """
29 return {}
30
31
32 class NoArtifactContentViewSet(DefaultDeferredContextMixin, ContentViewSet):
33 """A ViewSet for content creation that does not require a file to be uploaded."""
34
35 @extend_schema(
36 description="Trigger an asynchronous task to create content,"
37 "optionally create new repository version.",
38 responses={202: AsyncOperationResponseSerializer},
39 )
40 def create(self, request):
41 """Create a content unit."""
42 serializer = self.get_serializer(data=request.data)
43 serializer.is_valid(raise_exception=True)
44
45 exclusive_resources = [
46 item for item in (serializer.validated_data.get(key) for key in ("repository",)) if item
47 ]
48
49 task = dispatch(
50 tasks.base.general_create,
51 exclusive_resources=exclusive_resources,
52 args=(self.queryset.model._meta.app_label, serializer.__class__.__name__),
53 kwargs={
54 "data": {k: v for k, v in request.data.items()},
55 "context": self.get_deferred_context(request),
56 },
57 )
58 return OperationPostponedResponse(task, request)
59
60
61 class NoArtifactContentUploadViewSet(DefaultDeferredContextMixin, ContentViewSet):
62 """A ViewSet for uploads that do not require to store an uploaded content as an Artifact."""
63
64 @extend_schema(
65 description="Trigger an asynchronous task to create content,"
66 "optionally create new repository version.",
67 responses={202: AsyncOperationResponseSerializer},
68 )
69 def create(self, request):
70 """Create a content unit."""
71 serializer = self.get_serializer(data=request.data)
72 serializer.is_valid(raise_exception=True)
73
74 task_payload = {k: v for k, v in request.data.items()}
75
76 file_content = task_payload.pop("file", None)
77 temp_file = PulpTemporaryFile.init_and_validate(file_content)
78 temp_file.save()
79
80 exclusive_resources = [
81 item for item in (serializer.validated_data.get(key) for key in ("repository",)) if item
82 ]
83
84 app_label = self.queryset.model._meta.app_label
85 context = self.get_deferred_context(request)
86 context["pulp_temp_file_pk"] = str(temp_file.pk)
87 task = dispatch(
88 tasks.base.general_create,
89 exclusive_resources=exclusive_resources,
90 args=(app_label, serializer.__class__.__name__),
91 kwargs={"data": task_payload, "context": context},
92 )
93 return OperationPostponedResponse(task, request)
94
95
96 class SingleArtifactContentUploadViewSet(DefaultDeferredContextMixin, ContentViewSet):
97 """A ViewSet which can be used to store an uploaded content as an Artifact."""
98
99 @extend_schema(
100 description="Trigger an asynchronous task to create content,"
101 "optionally create new repository version.",
102 responses={202: AsyncOperationResponseSerializer},
103 )
104 def create(self, request):
105 """Create a content unit."""
106 serializer = self.get_serializer(data=request.data)
107 serializer.is_valid(raise_exception=True)
108
109 task_payload = self.init_content_data(serializer, request)
110
111 exclusive_resources = [
112 item
113 for item in (serializer.validated_data.get(key) for key in ("upload", "repository"))
114 if item
115 ]
116
117 app_label = self.queryset.model._meta.app_label
118 task = dispatch(
119 tasks.base.general_create,
120 exclusive_resources=exclusive_resources,
121 args=(app_label, serializer.__class__.__name__),
122 kwargs={
123 "data": task_payload,
124 "context": self.get_deferred_context(request),
125 },
126 )
127 return OperationPostponedResponse(task, request)
128
129 def init_content_data(self, serializer, request):
130 """Initialize the reference to an Artifact along with relevant task's payload data."""
131 task_payload = {k: v for k, v in request.data.items()}
132 if "file" in task_payload:
133 # in the upload code path make sure, the artifact exists, and the 'file'
134 # parameter is replaced by 'artifact'
135 artifact = Artifact.init_and_validate(task_payload.pop("file"))
136 try:
137 artifact.save()
138 except IntegrityError:
139 # if artifact already exists, let's use it
140 try:
141 artifact = Artifact.objects.get(
142 sha256=artifact.sha256, pulp_domain=request.pulp_domain
143 )
144 artifact.touch()
145 except (Artifact.DoesNotExist, DatabaseError):
146 # the artifact has since been removed from when we first attempted to save it
147 artifact.save()
148
149 task_payload["artifact"] = ArtifactSerializer(
150 artifact, context={"request": request}
151 ).data["pulp_href"]
152 elif "artifact" in serializer.validated_data:
153 serializer.validated_data["artifact"].touch()
154 # In case of a provided upload object, there is no artifact to touch yet.
155
156 return task_payload
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/plugin/viewsets/content.py b/pulpcore/plugin/viewsets/content.py
--- a/pulpcore/plugin/viewsets/content.py
+++ b/pulpcore/plugin/viewsets/content.py
@@ -133,18 +133,20 @@
# in the upload code path make sure, the artifact exists, and the 'file'
# parameter is replaced by 'artifact'
artifact = Artifact.init_and_validate(task_payload.pop("file"))
+ # if artifact already exists, let's use it
try:
- artifact.save()
- except IntegrityError:
- # if artifact already exists, let's use it
+ artifact = Artifact.objects.get(
+ sha256=artifact.sha256, pulp_domain=request.pulp_domain
+ )
+ artifact.touch()
+ except (Artifact.DoesNotExist, DatabaseError):
try:
+ artifact.save()
+ except IntegrityError:
artifact = Artifact.objects.get(
sha256=artifact.sha256, pulp_domain=request.pulp_domain
)
artifact.touch()
- except (Artifact.DoesNotExist, DatabaseError):
- # the artifact has since been removed from when we first attempted to save it
- artifact.save()
task_payload["artifact"] = ArtifactSerializer(
artifact, context={"request": request}
| {"golden_diff": "diff --git a/pulpcore/plugin/viewsets/content.py b/pulpcore/plugin/viewsets/content.py\n--- a/pulpcore/plugin/viewsets/content.py\n+++ b/pulpcore/plugin/viewsets/content.py\n@@ -133,18 +133,20 @@\n # in the upload code path make sure, the artifact exists, and the 'file'\n # parameter is replaced by 'artifact'\n artifact = Artifact.init_and_validate(task_payload.pop(\"file\"))\n+ # if artifact already exists, let's use it\n try:\n- artifact.save()\n- except IntegrityError:\n- # if artifact already exists, let's use it\n+ artifact = Artifact.objects.get(\n+ sha256=artifact.sha256, pulp_domain=request.pulp_domain\n+ )\n+ artifact.touch()\n+ except (Artifact.DoesNotExist, DatabaseError):\n try:\n+ artifact.save()\n+ except IntegrityError:\n artifact = Artifact.objects.get(\n sha256=artifact.sha256, pulp_domain=request.pulp_domain\n )\n artifact.touch()\n- except (Artifact.DoesNotExist, DatabaseError):\n- # the artifact has since been removed from when we first attempted to save it\n- artifact.save()\n \n task_payload[\"artifact\"] = ArtifactSerializer(\n artifact, context={\"request\": request}\n", "issue": "Overwriting existing packages in backend storage can lead to caching issues\nIf an existing package is re-added to pulp, the default behavior will overwrite the existing file in backing storage. This is typically fine.\r\n- If using Azure Blobstore, the timestamp of the blob is updated (Last-Modified time and ETag).\r\n- Conversely, some CDN's (notably Azure Front Door) use Last-Modified Time as a signal that a file in origin has updated.\r\n- This can lead to poor cache behavior, and in some cases, incomplete downloads as the CDN attempts to resolve disparate content.\r\n- If we set `AZURE_OVERWRITE_FILES` to `false` this partially mitigates the issue (Last-Modified/ETag are unmodified). 
However, this results in duplicate copies written to storage (with a suffix to differentiate from the original).\r\n- We should have an option that does \"nothing\" if the uploaded file already exists (don't overwrite, and don't write a new copy).\n", "before_files": [{"content": "from drf_spectacular.utils import extend_schema\n\nfrom django.db import DatabaseError\nfrom django.db.utils import IntegrityError\n\nfrom pulpcore.app import tasks\nfrom pulpcore.plugin.serializers import (\n ArtifactSerializer,\n AsyncOperationResponseSerializer,\n)\nfrom pulpcore.plugin.models import Artifact, PulpTemporaryFile\nfrom pulpcore.plugin.tasking import dispatch\nfrom pulpcore.plugin.viewsets import (\n ContentViewSet,\n OperationPostponedResponse,\n)\n\n\nclass DefaultDeferredContextMixin:\n \"\"\"A mixin that provides a method for retrieving the default deferred context.\"\"\"\n\n def get_deferred_context(self, request):\n \"\"\"\n Supply context for deferred validation.\n\n When overwriting this method, it must return a dict, that is JSON serializable by\n and does _not_ contain 'request' as a key.\n \"\"\"\n return {}\n\n\nclass NoArtifactContentViewSet(DefaultDeferredContextMixin, ContentViewSet):\n \"\"\"A ViewSet for content creation that does not require a file to be uploaded.\"\"\"\n\n @extend_schema(\n description=\"Trigger an asynchronous task to create content,\"\n \"optionally create new repository version.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def create(self, request):\n \"\"\"Create a content unit.\"\"\"\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n exclusive_resources = [\n item for item in (serializer.validated_data.get(key) for key in (\"repository\",)) if item\n ]\n\n task = dispatch(\n tasks.base.general_create,\n exclusive_resources=exclusive_resources,\n args=(self.queryset.model._meta.app_label, serializer.__class__.__name__),\n kwargs={\n \"data\": {k: v for k, v in request.data.items()},\n \"context\": self.get_deferred_context(request),\n },\n )\n return OperationPostponedResponse(task, request)\n\n\nclass NoArtifactContentUploadViewSet(DefaultDeferredContextMixin, ContentViewSet):\n \"\"\"A ViewSet for uploads that do not require to store an uploaded content as an Artifact.\"\"\"\n\n @extend_schema(\n description=\"Trigger an asynchronous task to create content,\"\n \"optionally create new repository version.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def create(self, request):\n \"\"\"Create a content unit.\"\"\"\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n task_payload = {k: v for k, v in request.data.items()}\n\n file_content = task_payload.pop(\"file\", None)\n temp_file = PulpTemporaryFile.init_and_validate(file_content)\n temp_file.save()\n\n exclusive_resources = [\n item for item in (serializer.validated_data.get(key) for key in (\"repository\",)) if item\n ]\n\n app_label = self.queryset.model._meta.app_label\n context = self.get_deferred_context(request)\n context[\"pulp_temp_file_pk\"] = str(temp_file.pk)\n task = dispatch(\n tasks.base.general_create,\n exclusive_resources=exclusive_resources,\n args=(app_label, serializer.__class__.__name__),\n kwargs={\"data\": task_payload, \"context\": context},\n )\n return OperationPostponedResponse(task, request)\n\n\nclass SingleArtifactContentUploadViewSet(DefaultDeferredContextMixin, ContentViewSet):\n \"\"\"A ViewSet which can be used to store an uploaded content 
as an Artifact.\"\"\"\n\n @extend_schema(\n description=\"Trigger an asynchronous task to create content,\"\n \"optionally create new repository version.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def create(self, request):\n \"\"\"Create a content unit.\"\"\"\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n task_payload = self.init_content_data(serializer, request)\n\n exclusive_resources = [\n item\n for item in (serializer.validated_data.get(key) for key in (\"upload\", \"repository\"))\n if item\n ]\n\n app_label = self.queryset.model._meta.app_label\n task = dispatch(\n tasks.base.general_create,\n exclusive_resources=exclusive_resources,\n args=(app_label, serializer.__class__.__name__),\n kwargs={\n \"data\": task_payload,\n \"context\": self.get_deferred_context(request),\n },\n )\n return OperationPostponedResponse(task, request)\n\n def init_content_data(self, serializer, request):\n \"\"\"Initialize the reference to an Artifact along with relevant task's payload data.\"\"\"\n task_payload = {k: v for k, v in request.data.items()}\n if \"file\" in task_payload:\n # in the upload code path make sure, the artifact exists, and the 'file'\n # parameter is replaced by 'artifact'\n artifact = Artifact.init_and_validate(task_payload.pop(\"file\"))\n try:\n artifact.save()\n except IntegrityError:\n # if artifact already exists, let's use it\n try:\n artifact = Artifact.objects.get(\n sha256=artifact.sha256, pulp_domain=request.pulp_domain\n )\n artifact.touch()\n except (Artifact.DoesNotExist, DatabaseError):\n # the artifact has since been removed from when we first attempted to save it\n artifact.save()\n\n task_payload[\"artifact\"] = ArtifactSerializer(\n artifact, context={\"request\": request}\n ).data[\"pulp_href\"]\n elif \"artifact\" in serializer.validated_data:\n serializer.validated_data[\"artifact\"].touch()\n # In case of a provided upload object, there is no artifact to touch yet.\n\n return task_payload\n", "path": "pulpcore/plugin/viewsets/content.py"}], "after_files": [{"content": "from drf_spectacular.utils import extend_schema\n\nfrom django.db import DatabaseError\nfrom django.db.utils import IntegrityError\n\nfrom pulpcore.app import tasks\nfrom pulpcore.plugin.serializers import (\n ArtifactSerializer,\n AsyncOperationResponseSerializer,\n)\nfrom pulpcore.plugin.models import Artifact, PulpTemporaryFile\nfrom pulpcore.plugin.tasking import dispatch\nfrom pulpcore.plugin.viewsets import (\n ContentViewSet,\n OperationPostponedResponse,\n)\n\n\nclass DefaultDeferredContextMixin:\n \"\"\"A mixin that provides a method for retrieving the default deferred context.\"\"\"\n\n def get_deferred_context(self, request):\n \"\"\"\n Supply context for deferred validation.\n\n When overwriting this method, it must return a dict, that is JSON serializable by\n and does _not_ contain 'request' as a key.\n \"\"\"\n return {}\n\n\nclass NoArtifactContentViewSet(DefaultDeferredContextMixin, ContentViewSet):\n \"\"\"A ViewSet for content creation that does not require a file to be uploaded.\"\"\"\n\n @extend_schema(\n description=\"Trigger an asynchronous task to create content,\"\n \"optionally create new repository version.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def create(self, request):\n \"\"\"Create a content unit.\"\"\"\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n exclusive_resources = [\n item for item in 
(serializer.validated_data.get(key) for key in (\"repository\",)) if item\n ]\n\n task = dispatch(\n tasks.base.general_create,\n exclusive_resources=exclusive_resources,\n args=(self.queryset.model._meta.app_label, serializer.__class__.__name__),\n kwargs={\n \"data\": {k: v for k, v in request.data.items()},\n \"context\": self.get_deferred_context(request),\n },\n )\n return OperationPostponedResponse(task, request)\n\n\nclass NoArtifactContentUploadViewSet(DefaultDeferredContextMixin, ContentViewSet):\n \"\"\"A ViewSet for uploads that do not require to store an uploaded content as an Artifact.\"\"\"\n\n @extend_schema(\n description=\"Trigger an asynchronous task to create content,\"\n \"optionally create new repository version.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def create(self, request):\n \"\"\"Create a content unit.\"\"\"\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n task_payload = {k: v for k, v in request.data.items()}\n\n file_content = task_payload.pop(\"file\", None)\n temp_file = PulpTemporaryFile.init_and_validate(file_content)\n temp_file.save()\n\n exclusive_resources = [\n item for item in (serializer.validated_data.get(key) for key in (\"repository\",)) if item\n ]\n\n app_label = self.queryset.model._meta.app_label\n context = self.get_deferred_context(request)\n context[\"pulp_temp_file_pk\"] = str(temp_file.pk)\n task = dispatch(\n tasks.base.general_create,\n exclusive_resources=exclusive_resources,\n args=(app_label, serializer.__class__.__name__),\n kwargs={\"data\": task_payload, \"context\": context},\n )\n return OperationPostponedResponse(task, request)\n\n\nclass SingleArtifactContentUploadViewSet(DefaultDeferredContextMixin, ContentViewSet):\n \"\"\"A ViewSet which can be used to store an uploaded content as an Artifact.\"\"\"\n\n @extend_schema(\n description=\"Trigger an asynchronous task to create content,\"\n \"optionally create new repository version.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def create(self, request):\n \"\"\"Create a content unit.\"\"\"\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n task_payload = self.init_content_data(serializer, request)\n\n exclusive_resources = [\n item\n for item in (serializer.validated_data.get(key) for key in (\"upload\", \"repository\"))\n if item\n ]\n\n app_label = self.queryset.model._meta.app_label\n task = dispatch(\n tasks.base.general_create,\n exclusive_resources=exclusive_resources,\n args=(app_label, serializer.__class__.__name__),\n kwargs={\n \"data\": task_payload,\n \"context\": self.get_deferred_context(request),\n },\n )\n return OperationPostponedResponse(task, request)\n\n def init_content_data(self, serializer, request):\n \"\"\"Initialize the reference to an Artifact along with relevant task's payload data.\"\"\"\n task_payload = {k: v for k, v in request.data.items()}\n if \"file\" in task_payload:\n # in the upload code path make sure, the artifact exists, and the 'file'\n # parameter is replaced by 'artifact'\n artifact = Artifact.init_and_validate(task_payload.pop(\"file\"))\n # if artifact already exists, let's use it\n try:\n artifact = Artifact.objects.get(\n sha256=artifact.sha256, pulp_domain=request.pulp_domain\n )\n artifact.touch()\n except (Artifact.DoesNotExist, DatabaseError):\n try:\n artifact.save()\n except IntegrityError:\n artifact = Artifact.objects.get(\n sha256=artifact.sha256, pulp_domain=request.pulp_domain\n 
)\n artifact.touch()\n\n task_payload[\"artifact\"] = ArtifactSerializer(\n artifact, context={\"request\": request}\n ).data[\"pulp_href\"]\n elif \"artifact\" in serializer.validated_data:\n serializer.validated_data[\"artifact\"].touch()\n # In case of a provided upload object, there is no artifact to touch yet.\n\n return task_payload\n", "path": "pulpcore/plugin/viewsets/content.py"}]} | 2,008 | 286 |
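
A condensed sketch of the behaviour the golden diff above introduces (illustrative; the helper name and signature are hypothetical, not pulpcore API): look up an existing `Artifact` for the same digest first and reuse it, so re-uploading identical content never rewrites, and never re-timestamps, the stored blob; saving only happens when no copy exists yet.

```python
# Illustrative helper (hypothetical, not pulpcore API): reuse an existing Artifact for the
# same sha256 instead of overwriting the stored file; save only when nothing exists yet.
from django.db import DatabaseError
from django.db.utils import IntegrityError

from pulpcore.plugin.models import Artifact


def reuse_or_save_artifact(candidate, pulp_domain):
    """`candidate` is an unsaved Artifact built from the uploaded file."""
    try:
        existing = Artifact.objects.get(sha256=candidate.sha256, pulp_domain=pulp_domain)
        existing.touch()      # refresh bookkeeping, but leave the stored blob untouched
        return existing
    except (Artifact.DoesNotExist, DatabaseError):
        try:
            candidate.save()  # first copy of this content: write it exactly once
            return candidate
        except IntegrityError:
            # Lost a race with a concurrent upload; fall back to the row that won it.
            existing = Artifact.objects.get(sha256=candidate.sha256, pulp_domain=pulp_domain)
            existing.touch()
            return existing
```
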
gh_patches_debug_33710 | rasdani/github-patches | git_diff | pypa__pipenv-3156 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Windows update breaks pipenv virtual environments creation
### Issue description
`> pipenv install` and `> pipenv --python 3` fail in a desktop folder after a Windows update.
### Expected result
Creation of a virtual environment and installation of all dependencies declared in Pipfile.lock.
### Actual result
```
> pipenv install
Traceback (most recent call last):
File "c:\users\ale\appdata\local\programs\python\python36\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\users\ale\appdata\local\programs\python\python36\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Users\Ale\AppData\Local\Programs\Python\Python36\Scripts\pipenv.exe\__main__.py", line 9, in <module>
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\click\core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\click\core.py", line 717, in main
rv = self.invoke(ctx)
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\click\core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\click\core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\click\core.py", line 555, in invoke
return callback(*args, **kwargs)
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\click\decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\click\core.py", line 555, in invoke
return callback(*args, **kwargs)
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\click\decorators.py", line 17, in new_func
return f(get_current_context(), *args, **kwargs)
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\cli\command.py", line 249, in install
editable_packages=state.installstate.editables,
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\core.py", line 1695, in do_install
pypi_mirror=pypi_mirror,
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\core.py", line 561, in ensure_project
pypi_mirror=pypi_mirror,
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\core.py", line 484, in ensure_virtualenv
python = ensure_python(three=three, python=python)
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\core.py", line 397, in ensure_python
path_to_python = find_a_system_python(python)
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\core.py", line 360, in find_a_system_python
python_entry = finder.find_python_version(line)
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\pythonfinder\pythonfinder.py", line 81, in find_python_version
match = self.windows_finder.find_python_version(
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\pythonfinder\pythonfinder.py", line 49, in windows_finder
self._windows_finder = WindowsFinder()
File "<attrs generated init 87ada516ba10af7bd87735a5460aaaf86a1d070f>", line 13, in __init__
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\pythonfinder\models\windows.py", line 66, in get_versions
path = ensure_path(install_path.__getattr__(""))
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\pythonfinder\_vendor\pep514tools\_registry.py", line 75, in __getattr__
raise AttributeError(attr)
AttributeError
```
```
> pipenv --python 3
*
* (Same as pipenv install)
AttributeError
```
### Steps to replicate
I was working on a project after updating Windows and wanted to test my configuration, so I ran
`> pipenv --rm` to delete my current environment. When I then tried `> pipenv install --dev` I got that error.
I have already tried reinstalling pipenv and Python and deleting all my site-packages.
It once let me create an environment in `c:\\users\\ale`, but not in other directories. I also tried `> pipenv -support`, but it failed at getting the Python installations with the same error as always.
I again tested removing the environment and performing a `> pipenv install`, but this time it failed.
After some testing I removed the Pipfile and Pipfile.lock in `c:\\users\\ale`, and `> pipenv install` worked again, but only in that directory: creating an empty folder on the desktop or in other directories and running `pipenv install` always fails with the same traceback.
-------------------------------------------------------------------------------
Windows 10 Home: `'1803 (OS build 17134.345)'`
Pip version: `'18.1'`
Pipenv version: `'2018.10.9'`
Pipenv location: `'c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv'`
Python location: `'c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\python.exe'`
Python installations found:
```
Traceback (most recent call last):
*
* (Same error as above)
*
File "c:\users\ale\appdata\local\programs\python\python36\lib\site-packages\pipenv\vendor\pythonfinder\_vendor\pep514tools\_registry.py", line 75, in __getattr__
raise AttributeError(attr)
AttributeError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pipenv/vendor/pythonfinder/__init__.py`
Content:
```
1 from __future__ import print_function, absolute_import
2
3 __version__ = '1.1.6'
4
5 # Add NullHandler to "pythonfinder" logger, because Python2's default root
6 # logger has no handler and warnings like this would be reported:
7 #
8 # > No handlers could be found for logger "pythonfinder.models.pyenv"
9 import logging
10 logger = logging.getLogger(__name__)
11 logger.addHandler(logging.NullHandler())
12
13 __all__ = ["Finder", "WindowsFinder", "SystemPath", "InvalidPythonVersion"]
14 from .pythonfinder import Finder
15 from .models import SystemPath, WindowsFinder
16 from .exceptions import InvalidPythonVersion
17
```
Path: `pipenv/vendor/pythonfinder/cli.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding=utf-8 -*-
3 from __future__ import print_function, absolute_import
4 import click
5 import crayons
6 import sys
7 from . import __version__
8 from .pythonfinder import Finder
9
10
11 @click.command()
12 @click.option("--find", default=False, nargs=1, help="Find a specific python version.")
13 @click.option("--which", default=False, nargs=1, help="Run the which command.")
14 @click.option(
15 "--findall", is_flag=True, default=False, help="Find all python versions."
16 )
17 @click.option(
18 "--version", is_flag=True, default=False, help="Display PythonFinder version."
19 )
20 @click.option("--ignore-unsupported/--no-unsupported", is_flag=True, default=True, envvar="PYTHONFINDER_IGNORE_UNSUPPORTED", help="Ignore unsupported python versions.")
21 @click.version_option(prog_name='pyfinder', version=__version__)
22 @click.pass_context
23 def cli(ctx, find=False, which=False, findall=False, version=False, ignore_unsupported=True):
24 if version:
25 click.echo(
26 "{0} version {1}".format(
27 crayons.white("PythonFinder", bold=True), crayons.yellow(__version__)
28 )
29 )
30 sys.exit(0)
31 finder = Finder(ignore_unsupported=ignore_unsupported)
32 if findall:
33 versions = [v for v in finder.find_all_python_versions()]
34 if versions:
35 click.secho("Found python at the following locations:", fg="green")
36 for v in versions:
37 py = v.py_version
38 comes_from = getattr(py, "comes_from", None)
39 if comes_from is not None:
40 comes_from_path = getattr(comes_from, "path", v.path)
41 else:
42 comes_from_path = v.path
43 click.secho(
44 "{py.name!s}: {py.version!s} ({py.architecture!s}) @ {comes_from!s}".format(
45 py=py, comes_from=comes_from_path
46 ),
47 fg="yellow",
48 )
49 sys.exit(0)
50 else:
51 click.secho(
52 "ERROR: No valid python versions found! Check your path and try again.",
53 fg="red",
54 )
55 if find:
56 click.secho("Searching for python: {0!s}".format(find.strip()), fg="yellow")
57 found = finder.find_python_version(find.strip())
58 if found:
59 click.secho("Found python at the following locations:", fg="green")
60 sys.exit(0)
61 else:
62 click.secho("Failed to find matching executable...", fg="yellow")
63 sys.exit(1)
64 elif which:
65 found = finder.system_path.which(which.strip())
66 if found:
67 click.secho("Found Executable: {0}".format(found), fg="white")
68 sys.exit(0)
69 else:
70 click.secho("Failed to find matching executable...", fg="yellow")
71 sys.exit(1)
72 else:
73 click.echo("Please provide a command", color="red")
74 sys.exit(1)
75 sys.exit()
76
77
78 if __name__ == "__main__":
79 cli()
80
```
Path: `pipenv/vendor/pythonfinder/models/windows.py`
Content:
```
1 # -*- coding=utf-8 -*-
2 from __future__ import absolute_import, print_function
3
4 import operator
5
6 from collections import defaultdict
7
8 import attr
9
10 from ..exceptions import InvalidPythonVersion
11 from ..utils import ensure_path
12 from .mixins import BaseFinder
13 from .path import PathEntry
14 from .python import PythonVersion, VersionMap
15
16
17 @attr.s
18 class WindowsFinder(BaseFinder):
19 paths = attr.ib(default=attr.Factory(list))
20 version_list = attr.ib(default=attr.Factory(list))
21 versions = attr.ib()
22 pythons = attr.ib()
23
24 def find_all_python_versions(
25 self,
26 major=None,
27 minor=None,
28 patch=None,
29 pre=None,
30 dev=None,
31 arch=None,
32 name=None,
33 ):
34 version_matcher = operator.methodcaller(
35 "matches",
36 major=major,
37 minor=minor,
38 patch=patch,
39 pre=pre,
40 dev=dev,
41 arch=arch,
42 name=name,
43 )
44 py_filter = filter(
45 None, filter(lambda c: version_matcher(c), self.version_list)
46 )
47 version_sort = operator.attrgetter("version_sort")
48 return [c.comes_from for c in sorted(py_filter, key=version_sort, reverse=True)]
49
50 def find_python_version(
51 self,
52 major=None,
53 minor=None,
54 patch=None,
55 pre=None,
56 dev=None,
57 arch=None,
58 name=None,
59 ):
60 return next(
61 (
62 v
63 for v in self.find_all_python_versions(
64 major=major,
65 minor=minor,
66 patch=patch,
67 pre=pre,
68 dev=dev,
69 arch=arch,
70 name=None,
71 )
72 ),
73 None,
74 )
75
76 @versions.default
77 def get_versions(self):
78 versions = defaultdict(PathEntry)
79 from pythonfinder._vendor.pep514tools import environment as pep514env
80
81 env_versions = pep514env.findall()
82 path = None
83 for version_object in env_versions:
84 install_path = getattr(version_object.info, "install_path", None)
85 if install_path is None:
86 continue
87 path = ensure_path(install_path.__getattr__(""))
88 try:
89 py_version = PythonVersion.from_windows_launcher(version_object)
90 except InvalidPythonVersion:
91 continue
92 self.version_list.append(py_version)
93 base_dir = PathEntry.create(
94 path,
95 is_root=True,
96 only_python=True,
97 pythons={py_version.comes_from.path: py_version},
98 )
99 versions[py_version.version_tuple[:5]] = base_dir
100 self.paths.append(base_dir)
101 return versions
102
103 @pythons.default
104 def get_pythons(self):
105 pythons = defaultdict()
106 for version in self.version_list:
107 _path = ensure_path(version.comes_from.path)
108 pythons[_path.as_posix()] = version.comes_from
109 return pythons
110
111 @classmethod
112 def create(cls):
113 return cls()
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pipenv/vendor/pythonfinder/__init__.py b/pipenv/vendor/pythonfinder/__init__.py
--- a/pipenv/vendor/pythonfinder/__init__.py
+++ b/pipenv/vendor/pythonfinder/__init__.py
@@ -1,6 +1,6 @@
from __future__ import print_function, absolute_import
-__version__ = '1.1.6'
+__version__ = '1.1.7.dev0'
# Add NullHandler to "pythonfinder" logger, because Python2's default root
# logger has no handler and warnings like this would be reported:
diff --git a/pipenv/vendor/pythonfinder/cli.py b/pipenv/vendor/pythonfinder/cli.py
--- a/pipenv/vendor/pythonfinder/cli.py
+++ b/pipenv/vendor/pythonfinder/cli.py
@@ -56,7 +56,20 @@
click.secho("Searching for python: {0!s}".format(find.strip()), fg="yellow")
found = finder.find_python_version(find.strip())
if found:
+ py = found.py_version
+ comes_from = getattr(py, "comes_from", None)
+ if comes_from is not None:
+ comes_from_path = getattr(comes_from, "path", found.path)
+ else:
+ comes_from_path = found.path
+ arch = getattr(py, "architecture", None)
click.secho("Found python at the following locations:", fg="green")
+ click.secho(
+ "{py.name!s}: {py.version!s} ({py.architecture!s}) @ {comes_from!s}".format(
+ py=py, comes_from=comes_from_path
+ ),
+ fg="yellow",
+ )
sys.exit(0)
else:
click.secho("Failed to find matching executable...", fg="yellow")
diff --git a/pipenv/vendor/pythonfinder/models/windows.py b/pipenv/vendor/pythonfinder/models/windows.py
--- a/pipenv/vendor/pythonfinder/models/windows.py
+++ b/pipenv/vendor/pythonfinder/models/windows.py
@@ -84,7 +84,10 @@
install_path = getattr(version_object.info, "install_path", None)
if install_path is None:
continue
- path = ensure_path(install_path.__getattr__(""))
+ try:
+ path = ensure_path(install_path.__getattr__(""))
+ except AttributeError:
+ continue
try:
py_version = PythonVersion.from_windows_launcher(version_object)
except InvalidPythonVersion:
| {"golden_diff": "diff --git a/pipenv/vendor/pythonfinder/__init__.py b/pipenv/vendor/pythonfinder/__init__.py\n--- a/pipenv/vendor/pythonfinder/__init__.py\n+++ b/pipenv/vendor/pythonfinder/__init__.py\n@@ -1,6 +1,6 @@\n from __future__ import print_function, absolute_import\n \n-__version__ = '1.1.6'\n+__version__ = '1.1.7.dev0'\n \n # Add NullHandler to \"pythonfinder\" logger, because Python2's default root\n # logger has no handler and warnings like this would be reported:\ndiff --git a/pipenv/vendor/pythonfinder/cli.py b/pipenv/vendor/pythonfinder/cli.py\n--- a/pipenv/vendor/pythonfinder/cli.py\n+++ b/pipenv/vendor/pythonfinder/cli.py\n@@ -56,7 +56,20 @@\n click.secho(\"Searching for python: {0!s}\".format(find.strip()), fg=\"yellow\")\n found = finder.find_python_version(find.strip())\n if found:\n+ py = found.py_version\n+ comes_from = getattr(py, \"comes_from\", None)\n+ if comes_from is not None:\n+ comes_from_path = getattr(comes_from, \"path\", found.path)\n+ else:\n+ comes_from_path = found.path\n+ arch = getattr(py, \"architecture\", None)\n click.secho(\"Found python at the following locations:\", fg=\"green\")\n+ click.secho(\n+ \"{py.name!s}: {py.version!s} ({py.architecture!s}) @ {comes_from!s}\".format(\n+ py=py, comes_from=comes_from_path\n+ ),\n+ fg=\"yellow\",\n+ )\n sys.exit(0)\n else:\n click.secho(\"Failed to find matching executable...\", fg=\"yellow\")\ndiff --git a/pipenv/vendor/pythonfinder/models/windows.py b/pipenv/vendor/pythonfinder/models/windows.py\n--- a/pipenv/vendor/pythonfinder/models/windows.py\n+++ b/pipenv/vendor/pythonfinder/models/windows.py\n@@ -84,7 +84,10 @@\n install_path = getattr(version_object.info, \"install_path\", None)\n if install_path is None:\n continue\n- path = ensure_path(install_path.__getattr__(\"\"))\n+ try:\n+ path = ensure_path(install_path.__getattr__(\"\"))\n+ except AttributeError:\n+ continue\n try:\n py_version = PythonVersion.from_windows_launcher(version_object)\n except InvalidPythonVersion:\n", "issue": "Windows update breaks pipenv virtual environments creation\n### Issue description\r\n\r\n`> pipenv install` and `> pipenv --python 3` fails in a desktop folder after windows update.\r\n\r\n\r\n### Expected result\r\nCreation of a virtual environment and installing all dependencies declared in Pipfile.lock\r\n### Actual result\r\n```\r\n> pipenv install\r\nTraceback (most recent call last):\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\Ale\\AppData\\Local\\Programs\\Python\\Python36\\Scripts\\pipenv.exe\\__main__.py\", line 9, in <module>\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\click\\core.py\", line 764, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\click\\core.py\", line 717, in main\r\n rv = self.invoke(ctx)\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\click\\core.py\", line 1137, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\click\\core.py\", line 956, in invoke\r\n return 
ctx.invoke(self.callback, **ctx.params)\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\click\\core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\click\\decorators.py\", line 64, in new_func\r\n return ctx.invoke(f, obj, *args, **kwargs)\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\click\\core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\click\\decorators.py\", line 17, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\cli\\command.py\", line 249, in install\r\n editable_packages=state.installstate.editables,\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\core.py\", line 1695, in do_install\r\n pypi_mirror=pypi_mirror,\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\core.py\", line 561, in ensure_project\r\n pypi_mirror=pypi_mirror,\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\core.py\", line 484, in ensure_virtualenv\r\n python = ensure_python(three=three, python=python)\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\core.py\", line 397, in ensure_python\r\n path_to_python = find_a_system_python(python)\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\core.py\", line 360, in find_a_system_python\r\n python_entry = finder.find_python_version(line)\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\pythonfinder\\pythonfinder.py\", line 81, in find_python_version\r\n match = self.windows_finder.find_python_version(\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\pythonfinder\\pythonfinder.py\", line 49, in windows_finder\r\n self._windows_finder = WindowsFinder()\r\n File \"<attrs generated init 87ada516ba10af7bd87735a5460aaaf86a1d070f>\", line 13, in __init__\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\pythonfinder\\models\\windows.py\", line 66, in get_versions\r\n path = ensure_path(install_path.__getattr__(\"\"))\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\pythonfinder\\_vendor\\pep514tools\\_registry.py\", line 75, in __getattr__\r\n raise AttributeError(attr)\r\nAttributeError\r\n```\r\n\r\n```\r\n> pipenv --python 3\r\n*\r\n* (Same as pipenv install)\r\n AttributeError\r\n```\r\n### Steps to replicate\r\n\r\nI was working on a project after updating windows and I wanted to test my configuration so I did\r\n`> pipenv --rm` to delete my current environment. When I tried to use `> pipenv install --dev` I got that error.\r\nI have already tried reinstalling pipenv and python and deleting all my site-packages. \r\n\r\nIt once let me create an environment in `c:\\\\users\\\\ale`, but not in other directories. 
I also tried `> pipenv -support`, but it failed at getting the python installations with the same error as always.\r\nI again tested removing the environment and performing a `> pipenv install` but this time it failed.\r\n\r\nAfter some testing I removed the Pipfile and Pipfile.lock in `c:\\\\users\\\\ale` and ` >pipenv install` worked again but only in that directory as creating an empty folder in the desktop or other directories and executing pipenv install always fails with the same traceback.\r\n\r\n-------------------------------------------------------------------------------\r\n\r\nWindows 10 Home: `'1803 (OS build 17134.345)'`\r\n\r\nPip version: `'18.1'`\r\n\r\nPipenv version: `'2018.10.9'`\r\n\r\nPipenv location: `'c:\\\\users\\\\ale\\\\appdata\\\\local\\\\programs\\\\python\\\\python36\\\\lib\\\\site-packages\\\\pipenv'`\r\n\r\nPython location: `'c:\\\\users\\\\ale\\\\appdata\\\\local\\\\programs\\\\python\\\\python36\\\\python.exe'`\r\n\r\nPython installations found:\r\n\r\n```\r\nTraceback (most recent call last):\r\n *\r\n * (Same error as above)\r\n *\r\n File \"c:\\users\\ale\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\pipenv\\vendor\\pythonfinder\\_vendor\\pep514tools\\_registry.py\", line 75, in __getattr__\r\n raise AttributeError(attr)\r\nAttributeError\r\n```\n", "before_files": [{"content": "from __future__ import print_function, absolute_import\n\n__version__ = '1.1.6'\n\n# Add NullHandler to \"pythonfinder\" logger, because Python2's default root\n# logger has no handler and warnings like this would be reported:\n#\n# > No handlers could be found for logger \"pythonfinder.models.pyenv\"\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n__all__ = [\"Finder\", \"WindowsFinder\", \"SystemPath\", \"InvalidPythonVersion\"]\nfrom .pythonfinder import Finder\nfrom .models import SystemPath, WindowsFinder\nfrom .exceptions import InvalidPythonVersion\n", "path": "pipenv/vendor/pythonfinder/__init__.py"}, {"content": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\nfrom __future__ import print_function, absolute_import\nimport click\nimport crayons\nimport sys\nfrom . 
import __version__\nfrom .pythonfinder import Finder\n\n\[email protected]()\[email protected](\"--find\", default=False, nargs=1, help=\"Find a specific python version.\")\[email protected](\"--which\", default=False, nargs=1, help=\"Run the which command.\")\[email protected](\n \"--findall\", is_flag=True, default=False, help=\"Find all python versions.\"\n)\[email protected](\n \"--version\", is_flag=True, default=False, help=\"Display PythonFinder version.\"\n)\[email protected](\"--ignore-unsupported/--no-unsupported\", is_flag=True, default=True, envvar=\"PYTHONFINDER_IGNORE_UNSUPPORTED\", help=\"Ignore unsupported python versions.\")\[email protected]_option(prog_name='pyfinder', version=__version__)\[email protected]_context\ndef cli(ctx, find=False, which=False, findall=False, version=False, ignore_unsupported=True):\n if version:\n click.echo(\n \"{0} version {1}\".format(\n crayons.white(\"PythonFinder\", bold=True), crayons.yellow(__version__)\n )\n )\n sys.exit(0)\n finder = Finder(ignore_unsupported=ignore_unsupported)\n if findall:\n versions = [v for v in finder.find_all_python_versions()]\n if versions:\n click.secho(\"Found python at the following locations:\", fg=\"green\")\n for v in versions:\n py = v.py_version\n comes_from = getattr(py, \"comes_from\", None)\n if comes_from is not None:\n comes_from_path = getattr(comes_from, \"path\", v.path)\n else:\n comes_from_path = v.path\n click.secho(\n \"{py.name!s}: {py.version!s} ({py.architecture!s}) @ {comes_from!s}\".format(\n py=py, comes_from=comes_from_path\n ),\n fg=\"yellow\",\n )\n sys.exit(0)\n else:\n click.secho(\n \"ERROR: No valid python versions found! Check your path and try again.\",\n fg=\"red\",\n )\n if find:\n click.secho(\"Searching for python: {0!s}\".format(find.strip()), fg=\"yellow\")\n found = finder.find_python_version(find.strip())\n if found:\n click.secho(\"Found python at the following locations:\", fg=\"green\")\n sys.exit(0)\n else:\n click.secho(\"Failed to find matching executable...\", fg=\"yellow\")\n sys.exit(1)\n elif which:\n found = finder.system_path.which(which.strip())\n if found:\n click.secho(\"Found Executable: {0}\".format(found), fg=\"white\")\n sys.exit(0)\n else:\n click.secho(\"Failed to find matching executable...\", fg=\"yellow\")\n sys.exit(1)\n else:\n click.echo(\"Please provide a command\", color=\"red\")\n sys.exit(1)\n sys.exit()\n\n\nif __name__ == \"__main__\":\n cli()\n", "path": "pipenv/vendor/pythonfinder/cli.py"}, {"content": "# -*- coding=utf-8 -*-\nfrom __future__ import absolute_import, print_function\n\nimport operator\n\nfrom collections import defaultdict\n\nimport attr\n\nfrom ..exceptions import InvalidPythonVersion\nfrom ..utils import ensure_path\nfrom .mixins import BaseFinder\nfrom .path import PathEntry\nfrom .python import PythonVersion, VersionMap\n\n\[email protected]\nclass WindowsFinder(BaseFinder):\n paths = attr.ib(default=attr.Factory(list))\n version_list = attr.ib(default=attr.Factory(list))\n versions = attr.ib()\n pythons = attr.ib()\n\n def find_all_python_versions(\n self,\n major=None,\n minor=None,\n patch=None,\n pre=None,\n dev=None,\n arch=None,\n name=None,\n ):\n version_matcher = operator.methodcaller(\n \"matches\",\n major=major,\n minor=minor,\n patch=patch,\n pre=pre,\n dev=dev,\n arch=arch,\n name=name,\n )\n py_filter = filter(\n None, filter(lambda c: version_matcher(c), self.version_list)\n )\n version_sort = operator.attrgetter(\"version_sort\")\n return [c.comes_from for c in sorted(py_filter, key=version_sort, 
reverse=True)]\n\n def find_python_version(\n self,\n major=None,\n minor=None,\n patch=None,\n pre=None,\n dev=None,\n arch=None,\n name=None,\n ):\n return next(\n (\n v\n for v in self.find_all_python_versions(\n major=major,\n minor=minor,\n patch=patch,\n pre=pre,\n dev=dev,\n arch=arch,\n name=None,\n )\n ),\n None,\n )\n\n @versions.default\n def get_versions(self):\n versions = defaultdict(PathEntry)\n from pythonfinder._vendor.pep514tools import environment as pep514env\n\n env_versions = pep514env.findall()\n path = None\n for version_object in env_versions:\n install_path = getattr(version_object.info, \"install_path\", None)\n if install_path is None:\n continue\n path = ensure_path(install_path.__getattr__(\"\"))\n try:\n py_version = PythonVersion.from_windows_launcher(version_object)\n except InvalidPythonVersion:\n continue\n self.version_list.append(py_version)\n base_dir = PathEntry.create(\n path,\n is_root=True,\n only_python=True,\n pythons={py_version.comes_from.path: py_version},\n )\n versions[py_version.version_tuple[:5]] = base_dir\n self.paths.append(base_dir)\n return versions\n\n @pythons.default\n def get_pythons(self):\n pythons = defaultdict()\n for version in self.version_list:\n _path = ensure_path(version.comes_from.path)\n pythons[_path.as_posix()] = version.comes_from\n return pythons\n\n @classmethod\n def create(cls):\n return cls()\n", "path": "pipenv/vendor/pythonfinder/models/windows.py"}], "after_files": [{"content": "from __future__ import print_function, absolute_import\n\n__version__ = '1.1.7.dev0'\n\n# Add NullHandler to \"pythonfinder\" logger, because Python2's default root\n# logger has no handler and warnings like this would be reported:\n#\n# > No handlers could be found for logger \"pythonfinder.models.pyenv\"\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n__all__ = [\"Finder\", \"WindowsFinder\", \"SystemPath\", \"InvalidPythonVersion\"]\nfrom .pythonfinder import Finder\nfrom .models import SystemPath, WindowsFinder\nfrom .exceptions import InvalidPythonVersion\n", "path": "pipenv/vendor/pythonfinder/__init__.py"}, {"content": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\nfrom __future__ import print_function, absolute_import\nimport click\nimport crayons\nimport sys\nfrom . 
import __version__\nfrom .pythonfinder import Finder\n\n\[email protected]()\[email protected](\"--find\", default=False, nargs=1, help=\"Find a specific python version.\")\[email protected](\"--which\", default=False, nargs=1, help=\"Run the which command.\")\[email protected](\n \"--findall\", is_flag=True, default=False, help=\"Find all python versions.\"\n)\[email protected](\n \"--version\", is_flag=True, default=False, help=\"Display PythonFinder version.\"\n)\[email protected](\"--ignore-unsupported/--no-unsupported\", is_flag=True, default=True, envvar=\"PYTHONFINDER_IGNORE_UNSUPPORTED\", help=\"Ignore unsupported python versions.\")\[email protected]_option(prog_name='pyfinder', version=__version__)\[email protected]_context\ndef cli(ctx, find=False, which=False, findall=False, version=False, ignore_unsupported=True):\n if version:\n click.echo(\n \"{0} version {1}\".format(\n crayons.white(\"PythonFinder\", bold=True), crayons.yellow(__version__)\n )\n )\n sys.exit(0)\n finder = Finder(ignore_unsupported=ignore_unsupported)\n if findall:\n versions = [v for v in finder.find_all_python_versions()]\n if versions:\n click.secho(\"Found python at the following locations:\", fg=\"green\")\n for v in versions:\n py = v.py_version\n comes_from = getattr(py, \"comes_from\", None)\n if comes_from is not None:\n comes_from_path = getattr(comes_from, \"path\", v.path)\n else:\n comes_from_path = v.path\n click.secho(\n \"{py.name!s}: {py.version!s} ({py.architecture!s}) @ {comes_from!s}\".format(\n py=py, comes_from=comes_from_path\n ),\n fg=\"yellow\",\n )\n sys.exit(0)\n else:\n click.secho(\n \"ERROR: No valid python versions found! Check your path and try again.\",\n fg=\"red\",\n )\n if find:\n click.secho(\"Searching for python: {0!s}\".format(find.strip()), fg=\"yellow\")\n found = finder.find_python_version(find.strip())\n if found:\n py = found.py_version\n comes_from = getattr(py, \"comes_from\", None)\n if comes_from is not None:\n comes_from_path = getattr(comes_from, \"path\", found.path)\n else:\n comes_from_path = found.path\n arch = getattr(py, \"architecture\", None)\n click.secho(\"Found python at the following locations:\", fg=\"green\")\n click.secho(\n \"{py.name!s}: {py.version!s} ({py.architecture!s}) @ {comes_from!s}\".format(\n py=py, comes_from=comes_from_path\n ),\n fg=\"yellow\",\n )\n sys.exit(0)\n else:\n click.secho(\"Failed to find matching executable...\", fg=\"yellow\")\n sys.exit(1)\n elif which:\n found = finder.system_path.which(which.strip())\n if found:\n click.secho(\"Found Executable: {0}\".format(found), fg=\"white\")\n sys.exit(0)\n else:\n click.secho(\"Failed to find matching executable...\", fg=\"yellow\")\n sys.exit(1)\n else:\n click.echo(\"Please provide a command\", color=\"red\")\n sys.exit(1)\n sys.exit()\n\n\nif __name__ == \"__main__\":\n cli()\n", "path": "pipenv/vendor/pythonfinder/cli.py"}, {"content": "# -*- coding=utf-8 -*-\nfrom __future__ import absolute_import, print_function\n\nimport operator\n\nfrom collections import defaultdict\n\nimport attr\n\nfrom ..exceptions import InvalidPythonVersion\nfrom ..utils import ensure_path\nfrom .mixins import BaseFinder\nfrom .path import PathEntry\nfrom .python import PythonVersion, VersionMap\n\n\[email protected]\nclass WindowsFinder(BaseFinder):\n paths = attr.ib(default=attr.Factory(list))\n version_list = attr.ib(default=attr.Factory(list))\n versions = attr.ib()\n pythons = attr.ib()\n\n def find_all_python_versions(\n self,\n major=None,\n minor=None,\n patch=None,\n pre=None,\n 
dev=None,\n arch=None,\n name=None,\n ):\n version_matcher = operator.methodcaller(\n \"matches\",\n major=major,\n minor=minor,\n patch=patch,\n pre=pre,\n dev=dev,\n arch=arch,\n name=name,\n )\n py_filter = filter(\n None, filter(lambda c: version_matcher(c), self.version_list)\n )\n version_sort = operator.attrgetter(\"version_sort\")\n return [c.comes_from for c in sorted(py_filter, key=version_sort, reverse=True)]\n\n def find_python_version(\n self,\n major=None,\n minor=None,\n patch=None,\n pre=None,\n dev=None,\n arch=None,\n name=None,\n ):\n return next(\n (\n v\n for v in self.find_all_python_versions(\n major=major,\n minor=minor,\n patch=patch,\n pre=pre,\n dev=dev,\n arch=arch,\n name=None,\n )\n ),\n None,\n )\n\n @versions.default\n def get_versions(self):\n versions = defaultdict(PathEntry)\n from pythonfinder._vendor.pep514tools import environment as pep514env\n\n env_versions = pep514env.findall()\n path = None\n for version_object in env_versions:\n install_path = getattr(version_object.info, \"install_path\", None)\n if install_path is None:\n continue\n try:\n path = ensure_path(install_path.__getattr__(\"\"))\n except AttributeError:\n continue\n try:\n py_version = PythonVersion.from_windows_launcher(version_object)\n except InvalidPythonVersion:\n continue\n self.version_list.append(py_version)\n base_dir = PathEntry.create(\n path,\n is_root=True,\n only_python=True,\n pythons={py_version.comes_from.path: py_version},\n )\n versions[py_version.version_tuple[:5]] = base_dir\n self.paths.append(base_dir)\n return versions\n\n @pythons.default\n def get_pythons(self):\n pythons = defaultdict()\n for version in self.version_list:\n _path = ensure_path(version.comes_from.path)\n pythons[_path.as_posix()] = version.comes_from\n return pythons\n\n @classmethod\n def create(cls):\n return cls()\n", "path": "pipenv/vendor/pythonfinder/models/windows.py"}]} | 3,857 | 544 |
gh_patches_debug_977 | rasdani/github-patches | git_diff | medtagger__MedTagger-442 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Getting random scan for non-existing task key results in 500
## Current Behavior
Providing non existing task key results in 500 HTTP code.
## Expected Behavior
Backend should handle this situation appropriate and return 404 HTTP code.
## Steps to Reproduce the Problem
1. Perform a GET `scans/random?task=<task_key>` and provide non existing key.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/medtagger/repositories/tasks.py`
Content:
```
1 """Module responsible for definition of TaskRepository."""
2 from typing import List
3
4 from medtagger.database import db_session
5 from medtagger.database.models import Task, LabelTag, Dataset
6 from medtagger.exceptions import InternalErrorException
7
8
9 def get_all_tasks(include_disabled: bool = False) -> List[Task]:
10 """Fetch all tasks from database ordered by key."""
11 query = Task.query
12 if not include_disabled:
13 query = query.filter(~Task.disabled)
14 return query.order_by(Task.key).all()
15
16
17 def get_task_by_key(key: str) -> Task:
18 """Fetch Task from database.
19
20 :param key: key for a Task
21 :return: Task object
22 """
23 with db_session() as session:
24 task = session.query(Task).filter(Task.key == key).one()
25 return task
26
27
28 def add_task(key: str, name: str, image_path: str, datasets_keys: List[str], tags: List[LabelTag]) -> Task:
29 """Add new Task to the database.
30
31 :param key: key that will identify such Task
32 :param name: name that will be used in the Use Interface for such Task
33 :param image_path: path to the image that represents such Task (used in User Interface)
34 :param datasets_keys: Keys of Datasets that Task takes Scans from
35 :param tags: Label Tags that will be created and assigned to Task
36 :return: Task object
37 """
38 with db_session() as session:
39 task = Task(key, name, image_path)
40 datasets = Dataset.query.filter(Dataset.key.in_(datasets_keys)).all() # type: ignore
41 task.datasets = datasets
42 task.available_tags = tags
43 session.add(task)
44 return task
45
46
47 def assign_label_tag(tag: LabelTag, task_key: str) -> None:
48 """Assign existing Label Tag to Task.
49
50 :param tag: tag that should be assigned to Task
51 :param task_key: key that will identify such Task
52 """
53 with db_session():
54 task = Task.query.filter(Task.key == task_key).one()
55 task.available_tags.append(tag)
56 task.save()
57
58
59 def unassign_label_tag(tag: LabelTag, task_key: str) -> None:
60 """Unassign Label Tag from Task.
61
62 :param tag: tag that should be unassigned from Task
63 :param task_key: key that will identify such Task
64 """
65 with db_session():
66 task = Task.query.filter(Task.key == task_key).one()
67 task.available_tags.remove(tag)
68 task.save()
69
70
71 def update(task_key: str, name: str = None, image_path: str = None, datasets_keys: List[str] = None) -> Task:
72 """Update Datasets where this Task will be available.
73
74 :param task_key: key that will identify such Task
75 :param name: (optional) new name for such Task
76 :param image_path: (optional) new path to the image which shows on the UI
77 :param datasets_keys: (optional) keys of Datasets which should have this Task
78 """
79 with db_session():
80 task = Task.query.filter(Task.key == task_key).one()
81 if name:
82 task.name = name
83 if image_path:
84 task.image_path = image_path
85 if datasets_keys:
86 datasets = Dataset.query.filter(Dataset.key.in_(datasets_keys)).all() # type: ignore
87 task.datasets = datasets
88 return task
89
90
91 def disable(task_key: str) -> None:
92 """Disable existing Task."""
93 disabling_query = Task.query.filter(Task.key == task_key)
94 updated = disabling_query.update({'disabled': True}, synchronize_session='fetch')
95 if not updated:
96 raise InternalErrorException(f'Task "{task_key}" was not disabled due to unknown database error.')
97
98
99 def enable(task_key: str) -> None:
100 """Enable existing Task."""
101 enabling_query = Task.query.filter(Task.key == task_key)
102 updated = enabling_query.update({'disabled': False}, synchronize_session='fetch')
103 if not updated:
104 raise InternalErrorException(f'Task "{task_key}" was not enabled due to unknown database error.')
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/medtagger/repositories/tasks.py b/backend/medtagger/repositories/tasks.py
--- a/backend/medtagger/repositories/tasks.py
+++ b/backend/medtagger/repositories/tasks.py
@@ -21,7 +21,7 @@
:return: Task object
"""
with db_session() as session:
- task = session.query(Task).filter(Task.key == key).one()
+ task = session.query(Task).filter(Task.key == key).first()
return task
| {"golden_diff": "diff --git a/backend/medtagger/repositories/tasks.py b/backend/medtagger/repositories/tasks.py\n--- a/backend/medtagger/repositories/tasks.py\n+++ b/backend/medtagger/repositories/tasks.py\n@@ -21,7 +21,7 @@\n :return: Task object\n \"\"\"\n with db_session() as session:\n- task = session.query(Task).filter(Task.key == key).one()\n+ task = session.query(Task).filter(Task.key == key).first()\n return task\n", "issue": "Getting random scan for non-existing task key results in 500\n## Current Behavior\r\n\r\nProviding non existing task key results in 500 HTTP code.\r\n\r\n## Expected Behavior\r\n\r\nBackend should handle this situation appropriate and return 404 HTTP code.\r\n\r\n## Steps to Reproduce the Problem\r\n\r\n 1. Perform a GET `scans/random?task=<task_key>` and provide non existing key.\r\n\n", "before_files": [{"content": "\"\"\"Module responsible for definition of TaskRepository.\"\"\"\nfrom typing import List\n\nfrom medtagger.database import db_session\nfrom medtagger.database.models import Task, LabelTag, Dataset\nfrom medtagger.exceptions import InternalErrorException\n\n\ndef get_all_tasks(include_disabled: bool = False) -> List[Task]:\n \"\"\"Fetch all tasks from database ordered by key.\"\"\"\n query = Task.query\n if not include_disabled:\n query = query.filter(~Task.disabled)\n return query.order_by(Task.key).all()\n\n\ndef get_task_by_key(key: str) -> Task:\n \"\"\"Fetch Task from database.\n\n :param key: key for a Task\n :return: Task object\n \"\"\"\n with db_session() as session:\n task = session.query(Task).filter(Task.key == key).one()\n return task\n\n\ndef add_task(key: str, name: str, image_path: str, datasets_keys: List[str], tags: List[LabelTag]) -> Task:\n \"\"\"Add new Task to the database.\n\n :param key: key that will identify such Task\n :param name: name that will be used in the Use Interface for such Task\n :param image_path: path to the image that represents such Task (used in User Interface)\n :param datasets_keys: Keys of Datasets that Task takes Scans from\n :param tags: Label Tags that will be created and assigned to Task\n :return: Task object\n \"\"\"\n with db_session() as session:\n task = Task(key, name, image_path)\n datasets = Dataset.query.filter(Dataset.key.in_(datasets_keys)).all() # type: ignore\n task.datasets = datasets\n task.available_tags = tags\n session.add(task)\n return task\n\n\ndef assign_label_tag(tag: LabelTag, task_key: str) -> None:\n \"\"\"Assign existing Label Tag to Task.\n\n :param tag: tag that should be assigned to Task\n :param task_key: key that will identify such Task\n \"\"\"\n with db_session():\n task = Task.query.filter(Task.key == task_key).one()\n task.available_tags.append(tag)\n task.save()\n\n\ndef unassign_label_tag(tag: LabelTag, task_key: str) -> None:\n \"\"\"Unassign Label Tag from Task.\n\n :param tag: tag that should be unassigned from Task\n :param task_key: key that will identify such Task\n \"\"\"\n with db_session():\n task = Task.query.filter(Task.key == task_key).one()\n task.available_tags.remove(tag)\n task.save()\n\n\ndef update(task_key: str, name: str = None, image_path: str = None, datasets_keys: List[str] = None) -> Task:\n \"\"\"Update Datasets where this Task will be available.\n\n :param task_key: key that will identify such Task\n :param name: (optional) new name for such Task\n :param image_path: (optional) new path to the image which shows on the UI\n :param datasets_keys: (optional) keys of Datasets which should have this Task\n \"\"\"\n with db_session():\n 
task = Task.query.filter(Task.key == task_key).one()\n if name:\n task.name = name\n if image_path:\n task.image_path = image_path\n if datasets_keys:\n datasets = Dataset.query.filter(Dataset.key.in_(datasets_keys)).all() # type: ignore\n task.datasets = datasets\n return task\n\n\ndef disable(task_key: str) -> None:\n \"\"\"Disable existing Task.\"\"\"\n disabling_query = Task.query.filter(Task.key == task_key)\n updated = disabling_query.update({'disabled': True}, synchronize_session='fetch')\n if not updated:\n raise InternalErrorException(f'Task \"{task_key}\" was not disabled due to unknown database error.')\n\n\ndef enable(task_key: str) -> None:\n \"\"\"Enable existing Task.\"\"\"\n enabling_query = Task.query.filter(Task.key == task_key)\n updated = enabling_query.update({'disabled': False}, synchronize_session='fetch')\n if not updated:\n raise InternalErrorException(f'Task \"{task_key}\" was not enabled due to unknown database error.')\n", "path": "backend/medtagger/repositories/tasks.py"}], "after_files": [{"content": "\"\"\"Module responsible for definition of TaskRepository.\"\"\"\nfrom typing import List\n\nfrom medtagger.database import db_session\nfrom medtagger.database.models import Task, LabelTag, Dataset\nfrom medtagger.exceptions import InternalErrorException\n\n\ndef get_all_tasks(include_disabled: bool = False) -> List[Task]:\n \"\"\"Fetch all tasks from database ordered by key.\"\"\"\n query = Task.query\n if not include_disabled:\n query = query.filter(~Task.disabled)\n return query.order_by(Task.key).all()\n\n\ndef get_task_by_key(key: str) -> Task:\n \"\"\"Fetch Task from database.\n\n :param key: key for a Task\n :return: Task object\n \"\"\"\n with db_session() as session:\n task = session.query(Task).filter(Task.key == key).first()\n return task\n\n\ndef add_task(key: str, name: str, image_path: str, datasets_keys: List[str], tags: List[LabelTag]) -> Task:\n \"\"\"Add new Task to the database.\n\n :param key: key that will identify such Task\n :param name: name that will be used in the Use Interface for such Task\n :param image_path: path to the image that represents such Task (used in User Interface)\n :param datasets_keys: Keys of Datasets that Task takes Scans from\n :param tags: Label Tags that will be created and assigned to Task\n :return: Task object\n \"\"\"\n with db_session() as session:\n task = Task(key, name, image_path)\n datasets = Dataset.query.filter(Dataset.key.in_(datasets_keys)).all() # type: ignore\n task.datasets = datasets\n task.available_tags = tags\n session.add(task)\n return task\n\n\ndef assign_label_tag(tag: LabelTag, task_key: str) -> None:\n \"\"\"Assign existing Label Tag to Task.\n\n :param tag: tag that should be assigned to Task\n :param task_key: key that will identify such Task\n \"\"\"\n with db_session():\n task = Task.query.filter(Task.key == task_key).one()\n task.available_tags.append(tag)\n task.save()\n\n\ndef unassign_label_tag(tag: LabelTag, task_key: str) -> None:\n \"\"\"Unassign Label Tag from Task.\n\n :param tag: tag that should be unassigned from Task\n :param task_key: key that will identify such Task\n \"\"\"\n with db_session():\n task = Task.query.filter(Task.key == task_key).one()\n task.available_tags.remove(tag)\n task.save()\n\n\ndef update(task_key: str, name: str = None, image_path: str = None, datasets_keys: List[str] = None) -> Task:\n \"\"\"Update Datasets where this Task will be available.\n\n :param task_key: key that will identify such Task\n :param name: (optional) new name for such Task\n 
:param image_path: (optional) new path to the image which shows on the UI\n :param datasets_keys: (optional) keys of Datasets which should have this Task\n \"\"\"\n with db_session():\n task = Task.query.filter(Task.key == task_key).one()\n if name:\n task.name = name\n if image_path:\n task.image_path = image_path\n if datasets_keys:\n datasets = Dataset.query.filter(Dataset.key.in_(datasets_keys)).all() # type: ignore\n task.datasets = datasets\n return task\n\n\ndef disable(task_key: str) -> None:\n \"\"\"Disable existing Task.\"\"\"\n disabling_query = Task.query.filter(Task.key == task_key)\n updated = disabling_query.update({'disabled': True}, synchronize_session='fetch')\n if not updated:\n raise InternalErrorException(f'Task \"{task_key}\" was not disabled due to unknown database error.')\n\n\ndef enable(task_key: str) -> None:\n \"\"\"Enable existing Task.\"\"\"\n enabling_query = Task.query.filter(Task.key == task_key)\n updated = enabling_query.update({'disabled': False}, synchronize_session='fetch')\n if not updated:\n raise InternalErrorException(f'Task \"{task_key}\" was not enabled due to unknown database error.')\n", "path": "backend/medtagger/repositories/tasks.py"}]} | 1,444 | 113 |
gh_patches_debug_40038 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-4223 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove py36 support
Python 3.6 reaches end of life on December 23. We're planning to release major version v1.0 in December. For clarity and ease of support, let's remove official support for Python 3.6 ahead of v1.0.
I think this looks like:
- Turning off py36 tests
- Bumping `python_requires` for `dbt-core`
Removal of related cruft (e.g. backported `dataclasses`) can come later
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/postgres/setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 6):
6 print('Error: dbt does not support this version of Python.')
7 print('Please upgrade to Python 3.6 or higher.')
8 sys.exit(1)
9
10
11 from setuptools import setup
12 try:
13 from setuptools import find_namespace_packages
14 except ImportError:
15 # the user has a downlevel version of setuptools.
16 print('Error: dbt requires setuptools v40.1.0 or higher.')
17 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
18 'and try again')
19 sys.exit(1)
20
21
22 PSYCOPG2_MESSAGE = '''
23 No package name override was set.
24 Using 'psycopg2-binary' package to satisfy 'psycopg2'
25
26 If you experience segmentation faults, silent crashes, or installation errors,
27 consider retrying with the 'DBT_PSYCOPG2_NAME' environment variable set to
28 'psycopg2'. It may require a compiler toolchain and development libraries!
29 '''.strip()
30
31
32 def _dbt_psycopg2_name():
33 # if the user chose something, use that
34 package_name = os.getenv('DBT_PSYCOPG2_NAME', '')
35 if package_name:
36 return package_name
37
38 # default to psycopg2-binary for all OSes/versions
39 print(PSYCOPG2_MESSAGE)
40 return 'psycopg2-binary'
41
42
43 package_name = "dbt-postgres"
44 package_version = "1.0.0b2"
45 description = """The postgres adpter plugin for dbt (data build tool)"""
46
47 this_directory = os.path.abspath(os.path.dirname(__file__))
48 with open(os.path.join(this_directory, 'README.md')) as f:
49 long_description = f.read()
50
51 DBT_PSYCOPG2_NAME = _dbt_psycopg2_name()
52
53 setup(
54 name=package_name,
55 version=package_version,
56 description=description,
57 long_description=long_description,
58 long_description_content_type='text/markdown',
59 author="dbt Labs",
60 author_email="[email protected]",
61 url="https://github.com/dbt-labs/dbt-core",
62 packages=find_namespace_packages(include=['dbt', 'dbt.*']),
63 package_data={
64 'dbt': [
65 'include/postgres/dbt_project.yml',
66 'include/postgres/sample_profiles.yml',
67 'include/postgres/macros/*.sql',
68 'include/postgres/macros/**/*.sql',
69 ]
70 },
71 install_requires=[
72 'dbt-core=={}'.format(package_version),
73 '{}~=2.8'.format(DBT_PSYCOPG2_NAME),
74 ],
75 zip_safe=False,
76 classifiers=[
77 'Development Status :: 5 - Production/Stable',
78
79 'License :: OSI Approved :: Apache Software License',
80
81 'Operating System :: Microsoft :: Windows',
82 'Operating System :: MacOS :: MacOS X',
83 'Operating System :: POSIX :: Linux',
84
85 'Programming Language :: Python :: 3.6',
86 'Programming Language :: Python :: 3.7',
87 'Programming Language :: Python :: 3.8',
88 'Programming Language :: Python :: 3.9',
89 ],
90 python_requires=">=3.6.2",
91 )
92
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import sys
4
5
6 if 'sdist' not in sys.argv:
7 print('')
8 print('As of v1.0.0, `pip install dbt` is no longer supported.')
9 print('Instead, please use either:')
10 print(' - `pip install dbt-core`, for core functionality')
11 print(' - `pip install dbt-<adapter>`, to use dbt with your database, platform, or query engine')
12 print('See full list: https://docs.getdbt.com/docs/available-adapters')
13 print('')
14 sys.exit(1)
15
16
17 if sys.version_info < (3, 6):
18 print('Error: dbt does not support this version of Python.')
19 print('Please upgrade to Python 3.6 or higher.')
20 sys.exit(1)
21
22
23 from setuptools import setup
24 try:
25 from setuptools import find_namespace_packages
26 except ImportError:
27 # the user has a downlevel version of setuptools.
28 print('Error: dbt requires setuptools v40.1.0 or higher.')
29 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
30 'and try again')
31 sys.exit(1)
32
33 this_directory = os.path.abspath(os.path.dirname(__file__))
34 with open(os.path.join(this_directory, 'README.md')) as f:
35 long_description = f.read()
36
37
38 package_name = "dbt"
39 package_version = "1.0.0b2"
40 description = """With dbt, data analysts and engineers can build analytics \
41 the way engineers build applications."""
42
43
44 setup(
45 name=package_name,
46 version=package_version,
47
48 description=description,
49 long_description=long_description,
50 long_description_content_type='text/markdown',
51
52 author="dbt Labs",
53 author_email="[email protected]",
54 url="https://github.com/dbt-labs/dbt-core",
55 zip_safe=False,
56 classifiers=[
57 'Development Status :: 7 - Inactive',
58
59 'License :: OSI Approved :: Apache Software License',
60
61 'Operating System :: Microsoft :: Windows',
62 'Operating System :: MacOS :: MacOS X',
63 'Operating System :: POSIX :: Linux',
64
65 'Programming Language :: Python :: 3.6',
66 'Programming Language :: Python :: 3.7',
67 'Programming Language :: Python :: 3.8',
68 'Programming Language :: Python :: 3.9',
69 ],
70 python_requires=">=3.6.2",
71 )
72
```
Path: `core/setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 6):
6 print('Error: dbt does not support this version of Python.')
7 print('Please upgrade to Python 3.6 or higher.')
8 sys.exit(1)
9
10
11 from setuptools import setup
12 try:
13 from setuptools import find_namespace_packages
14 except ImportError:
15 # the user has a downlevel version of setuptools.
16 print('Error: dbt requires setuptools v40.1.0 or higher.')
17 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
18 'and try again')
19 sys.exit(1)
20
21
22 this_directory = os.path.abspath(os.path.dirname(__file__))
23 with open(os.path.join(this_directory, 'README.md')) as f:
24 long_description = f.read()
25
26
27 package_name = "dbt-core"
28 package_version = "1.0.0b2"
29 description = """With dbt, data analysts and engineers can build analytics \
30 the way engineers build applications."""
31
32
33 setup(
34 name=package_name,
35 version=package_version,
36 description=description,
37 long_description=long_description,
38 long_description_content_type='text/markdown',
39 author="dbt Labs",
40 author_email="[email protected]",
41 url="https://github.com/dbt-labs/dbt-core",
42 packages=find_namespace_packages(include=['dbt', 'dbt.*']),
43 include_package_data = True,
44 test_suite='test',
45 entry_points={
46 'console_scripts': [
47 'dbt = dbt.main:main',
48 ],
49 },
50 scripts=[
51 'scripts/dbt',
52 ],
53 install_requires=[
54 'Jinja2==2.11.3',
55 'agate>=1.6,<1.6.4',
56 'click>=8,<9',
57 'colorama>=0.3.9,<0.4.5',
58 'dataclasses>=0.6,<0.9;python_version<"3.7"',
59 'hologram==0.0.14',
60 'isodate>=0.6,<0.7',
61 'logbook>=1.5,<1.6',
62 'mashumaro==2.9',
63 'minimal-snowplow-tracker==0.0.2',
64 'networkx>=2.3,<3',
65 'packaging>=20.9,<22.0',
66 'sqlparse>=0.2.3,<0.5',
67 'dbt-extractor==0.4.0',
68 'typing-extensions>=3.7.4,<3.11',
69 'werkzeug>=1,<3',
70 # the following are all to match snowflake-connector-python
71 'requests<3.0.0',
72 'idna>=2.5,<4',
73 'cffi>=1.9,<2.0.0',
74 ],
75 zip_safe=False,
76 classifiers=[
77 'Development Status :: 5 - Production/Stable',
78
79 'License :: OSI Approved :: Apache Software License',
80
81 'Operating System :: Microsoft :: Windows',
82 'Operating System :: MacOS :: MacOS X',
83 'Operating System :: POSIX :: Linux',
84
85 'Programming Language :: Python :: 3.6',
86 'Programming Language :: Python :: 3.7',
87 'Programming Language :: Python :: 3.8',
88 'Programming Language :: Python :: 3.9',
89 ],
90 python_requires=">=3.6.3",
91 )
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -2,9 +2,9 @@
import os
import sys
-if sys.version_info < (3, 6):
+if sys.version_info < (3, 7):
print('Error: dbt does not support this version of Python.')
- print('Please upgrade to Python 3.6 or higher.')
+ print('Please upgrade to Python 3.7 or higher.')
sys.exit(1)
@@ -55,7 +55,6 @@
'agate>=1.6,<1.6.4',
'click>=8,<9',
'colorama>=0.3.9,<0.4.5',
- 'dataclasses>=0.6,<0.9;python_version<"3.7"',
'hologram==0.0.14',
'isodate>=0.6,<0.7',
'logbook>=1.5,<1.6',
@@ -82,10 +81,9 @@
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
- 'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
- python_requires=">=3.6.3",
+ python_requires=">=3.7",
)
diff --git a/plugins/postgres/setup.py b/plugins/postgres/setup.py
--- a/plugins/postgres/setup.py
+++ b/plugins/postgres/setup.py
@@ -2,9 +2,9 @@
import os
import sys
-if sys.version_info < (3, 6):
+if sys.version_info < (3, 7):
print('Error: dbt does not support this version of Python.')
- print('Please upgrade to Python 3.6 or higher.')
+ print('Please upgrade to Python 3.7 or higher.')
sys.exit(1)
@@ -82,10 +82,9 @@
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
- 'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
- python_requires=">=3.6.2",
+ python_requires=">=3.7",
)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,9 +14,9 @@
sys.exit(1)
-if sys.version_info < (3, 6):
+if sys.version_info < (3, 7):
print('Error: dbt does not support this version of Python.')
- print('Please upgrade to Python 3.6 or higher.')
+ print('Please upgrade to Python 3.7 or higher.')
sys.exit(1)
@@ -62,10 +62,9 @@
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
- 'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
- python_requires=">=3.6.2",
+ python_requires=">=3.7",
)
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -2,9 +2,9 @@\n import os\n import sys\n \n-if sys.version_info < (3, 6):\n+if sys.version_info < (3, 7):\n print('Error: dbt does not support this version of Python.')\n- print('Please upgrade to Python 3.6 or higher.')\n+ print('Please upgrade to Python 3.7 or higher.')\n sys.exit(1)\n \n \n@@ -55,7 +55,6 @@\n 'agate>=1.6,<1.6.4',\n 'click>=8,<9',\n 'colorama>=0.3.9,<0.4.5',\n- 'dataclasses>=0.6,<0.9;python_version<\"3.7\"',\n 'hologram==0.0.14',\n 'isodate>=0.6,<0.7',\n 'logbook>=1.5,<1.6',\n@@ -82,10 +81,9 @@\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n \n- 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n- python_requires=\">=3.6.3\",\n+ python_requires=\">=3.7\",\n )\ndiff --git a/plugins/postgres/setup.py b/plugins/postgres/setup.py\n--- a/plugins/postgres/setup.py\n+++ b/plugins/postgres/setup.py\n@@ -2,9 +2,9 @@\n import os\n import sys\n \n-if sys.version_info < (3, 6):\n+if sys.version_info < (3, 7):\n print('Error: dbt does not support this version of Python.')\n- print('Please upgrade to Python 3.6 or higher.')\n+ print('Please upgrade to Python 3.7 or higher.')\n sys.exit(1)\n \n \n@@ -82,10 +82,9 @@\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n \n- 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n- python_requires=\">=3.6.2\",\n+ python_requires=\">=3.7\",\n )\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,9 +14,9 @@\n sys.exit(1)\n \n \n-if sys.version_info < (3, 6):\n+if sys.version_info < (3, 7):\n print('Error: dbt does not support this version of Python.')\n- print('Please upgrade to Python 3.6 or higher.')\n+ print('Please upgrade to Python 3.7 or higher.')\n sys.exit(1)\n \n \n@@ -62,10 +62,9 @@\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n \n- 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n- python_requires=\">=3.6.2\",\n+ python_requires=\">=3.7\",\n )\n", "issue": "Remove py36 support\nPython 3.6 end of life is December 23. We're planning to release major version v1.0 in December. For clarity and ease of support, let's remove official support for python 3.6 ahead of v1.0.\r\n\r\nI think this looks like:\r\n- Turning off py36 tests\r\n- Bumping `python_requires` for `dbt-core`\r\n\r\nRemoval of related cruft (e.g. 
backported `dataclasses`) can come later\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\nPSYCOPG2_MESSAGE = '''\nNo package name override was set.\nUsing 'psycopg2-binary' package to satisfy 'psycopg2'\n\nIf you experience segmentation faults, silent crashes, or installation errors,\nconsider retrying with the 'DBT_PSYCOPG2_NAME' environment variable set to\n'psycopg2'. It may require a compiler toolchain and development libraries!\n'''.strip()\n\n\ndef _dbt_psycopg2_name():\n # if the user chose something, use that\n package_name = os.getenv('DBT_PSYCOPG2_NAME', '')\n if package_name:\n return package_name\n\n # default to psycopg2-binary for all OSes/versions\n print(PSYCOPG2_MESSAGE)\n return 'psycopg2-binary'\n\n\npackage_name = \"dbt-postgres\"\npackage_version = \"1.0.0b2\"\ndescription = \"\"\"The postgres adpter plugin for dbt (data build tool)\"\"\"\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md')) as f:\n long_description = f.read()\n\nDBT_PSYCOPG2_NAME = _dbt_psycopg2_name()\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/postgres/dbt_project.yml',\n 'include/postgres/sample_profiles.yml',\n 'include/postgres/macros/*.sql',\n 'include/postgres/macros/**/*.sql',\n ]\n },\n install_requires=[\n 'dbt-core=={}'.format(package_version),\n '{}~=2.8'.format(DBT_PSYCOPG2_NAME),\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.2\",\n)\n", "path": "plugins/postgres/setup.py"}, {"content": "#!/usr/bin/env python\nimport os\nimport sys\n\n\nif 'sdist' not in sys.argv:\n print('')\n print('As of v1.0.0, `pip install dbt` is no longer supported.')\n print('Instead, please use either:')\n print(' - `pip install dbt-core`, for core functionality')\n print(' - `pip install dbt-<adapter>`, to use dbt with your database, platform, or query engine')\n print('See full list: https://docs.getdbt.com/docs/available-adapters')\n print('')\n sys.exit(1)\n\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of 
setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md')) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt\"\npackage_version = \"1.0.0b2\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n zip_safe=False,\n classifiers=[\n 'Development Status :: 7 - Inactive',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.2\",\n)\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md')) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.0.0b2\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n include_package_data = True,\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.3',\n 'agate>=1.6,<1.6.4',\n 'click>=8,<9',\n 'colorama>=0.3.9,<0.4.5',\n 'dataclasses>=0.6,<0.9;python_version<\"3.7\"',\n 'hologram==0.0.14',\n 'isodate>=0.6,<0.7',\n 'logbook>=1.5,<1.6',\n 'mashumaro==2.9',\n 'minimal-snowplow-tracker==0.0.2',\n 'networkx>=2.3,<3',\n 'packaging>=20.9,<22.0',\n 'sqlparse>=0.2.3,<0.5',\n 'dbt-extractor==0.4.0',\n 'typing-extensions>=3.7.4,<3.11',\n 'werkzeug>=1,<3',\n # the following are all to match snowflake-connector-python\n 'requests<3.0.0',\n 'idna>=2.5,<4',\n 'cffi>=1.9,<2.0.0',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: 
Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.3\",\n)\n", "path": "core/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.7 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\nPSYCOPG2_MESSAGE = '''\nNo package name override was set.\nUsing 'psycopg2-binary' package to satisfy 'psycopg2'\n\nIf you experience segmentation faults, silent crashes, or installation errors,\nconsider retrying with the 'DBT_PSYCOPG2_NAME' environment variable set to\n'psycopg2'. It may require a compiler toolchain and development libraries!\n'''.strip()\n\n\ndef _dbt_psycopg2_name():\n # if the user chose something, use that\n package_name = os.getenv('DBT_PSYCOPG2_NAME', '')\n if package_name:\n return package_name\n\n # default to psycopg2-binary for all OSes/versions\n print(PSYCOPG2_MESSAGE)\n return 'psycopg2-binary'\n\n\npackage_name = \"dbt-postgres\"\npackage_version = \"1.0.0b2\"\ndescription = \"\"\"The postgres adpter plugin for dbt (data build tool)\"\"\"\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md')) as f:\n long_description = f.read()\n\nDBT_PSYCOPG2_NAME = _dbt_psycopg2_name()\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/postgres/dbt_project.yml',\n 'include/postgres/sample_profiles.yml',\n 'include/postgres/macros/*.sql',\n 'include/postgres/macros/**/*.sql',\n ]\n },\n install_requires=[\n 'dbt-core=={}'.format(package_version),\n '{}~=2.8'.format(DBT_PSYCOPG2_NAME),\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.7\",\n)\n", "path": "plugins/postgres/setup.py"}, {"content": "#!/usr/bin/env python\nimport os\nimport sys\n\n\nif 'sdist' not in sys.argv:\n print('')\n print('As of v1.0.0, `pip install dbt` is no longer supported.')\n print('Instead, please use either:')\n print(' - `pip install dbt-core`, for core functionality')\n print(' - `pip install dbt-<adapter>`, to use dbt with your database, platform, or query engine')\n print('See full list: https://docs.getdbt.com/docs/available-adapters')\n print('')\n sys.exit(1)\n\n\nif sys.version_info < (3, 7):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.7 or higher.')\n 
sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md')) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt\"\npackage_version = \"1.0.0b2\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n zip_safe=False,\n classifiers=[\n 'Development Status :: 7 - Inactive',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.7\",\n)\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.7 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md')) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.0.0b2\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n include_package_data = True,\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.3',\n 'agate>=1.6,<1.6.4',\n 'click>=8,<9',\n 'colorama>=0.3.9,<0.4.5',\n 'hologram==0.0.14',\n 'isodate>=0.6,<0.7',\n 'logbook>=1.5,<1.6',\n 'mashumaro==2.9',\n 'minimal-snowplow-tracker==0.0.2',\n 'networkx>=2.3,<3',\n 'packaging>=20.9,<22.0',\n 'sqlparse>=0.2.3,<0.5',\n 'dbt-extractor==0.4.0',\n 'typing-extensions>=3.7.4,<3.11',\n 'werkzeug>=1,<3',\n # the following are all to match snowflake-connector-python\n 'requests<3.0.0',\n 'idna>=2.5,<4',\n 'cffi>=1.9,<2.0.0',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating 
System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.7\",\n)\n", "path": "core/setup.py"}]} | 2,860 | 784 |
gh_patches_debug_24127 | rasdani/github-patches | git_diff | deepset-ai__haystack-6358 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Installation] Import of GPTGenerator failing without torch being installed
**Describe the bug**
Installed haystack from latest main in a fresh env via
```
pip install -e '.[dev]'
pip install -U -e .[preview]
```
Import of GPTGenerator throws an error:
```
>>> from haystack.preview.components.generators.openai import GPTGenerator
None of PyTorch, TensorFlow >= 2.0, or Flax have been found. Models won't be available and only tokenizers, configuration and file/data utilities can be used.
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/maltepietsch/deepset/dev/haystack/haystack/preview/components/generators/__init__.py", line 1, in <module>
from haystack.preview.components.generators.hugging_face_local import HuggingFaceLocalGenerator
File "/Users/maltepietsch/deepset/dev/haystack/haystack/preview/components/generators/hugging_face_local.py", line 27, in <module>
class StopWordsCriteria(StoppingCriteria):
File "/Users/maltepietsch/deepset/dev/haystack/haystack/preview/components/generators/hugging_face_local.py", line 43, in StopWordsCriteria
device: Union[str, torch.device] = "cpu",
^^^^^
NameError: name 'torch' is not defined
```
**Expected behavior**
I can use GPTGenerator without any torch installation 🤔
**FAQ Check**
- [x] Have you had a look at [our new FAQ page](https://docs.haystack.deepset.ai/docs/faq)?
**System:**
- OS: MacOSX
- Haystack version (commit or version number): e905066
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/preview/components/generators/hugging_face_local.py`
Content:
```
1 import logging
2 from typing import Any, Dict, List, Literal, Optional, Union
3 from copy import deepcopy
4
5 from haystack.preview import component, default_to_dict
6 from haystack.preview.lazy_imports import LazyImport
7
8 logger = logging.getLogger(__name__)
9
10 SUPPORTED_TASKS = ["text-generation", "text2text-generation"]
11
12 with LazyImport(
13 message="PyTorch is needed to run this component. Please install it by following the instructions at https://pytorch.org/"
14 ) as torch_import:
15 import torch
16
17 with LazyImport(message="Run 'pip install transformers'") as transformers_import:
18 from huggingface_hub import model_info
19 from transformers import (
20 pipeline,
21 StoppingCriteriaList,
22 StoppingCriteria,
23 PreTrainedTokenizer,
24 PreTrainedTokenizerFast,
25 )
26
27 class StopWordsCriteria(StoppingCriteria):
28 """
29 Stops text generation if any one of the stop words is generated.
30
31 Note: When a stop word is encountered, the generation of new text is stopped.
32 However, if the stop word is in the prompt itself, it can stop generating new text
33 prematurely after the first token. This is particularly important for LLMs designed
34 for dialogue generation. For these models, like for example mosaicml/mpt-7b-chat,
35 the output includes both the new text and the original prompt. Therefore, it's important
36 to make sure your prompt has no stop words.
37 """
38
39 def __init__(
40 self,
41 tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
42 stop_words: List[str],
43 device: Union[str, torch.device] = "cpu",
44 ):
45 super().__init__()
46 encoded_stop_words = tokenizer(stop_words, add_special_tokens=False, padding=True, return_tensors="pt")
47 self.stop_ids = encoded_stop_words.input_ids.to(device)
48
49 def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
50 for stop_id in self.stop_ids:
51 found_stop_word = self.is_stop_word_found(input_ids, stop_id)
52 if found_stop_word:
53 return True
54 return False
55
56 def is_stop_word_found(self, generated_text_ids: torch.Tensor, stop_id: torch.Tensor) -> bool:
57 generated_text_ids = generated_text_ids[-1]
58 len_generated_text_ids = generated_text_ids.size(0)
59 len_stop_id = stop_id.size(0)
60 result = all(generated_text_ids[len_generated_text_ids - len_stop_id :].eq(stop_id))
61 return result
62
63
64 @component
65 class HuggingFaceLocalGenerator:
66 """
67 Generator based on a Hugging Face model.
68 This component provides an interface to generate text using a Hugging Face model that runs locally.
69
70 Usage example:
71 ```python
72 from haystack.preview.components.generators import HuggingFaceLocalGenerator
73
74 generator = HuggingFaceLocalGenerator(model="google/flan-t5-large",
75 task="text2text-generation",
76 generation_kwargs={
77 "max_new_tokens": 100,
78 "temperature": 0.9,
79 })
80
81 print(generator.run("Who is the best American actor?"))
82 # {'replies': ['John Cusack']}
83 ```
84 """
85
86 def __init__(
87 self,
88 model_name_or_path: str = "google/flan-t5-base",
89 task: Optional[Literal["text-generation", "text2text-generation"]] = None,
90 device: Optional[str] = None,
91 token: Optional[Union[str, bool]] = None,
92 generation_kwargs: Optional[Dict[str, Any]] = None,
93 pipeline_kwargs: Optional[Dict[str, Any]] = None,
94 stop_words: Optional[List[str]] = None,
95 ):
96 """
97 :param model_name_or_path: The name or path of a Hugging Face model for text generation,
98 for example, "google/flan-t5-large".
99 If the model is also specified in the `pipeline_kwargs`, this parameter will be ignored.
100 :param task: The task for the Hugging Face pipeline.
101 Possible values are "text-generation" and "text2text-generation".
102 Generally, decoder-only models like GPT support "text-generation",
103 while encoder-decoder models like T5 support "text2text-generation".
104 If the task is also specified in the `pipeline_kwargs`, this parameter will be ignored.
105 If not specified, the component will attempt to infer the task from the model name,
106 calling the Hugging Face Hub API.
107 :param device: The device on which the model is loaded. (e.g., "cpu", "cuda:0").
108 If `device` or `device_map` is specified in the `pipeline_kwargs`, this parameter will be ignored.
109 :param token: The token to use as HTTP bearer authorization for remote files.
110 If True, will use the token generated when running huggingface-cli login (stored in ~/.huggingface).
111 If the token is also specified in the `pipeline_kwargs`, this parameter will be ignored.
112 :param generation_kwargs: A dictionary containing keyword arguments to customize text generation.
113 Some examples: `max_length`, `max_new_tokens`, `temperature`, `top_k`, `top_p`,...
114 See Hugging Face's documentation for more information:
115 - https://huggingface.co/docs/transformers/main/en/generation_strategies#customize-text-generation
116 - https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationConfig
117 :param pipeline_kwargs: Dictionary containing keyword arguments used to initialize the pipeline.
118 These keyword arguments provide fine-grained control over the pipeline.
119 In case of duplication, these kwargs override `model_name_or_path`, `task`, `device`, and `token` init parameters.
120 See Hugging Face's [documentation](https://huggingface.co/docs/transformers/en/main_classes/pipelines#transformers.pipeline.task)
121 for more information on the available kwargs.
122 In this dictionary, you can also include `model_kwargs` to specify the kwargs
123 for model initialization:
124 https://huggingface.co/docs/transformers/en/main_classes/model#transformers.PreTrainedModel.from_pretrained
125 :param stop_words: A list of stop words. If any one of the stop words is generated, the generation is stopped.
126 If you provide this parameter, you should not specify the `stopping_criteria` in `generation_kwargs`.
127 For some chat models, the output includes both the new text and the original prompt.
128 In these cases, it's important to make sure your prompt has no stop words.
129 """
130 transformers_import.check()
131 torch_import.check()
132
133 pipeline_kwargs = pipeline_kwargs or {}
134 generation_kwargs = generation_kwargs or {}
135
136 # check if the pipeline_kwargs contain the essential parameters
137 # otherwise, populate them with values from other init parameters
138 pipeline_kwargs.setdefault("model", model_name_or_path)
139 pipeline_kwargs.setdefault("token", token)
140 if device is not None and "device" not in pipeline_kwargs and "device_map" not in pipeline_kwargs:
141 pipeline_kwargs["device"] = device
142
143 # task identification and validation
144 if task is None:
145 if "task" in pipeline_kwargs:
146 task = pipeline_kwargs["task"]
147 elif isinstance(pipeline_kwargs["model"], str):
148 task = model_info(pipeline_kwargs["model"], token=pipeline_kwargs["token"]).pipeline_tag
149
150 if task not in SUPPORTED_TASKS:
151 raise ValueError(
152 f"Task '{task}' is not supported. " f"The supported tasks are: {', '.join(SUPPORTED_TASKS)}."
153 )
154 pipeline_kwargs["task"] = task
155
156 # if not specified, set return_full_text to False for text-generation
157 # only generated text is returned (excluding prompt)
158 if task == "text-generation":
159 generation_kwargs.setdefault("return_full_text", False)
160
161 if stop_words and "stopping_criteria" in generation_kwargs:
162 raise ValueError(
163 "Found both the `stop_words` init parameter and the `stopping_criteria` key in `generation_kwargs`. "
164 "Please specify only one of them."
165 )
166
167 self.pipeline_kwargs = pipeline_kwargs
168 self.generation_kwargs = generation_kwargs
169 self.stop_words = stop_words
170 self.pipeline = None
171 self.stopping_criteria_list = None
172
173 def _get_telemetry_data(self) -> Dict[str, Any]:
174 """
175 Data that is sent to Posthog for usage analytics.
176 """
177 if isinstance(self.pipeline_kwargs["model"], str):
178 return {"model": self.pipeline_kwargs["model"]}
179 return {"model": f"[object of type {type(self.pipeline_kwargs['model'])}]"}
180
181 def warm_up(self):
182 if self.pipeline is None:
183 self.pipeline = pipeline(**self.pipeline_kwargs)
184
185 if self.stop_words and self.stopping_criteria_list is None:
186 stop_words_criteria = StopWordsCriteria(
187 tokenizer=self.pipeline.tokenizer, stop_words=self.stop_words, device=self.pipeline.device
188 )
189 self.stopping_criteria_list = StoppingCriteriaList([stop_words_criteria])
190
191 def to_dict(self) -> Dict[str, Any]:
192 """
193 Serialize this component to a dictionary.
194 """
195 pipeline_kwargs_to_serialize = deepcopy(self.pipeline_kwargs)
196
197 # we don't want to serialize valid tokens
198 if isinstance(pipeline_kwargs_to_serialize["token"], str):
199 pipeline_kwargs_to_serialize["token"] = None
200
201 return default_to_dict(
202 self,
203 pipeline_kwargs=pipeline_kwargs_to_serialize,
204 generation_kwargs=self.generation_kwargs,
205 stop_words=self.stop_words,
206 )
207
208 @component.output_types(replies=List[str])
209 def run(self, prompt: str, generation_kwargs: Optional[Dict[str, Any]] = None):
210 """
211 Run the text generation model on the given prompt.
212
213 :param prompt: A string representing the prompt.
214 :param generation_kwargs: Additional keyword arguments for text generation.
215 :return: A dictionary containing the generated replies.
216 """
217 if self.pipeline is None:
218 raise RuntimeError("The generation model has not been loaded. Please call warm_up() before running.")
219
220 if not prompt:
221 return {"replies": []}
222
223 # merge generation kwargs from init method with those from run method
224 updated_generation_kwargs = {**self.generation_kwargs, **(generation_kwargs or {})}
225
226 output = self.pipeline(prompt, stopping_criteria=self.stopping_criteria_list, **updated_generation_kwargs)
227 replies = [o["generated_text"] for o in output if "generated_text" in o]
228
229 if self.stop_words:
230 # the output of the pipeline includes the stop word
231 replies = [reply.replace(stop_word, "").rstrip() for reply in replies for stop_word in self.stop_words]
232
233 return {"replies": replies}
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/preview/components/generators/hugging_face_local.py b/haystack/preview/components/generators/hugging_face_local.py
--- a/haystack/preview/components/generators/hugging_face_local.py
+++ b/haystack/preview/components/generators/hugging_face_local.py
@@ -9,12 +9,8 @@
SUPPORTED_TASKS = ["text-generation", "text2text-generation"]
-with LazyImport(
- message="PyTorch is needed to run this component. Please install it by following the instructions at https://pytorch.org/"
-) as torch_import:
+with LazyImport(message="Run 'pip install transformers[torch]'") as torch_and_transformers_import:
import torch
-
-with LazyImport(message="Run 'pip install transformers'") as transformers_import:
from huggingface_hub import model_info
from transformers import (
pipeline,
@@ -127,8 +123,7 @@
For some chat models, the output includes both the new text and the original prompt.
In these cases, it's important to make sure your prompt has no stop words.
"""
- transformers_import.check()
- torch_import.check()
+ torch_and_transformers_import.check()
pipeline_kwargs = pipeline_kwargs or {}
generation_kwargs = generation_kwargs or {}
| {"golden_diff": "diff --git a/haystack/preview/components/generators/hugging_face_local.py b/haystack/preview/components/generators/hugging_face_local.py\n--- a/haystack/preview/components/generators/hugging_face_local.py\n+++ b/haystack/preview/components/generators/hugging_face_local.py\n@@ -9,12 +9,8 @@\n \n SUPPORTED_TASKS = [\"text-generation\", \"text2text-generation\"]\n \n-with LazyImport(\n- message=\"PyTorch is needed to run this component. Please install it by following the instructions at https://pytorch.org/\"\n-) as torch_import:\n+with LazyImport(message=\"Run 'pip install transformers[torch]'\") as torch_and_transformers_import:\n import torch\n-\n-with LazyImport(message=\"Run 'pip install transformers'\") as transformers_import:\n from huggingface_hub import model_info\n from transformers import (\n pipeline,\n@@ -127,8 +123,7 @@\n For some chat models, the output includes both the new text and the original prompt.\n In these cases, it's important to make sure your prompt has no stop words.\n \"\"\"\n- transformers_import.check()\n- torch_import.check()\n+ torch_and_transformers_import.check()\n \n pipeline_kwargs = pipeline_kwargs or {}\n generation_kwargs = generation_kwargs or {}\n", "issue": "[Installation] Import of GPTGenerator failing without torch being installed\n**Describe the bug**\r\nInstalled haystack from latest main in a fresh env via\r\n```\r\npip install -e '.[dev]'\r\npip install -U -e .[preview]\r\n```\r\nImport of GPTGenerator throws error:\r\n```\r\n>>> from haystack.preview.components.generators.openai import GPTGenerator\r\nNone of PyTorch, TensorFlow >= 2.0, or Flax have been found. Models won't be available and only tokenizers, configuration and file/data utilities can be used.\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/maltepietsch/deepset/dev/haystack/haystack/preview/components/generators/__init__.py\", line 1, in <module>\r\n from haystack.preview.components.generators.hugging_face_local import HuggingFaceLocalGenerator\r\n File \"/Users/maltepietsch/deepset/dev/haystack/haystack/preview/components/generators/hugging_face_local.py\", line 27, in <module>\r\n class StopWordsCriteria(StoppingCriteria):\r\n File \"/Users/maltepietsch/deepset/dev/haystack/haystack/preview/components/generators/hugging_face_local.py\", line 43, in StopWordsCriteria\r\n device: Union[str, torch.device] = \"cpu\",\r\n ^^^^^\r\nNameError: name 'torch' is not defined\r\n```\r\n**Expected behavior**\r\nI can use GPTGenerator without any torch installation \ud83e\udd14 \r\n\r\n\r\n**FAQ Check**\r\n- [x] Have you had a look at [our new FAQ page](https://docs.haystack.deepset.ai/docs/faq)?\r\n\r\n**System:**\r\n - OS: MacOSX\r\n - Haystack version (commit or version number): e905066\r\n\n", "before_files": [{"content": "import logging\nfrom typing import Any, Dict, List, Literal, Optional, Union\nfrom copy import deepcopy\n\nfrom haystack.preview import component, default_to_dict\nfrom haystack.preview.lazy_imports import LazyImport\n\nlogger = logging.getLogger(__name__)\n\nSUPPORTED_TASKS = [\"text-generation\", \"text2text-generation\"]\n\nwith LazyImport(\n message=\"PyTorch is needed to run this component. 
Please install it by following the instructions at https://pytorch.org/\"\n) as torch_import:\n import torch\n\nwith LazyImport(message=\"Run 'pip install transformers'\") as transformers_import:\n from huggingface_hub import model_info\n from transformers import (\n pipeline,\n StoppingCriteriaList,\n StoppingCriteria,\n PreTrainedTokenizer,\n PreTrainedTokenizerFast,\n )\n\n class StopWordsCriteria(StoppingCriteria):\n \"\"\"\n Stops text generation if any one of the stop words is generated.\n\n Note: When a stop word is encountered, the generation of new text is stopped.\n However, if the stop word is in the prompt itself, it can stop generating new text\n prematurely after the first token. This is particularly important for LLMs designed\n for dialogue generation. For these models, like for example mosaicml/mpt-7b-chat,\n the output includes both the new text and the original prompt. Therefore, it's important\n to make sure your prompt has no stop words.\n \"\"\"\n\n def __init__(\n self,\n tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],\n stop_words: List[str],\n device: Union[str, torch.device] = \"cpu\",\n ):\n super().__init__()\n encoded_stop_words = tokenizer(stop_words, add_special_tokens=False, padding=True, return_tensors=\"pt\")\n self.stop_ids = encoded_stop_words.input_ids.to(device)\n\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n for stop_id in self.stop_ids:\n found_stop_word = self.is_stop_word_found(input_ids, stop_id)\n if found_stop_word:\n return True\n return False\n\n def is_stop_word_found(self, generated_text_ids: torch.Tensor, stop_id: torch.Tensor) -> bool:\n generated_text_ids = generated_text_ids[-1]\n len_generated_text_ids = generated_text_ids.size(0)\n len_stop_id = stop_id.size(0)\n result = all(generated_text_ids[len_generated_text_ids - len_stop_id :].eq(stop_id))\n return result\n\n\n@component\nclass HuggingFaceLocalGenerator:\n \"\"\"\n Generator based on a Hugging Face model.\n This component provides an interface to generate text using a Hugging Face model that runs locally.\n\n Usage example:\n ```python\n from haystack.preview.components.generators import HuggingFaceLocalGenerator\n\n generator = HuggingFaceLocalGenerator(model=\"google/flan-t5-large\",\n task=\"text2text-generation\",\n generation_kwargs={\n \"max_new_tokens\": 100,\n \"temperature\": 0.9,\n })\n\n print(generator.run(\"Who is the best American actor?\"))\n # {'replies': ['John Cusack']}\n ```\n \"\"\"\n\n def __init__(\n self,\n model_name_or_path: str = \"google/flan-t5-base\",\n task: Optional[Literal[\"text-generation\", \"text2text-generation\"]] = None,\n device: Optional[str] = None,\n token: Optional[Union[str, bool]] = None,\n generation_kwargs: Optional[Dict[str, Any]] = None,\n pipeline_kwargs: Optional[Dict[str, Any]] = None,\n stop_words: Optional[List[str]] = None,\n ):\n \"\"\"\n :param model_name_or_path: The name or path of a Hugging Face model for text generation,\n for example, \"google/flan-t5-large\".\n If the model is also specified in the `pipeline_kwargs`, this parameter will be ignored.\n :param task: The task for the Hugging Face pipeline.\n Possible values are \"text-generation\" and \"text2text-generation\".\n Generally, decoder-only models like GPT support \"text-generation\",\n while encoder-decoder models like T5 support \"text2text-generation\".\n If the task is also specified in the `pipeline_kwargs`, this parameter will be ignored.\n If not specified, the component will attempt 
to infer the task from the model name,\n calling the Hugging Face Hub API.\n :param device: The device on which the model is loaded. (e.g., \"cpu\", \"cuda:0\").\n If `device` or `device_map` is specified in the `pipeline_kwargs`, this parameter will be ignored.\n :param token: The token to use as HTTP bearer authorization for remote files.\n If True, will use the token generated when running huggingface-cli login (stored in ~/.huggingface).\n If the token is also specified in the `pipeline_kwargs`, this parameter will be ignored.\n :param generation_kwargs: A dictionary containing keyword arguments to customize text generation.\n Some examples: `max_length`, `max_new_tokens`, `temperature`, `top_k`, `top_p`,...\n See Hugging Face's documentation for more information:\n - https://huggingface.co/docs/transformers/main/en/generation_strategies#customize-text-generation\n - https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationConfig\n :param pipeline_kwargs: Dictionary containing keyword arguments used to initialize the pipeline.\n These keyword arguments provide fine-grained control over the pipeline.\n In case of duplication, these kwargs override `model_name_or_path`, `task`, `device`, and `token` init parameters.\n See Hugging Face's [documentation](https://huggingface.co/docs/transformers/en/main_classes/pipelines#transformers.pipeline.task)\n for more information on the available kwargs.\n In this dictionary, you can also include `model_kwargs` to specify the kwargs\n for model initialization:\n https://huggingface.co/docs/transformers/en/main_classes/model#transformers.PreTrainedModel.from_pretrained\n :param stop_words: A list of stop words. If any one of the stop words is generated, the generation is stopped.\n If you provide this parameter, you should not specify the `stopping_criteria` in `generation_kwargs`.\n For some chat models, the output includes both the new text and the original prompt.\n In these cases, it's important to make sure your prompt has no stop words.\n \"\"\"\n transformers_import.check()\n torch_import.check()\n\n pipeline_kwargs = pipeline_kwargs or {}\n generation_kwargs = generation_kwargs or {}\n\n # check if the pipeline_kwargs contain the essential parameters\n # otherwise, populate them with values from other init parameters\n pipeline_kwargs.setdefault(\"model\", model_name_or_path)\n pipeline_kwargs.setdefault(\"token\", token)\n if device is not None and \"device\" not in pipeline_kwargs and \"device_map\" not in pipeline_kwargs:\n pipeline_kwargs[\"device\"] = device\n\n # task identification and validation\n if task is None:\n if \"task\" in pipeline_kwargs:\n task = pipeline_kwargs[\"task\"]\n elif isinstance(pipeline_kwargs[\"model\"], str):\n task = model_info(pipeline_kwargs[\"model\"], token=pipeline_kwargs[\"token\"]).pipeline_tag\n\n if task not in SUPPORTED_TASKS:\n raise ValueError(\n f\"Task '{task}' is not supported. \" f\"The supported tasks are: {', '.join(SUPPORTED_TASKS)}.\"\n )\n pipeline_kwargs[\"task\"] = task\n\n # if not specified, set return_full_text to False for text-generation\n # only generated text is returned (excluding prompt)\n if task == \"text-generation\":\n generation_kwargs.setdefault(\"return_full_text\", False)\n\n if stop_words and \"stopping_criteria\" in generation_kwargs:\n raise ValueError(\n \"Found both the `stop_words` init parameter and the `stopping_criteria` key in `generation_kwargs`. 
\"\n \"Please specify only one of them.\"\n )\n\n self.pipeline_kwargs = pipeline_kwargs\n self.generation_kwargs = generation_kwargs\n self.stop_words = stop_words\n self.pipeline = None\n self.stopping_criteria_list = None\n\n def _get_telemetry_data(self) -> Dict[str, Any]:\n \"\"\"\n Data that is sent to Posthog for usage analytics.\n \"\"\"\n if isinstance(self.pipeline_kwargs[\"model\"], str):\n return {\"model\": self.pipeline_kwargs[\"model\"]}\n return {\"model\": f\"[object of type {type(self.pipeline_kwargs['model'])}]\"}\n\n def warm_up(self):\n if self.pipeline is None:\n self.pipeline = pipeline(**self.pipeline_kwargs)\n\n if self.stop_words and self.stopping_criteria_list is None:\n stop_words_criteria = StopWordsCriteria(\n tokenizer=self.pipeline.tokenizer, stop_words=self.stop_words, device=self.pipeline.device\n )\n self.stopping_criteria_list = StoppingCriteriaList([stop_words_criteria])\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n \"\"\"\n pipeline_kwargs_to_serialize = deepcopy(self.pipeline_kwargs)\n\n # we don't want to serialize valid tokens\n if isinstance(pipeline_kwargs_to_serialize[\"token\"], str):\n pipeline_kwargs_to_serialize[\"token\"] = None\n\n return default_to_dict(\n self,\n pipeline_kwargs=pipeline_kwargs_to_serialize,\n generation_kwargs=self.generation_kwargs,\n stop_words=self.stop_words,\n )\n\n @component.output_types(replies=List[str])\n def run(self, prompt: str, generation_kwargs: Optional[Dict[str, Any]] = None):\n \"\"\"\n Run the text generation model on the given prompt.\n\n :param prompt: A string representing the prompt.\n :param generation_kwargs: Additional keyword arguments for text generation.\n :return: A dictionary containing the generated replies.\n \"\"\"\n if self.pipeline is None:\n raise RuntimeError(\"The generation model has not been loaded. 
Please call warm_up() before running.\")\n\n if not prompt:\n return {\"replies\": []}\n\n # merge generation kwargs from init method with those from run method\n updated_generation_kwargs = {**self.generation_kwargs, **(generation_kwargs or {})}\n\n output = self.pipeline(prompt, stopping_criteria=self.stopping_criteria_list, **updated_generation_kwargs)\n replies = [o[\"generated_text\"] for o in output if \"generated_text\" in o]\n\n if self.stop_words:\n # the output of the pipeline includes the stop word\n replies = [reply.replace(stop_word, \"\").rstrip() for reply in replies for stop_word in self.stop_words]\n\n return {\"replies\": replies}\n", "path": "haystack/preview/components/generators/hugging_face_local.py"}], "after_files": [{"content": "import logging\nfrom typing import Any, Dict, List, Literal, Optional, Union\nfrom copy import deepcopy\n\nfrom haystack.preview import component, default_to_dict\nfrom haystack.preview.lazy_imports import LazyImport\n\nlogger = logging.getLogger(__name__)\n\nSUPPORTED_TASKS = [\"text-generation\", \"text2text-generation\"]\n\nwith LazyImport(message=\"Run 'pip install transformers[torch]'\") as torch_and_transformers_import:\n import torch\n from huggingface_hub import model_info\n from transformers import (\n pipeline,\n StoppingCriteriaList,\n StoppingCriteria,\n PreTrainedTokenizer,\n PreTrainedTokenizerFast,\n )\n\n class StopWordsCriteria(StoppingCriteria):\n \"\"\"\n Stops text generation if any one of the stop words is generated.\n\n Note: When a stop word is encountered, the generation of new text is stopped.\n However, if the stop word is in the prompt itself, it can stop generating new text\n prematurely after the first token. This is particularly important for LLMs designed\n for dialogue generation. For these models, like for example mosaicml/mpt-7b-chat,\n the output includes both the new text and the original prompt. 
Therefore, it's important\n to make sure your prompt has no stop words.\n \"\"\"\n\n def __init__(\n self,\n tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],\n stop_words: List[str],\n device: Union[str, torch.device] = \"cpu\",\n ):\n super().__init__()\n encoded_stop_words = tokenizer(stop_words, add_special_tokens=False, padding=True, return_tensors=\"pt\")\n self.stop_ids = encoded_stop_words.input_ids.to(device)\n\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n for stop_id in self.stop_ids:\n found_stop_word = self.is_stop_word_found(input_ids, stop_id)\n if found_stop_word:\n return True\n return False\n\n def is_stop_word_found(self, generated_text_ids: torch.Tensor, stop_id: torch.Tensor) -> bool:\n generated_text_ids = generated_text_ids[-1]\n len_generated_text_ids = generated_text_ids.size(0)\n len_stop_id = stop_id.size(0)\n result = all(generated_text_ids[len_generated_text_ids - len_stop_id :].eq(stop_id))\n return result\n\n\n@component\nclass HuggingFaceLocalGenerator:\n \"\"\"\n Generator based on a Hugging Face model.\n This component provides an interface to generate text using a Hugging Face model that runs locally.\n\n Usage example:\n ```python\n from haystack.preview.components.generators import HuggingFaceLocalGenerator\n\n generator = HuggingFaceLocalGenerator(model=\"google/flan-t5-large\",\n task=\"text2text-generation\",\n generation_kwargs={\n \"max_new_tokens\": 100,\n \"temperature\": 0.9,\n })\n\n print(generator.run(\"Who is the best American actor?\"))\n # {'replies': ['John Cusack']}\n ```\n \"\"\"\n\n def __init__(\n self,\n model_name_or_path: str = \"google/flan-t5-base\",\n task: Optional[Literal[\"text-generation\", \"text2text-generation\"]] = None,\n device: Optional[str] = None,\n token: Optional[Union[str, bool]] = None,\n generation_kwargs: Optional[Dict[str, Any]] = None,\n pipeline_kwargs: Optional[Dict[str, Any]] = None,\n stop_words: Optional[List[str]] = None,\n ):\n \"\"\"\n :param model_name_or_path: The name or path of a Hugging Face model for text generation,\n for example, \"google/flan-t5-large\".\n If the model is also specified in the `pipeline_kwargs`, this parameter will be ignored.\n :param task: The task for the Hugging Face pipeline.\n Possible values are \"text-generation\" and \"text2text-generation\".\n Generally, decoder-only models like GPT support \"text-generation\",\n while encoder-decoder models like T5 support \"text2text-generation\".\n If the task is also specified in the `pipeline_kwargs`, this parameter will be ignored.\n If not specified, the component will attempt to infer the task from the model name,\n calling the Hugging Face Hub API.\n :param device: The device on which the model is loaded. 
(e.g., \"cpu\", \"cuda:0\").\n If `device` or `device_map` is specified in the `pipeline_kwargs`, this parameter will be ignored.\n :param token: The token to use as HTTP bearer authorization for remote files.\n If True, will use the token generated when running huggingface-cli login (stored in ~/.huggingface).\n If the token is also specified in the `pipeline_kwargs`, this parameter will be ignored.\n :param generation_kwargs: A dictionary containing keyword arguments to customize text generation.\n Some examples: `max_length`, `max_new_tokens`, `temperature`, `top_k`, `top_p`,...\n See Hugging Face's documentation for more information:\n - https://huggingface.co/docs/transformers/main/en/generation_strategies#customize-text-generation\n - https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationConfig\n :param pipeline_kwargs: Dictionary containing keyword arguments used to initialize the pipeline.\n These keyword arguments provide fine-grained control over the pipeline.\n In case of duplication, these kwargs override `model_name_or_path`, `task`, `device`, and `token` init parameters.\n See Hugging Face's [documentation](https://huggingface.co/docs/transformers/en/main_classes/pipelines#transformers.pipeline.task)\n for more information on the available kwargs.\n In this dictionary, you can also include `model_kwargs` to specify the kwargs\n for model initialization:\n https://huggingface.co/docs/transformers/en/main_classes/model#transformers.PreTrainedModel.from_pretrained\n :param stop_words: A list of stop words. If any one of the stop words is generated, the generation is stopped.\n If you provide this parameter, you should not specify the `stopping_criteria` in `generation_kwargs`.\n For some chat models, the output includes both the new text and the original prompt.\n In these cases, it's important to make sure your prompt has no stop words.\n \"\"\"\n torch_and_transformers_import.check()\n\n pipeline_kwargs = pipeline_kwargs or {}\n generation_kwargs = generation_kwargs or {}\n\n # check if the pipeline_kwargs contain the essential parameters\n # otherwise, populate them with values from other init parameters\n pipeline_kwargs.setdefault(\"model\", model_name_or_path)\n pipeline_kwargs.setdefault(\"token\", token)\n if device is not None and \"device\" not in pipeline_kwargs and \"device_map\" not in pipeline_kwargs:\n pipeline_kwargs[\"device\"] = device\n\n # task identification and validation\n if task is None:\n if \"task\" in pipeline_kwargs:\n task = pipeline_kwargs[\"task\"]\n elif isinstance(pipeline_kwargs[\"model\"], str):\n task = model_info(pipeline_kwargs[\"model\"], token=pipeline_kwargs[\"token\"]).pipeline_tag\n\n if task not in SUPPORTED_TASKS:\n raise ValueError(\n f\"Task '{task}' is not supported. \" f\"The supported tasks are: {', '.join(SUPPORTED_TASKS)}.\"\n )\n pipeline_kwargs[\"task\"] = task\n\n # if not specified, set return_full_text to False for text-generation\n # only generated text is returned (excluding prompt)\n if task == \"text-generation\":\n generation_kwargs.setdefault(\"return_full_text\", False)\n\n if stop_words and \"stopping_criteria\" in generation_kwargs:\n raise ValueError(\n \"Found both the `stop_words` init parameter and the `stopping_criteria` key in `generation_kwargs`. 
\"\n \"Please specify only one of them.\"\n )\n\n self.pipeline_kwargs = pipeline_kwargs\n self.generation_kwargs = generation_kwargs\n self.stop_words = stop_words\n self.pipeline = None\n self.stopping_criteria_list = None\n\n def _get_telemetry_data(self) -> Dict[str, Any]:\n \"\"\"\n Data that is sent to Posthog for usage analytics.\n \"\"\"\n if isinstance(self.pipeline_kwargs[\"model\"], str):\n return {\"model\": self.pipeline_kwargs[\"model\"]}\n return {\"model\": f\"[object of type {type(self.pipeline_kwargs['model'])}]\"}\n\n def warm_up(self):\n if self.pipeline is None:\n self.pipeline = pipeline(**self.pipeline_kwargs)\n\n if self.stop_words and self.stopping_criteria_list is None:\n stop_words_criteria = StopWordsCriteria(\n tokenizer=self.pipeline.tokenizer, stop_words=self.stop_words, device=self.pipeline.device\n )\n self.stopping_criteria_list = StoppingCriteriaList([stop_words_criteria])\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n \"\"\"\n pipeline_kwargs_to_serialize = deepcopy(self.pipeline_kwargs)\n\n # we don't want to serialize valid tokens\n if isinstance(pipeline_kwargs_to_serialize[\"token\"], str):\n pipeline_kwargs_to_serialize[\"token\"] = None\n\n return default_to_dict(\n self,\n pipeline_kwargs=pipeline_kwargs_to_serialize,\n generation_kwargs=self.generation_kwargs,\n stop_words=self.stop_words,\n )\n\n @component.output_types(replies=List[str])\n def run(self, prompt: str, generation_kwargs: Optional[Dict[str, Any]] = None):\n \"\"\"\n Run the text generation model on the given prompt.\n\n :param prompt: A string representing the prompt.\n :param generation_kwargs: Additional keyword arguments for text generation.\n :return: A dictionary containing the generated replies.\n \"\"\"\n if self.pipeline is None:\n raise RuntimeError(\"The generation model has not been loaded. Please call warm_up() before running.\")\n\n if not prompt:\n return {\"replies\": []}\n\n # merge generation kwargs from init method with those from run method\n updated_generation_kwargs = {**self.generation_kwargs, **(generation_kwargs or {})}\n\n output = self.pipeline(prompt, stopping_criteria=self.stopping_criteria_list, **updated_generation_kwargs)\n replies = [o[\"generated_text\"] for o in output if \"generated_text\" in o]\n\n if self.stop_words:\n # the output of the pipeline includes the stop word\n replies = [reply.replace(stop_word, \"\").rstrip() for reply in replies for stop_word in self.stop_words]\n\n return {\"replies\": replies}\n", "path": "haystack/preview/components/generators/hugging_face_local.py"}]} | 3,543 | 284 |
gh_patches_debug_471 | rasdani/github-patches | git_diff | pytorch__rl-402 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Loggers registration
## Describe the bug
The loggers [`__init__.py`](https://github.com/facebookresearch/rl/blob/main/torchrl/trainers/loggers/__init__.py) does not reference the implemented loggers. However, the [docstring from the trainer](https://github.com/facebookresearch/rl/blob/806733f27dfa9a878b75b079de9f18df83f54c2d/torchrl/trainers/helpers/trainers.py#L109) assumes that this kind of import works.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchrl/trainers/loggers/__init__.py`
Content:
```
1 # Copyright (c) Meta Platforms, Inc. and affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 from .common import Logger
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchrl/trainers/loggers/__init__.py b/torchrl/trainers/loggers/__init__.py
--- a/torchrl/trainers/loggers/__init__.py
+++ b/torchrl/trainers/loggers/__init__.py
@@ -4,3 +4,6 @@
# LICENSE file in the root directory of this source tree.
from .common import Logger
+from .csv import CSVLogger
+from .tensorboard import TensorboardLogger
+from .wandb import WandbLogger
| {"golden_diff": "diff --git a/torchrl/trainers/loggers/__init__.py b/torchrl/trainers/loggers/__init__.py\n--- a/torchrl/trainers/loggers/__init__.py\n+++ b/torchrl/trainers/loggers/__init__.py\n@@ -4,3 +4,6 @@\n # LICENSE file in the root directory of this source tree.\n \n from .common import Logger\n+from .csv import CSVLogger\n+from .tensorboard import TensorboardLogger\n+from .wandb import WandbLogger\n", "issue": "[BUG] Loggers registration\n## Describe the bug\r\n\r\nThe [`__init__.py`](https://github.com/facebookresearch/rl/blob/main/torchrl/trainers/loggers/__init__.py) from loggers does not reference the implemented loggers. However, the [docstring from the trainer](https://github.com/facebookresearch/rl/blob/806733f27dfa9a878b75b079de9f18df83f54c2d/torchrl/trainers/helpers/trainers.py#L109) assumes that this kind of import can be run.\n", "before_files": [{"content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom .common import Logger\n", "path": "torchrl/trainers/loggers/__init__.py"}], "after_files": [{"content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom .common import Logger\nfrom .csv import CSVLogger\nfrom .tensorboard import TensorboardLogger\nfrom .wandb import WandbLogger\n", "path": "torchrl/trainers/loggers/__init__.py"}]} | 451 | 113 |
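
With the one-line re-exports from the patch above in place, the package-level imports that the trainer helper's docstring assumes will resolve. A small usage illustration follows, assuming torchrl and the optional logger backends (tensorboard, wandb) are installed; it exercises nothing beyond the import surface added by the patch.

```python
# The import style the trainer-helper docstring relies on; it only works once
# the loggers package re-exports these classes, as the patch above does.
from torchrl.trainers.loggers import (
    CSVLogger,
    Logger,
    TensorboardLogger,
    WandbLogger,
)

print([cls.__name__ for cls in (Logger, CSVLogger, TensorboardLogger, WandbLogger)])
```
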
gh_patches_debug_16425 | rasdani/github-patches | git_diff | pantsbuild__pants-15979 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`./pants run` crashes when `remote_cache_eager_fetch = false`
**Describe the bug**
After setting `remote_cache_eager_fetch = false`, a CI step that `./pants run`s a `pex_binary` has started (intermittently) failing with errors like:
```
Exception: Was not present in either the local or remote store: Digest { hash: Fingerprint<46683dec8706b7ac7c4f6011f68b4b8c10ad423ae8ba57745a6f5e01ba5b2f7b>, size_bytes: 11827 }
```
**Pants version**
`PANTS_SHA=5d8a328d72209863986c8959b20305505bc068ba`
**OS**
Linux
**Additional info**
Some BuildSense links where we've seen the failure:
* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/
* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_31_09_277_3793f53b54914135885f2ac951faf210/
* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/base/exceptions.py`
Content:
```
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6
7 class TargetDefinitionException(Exception):
8 """Indicates an invalid target definition.
9
10 :API: public
11 """
12
13 def __init__(self, target, msg):
14 """
15 :param target: the target in question
16 :param string msg: a description of the target misconfiguration
17 """
18 super().__init__(f"Invalid target {target}: {msg}")
19
20
21 class BuildConfigurationError(Exception):
22 """Indicates an error in a pants installation's configuration."""
23
24
25 class BackendConfigurationError(BuildConfigurationError):
26 """Indicates a plugin backend with a missing or malformed register module."""
27
28
29 class MappingError(Exception):
30 """Indicates an error mapping addressable objects."""
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/base/exceptions.py b/src/python/pants/base/exceptions.py
--- a/src/python/pants/base/exceptions.py
+++ b/src/python/pants/base/exceptions.py
@@ -3,6 +3,11 @@
from __future__ import annotations
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from pants.engine.internals.native_engine import PyFailure
+
class TargetDefinitionException(Exception):
"""Indicates an invalid target definition.
@@ -28,3 +33,15 @@
class MappingError(Exception):
"""Indicates an error mapping addressable objects."""
+
+
+class NativeEngineFailure(Exception):
+ """A wrapper around a `Failure` instance.
+
+ TODO: This type is defined in Python because pyo3 doesn't support declaring Exceptions with
+ additional fields. See https://github.com/PyO3/pyo3/issues/295
+ """
+
+ def __init__(self, msg: str, failure: PyFailure) -> None:
+ super().__init__(msg)
+ self.failure = failure
| {"golden_diff": "diff --git a/src/python/pants/base/exceptions.py b/src/python/pants/base/exceptions.py\n--- a/src/python/pants/base/exceptions.py\n+++ b/src/python/pants/base/exceptions.py\n@@ -3,6 +3,11 @@\n \n from __future__ import annotations\n \n+from typing import TYPE_CHECKING\n+\n+if TYPE_CHECKING:\n+ from pants.engine.internals.native_engine import PyFailure\n+\n \n class TargetDefinitionException(Exception):\n \"\"\"Indicates an invalid target definition.\n@@ -28,3 +33,15 @@\n \n class MappingError(Exception):\n \"\"\"Indicates an error mapping addressable objects.\"\"\"\n+\n+\n+class NativeEngineFailure(Exception):\n+ \"\"\"A wrapper around a `Failure` instance.\n+\n+ TODO: This type is defined in Python because pyo3 doesn't support declaring Exceptions with\n+ additional fields. See https://github.com/PyO3/pyo3/issues/295\n+ \"\"\"\n+\n+ def __init__(self, msg: str, failure: PyFailure) -> None:\n+ super().__init__(msg)\n+ self.failure = failure\n", "issue": "`./pants run` crashes when `remote_cache_eager_fetch = false`\n**Describe the bug**\r\n\r\nAfter setting `remote_cache_eager_fetch = false`, a CI step that `./pants run`s a `pex_binary` has started (intermittently) failing with errors like:\r\n```\r\n Exception: Was not present in either the local or remote store: Digest { hash: Fingerprint<46683dec8706b7ac7c4f6011f68b4b8c10ad423ae8ba57745a6f5e01ba5b2f7b>, size_bytes: 11827 }\r\n```\r\n\r\n**Pants version**\r\n\r\n`PANTS_SHA=5d8a328d72209863986c8959b20305505bc068ba`\r\n\r\n**OS**\r\n\r\nLinux\r\n\r\n**Additional info**\r\n\r\nSome BuildSense links where we've seen the failure:\r\n* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/\r\n* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_31_09_277_3793f53b54914135885f2ac951faf210/\r\n* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/\r\n\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\n\nclass TargetDefinitionException(Exception):\n \"\"\"Indicates an invalid target definition.\n\n :API: public\n \"\"\"\n\n def __init__(self, target, msg):\n \"\"\"\n :param target: the target in question\n :param string msg: a description of the target misconfiguration\n \"\"\"\n super().__init__(f\"Invalid target {target}: {msg}\")\n\n\nclass BuildConfigurationError(Exception):\n \"\"\"Indicates an error in a pants installation's configuration.\"\"\"\n\n\nclass BackendConfigurationError(BuildConfigurationError):\n \"\"\"Indicates a plugin backend with a missing or malformed register module.\"\"\"\n\n\nclass MappingError(Exception):\n \"\"\"Indicates an error mapping addressable objects.\"\"\"\n", "path": "src/python/pants/base/exceptions.py"}], "after_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from pants.engine.internals.native_engine import PyFailure\n\n\nclass TargetDefinitionException(Exception):\n \"\"\"Indicates an invalid target definition.\n\n :API: public\n \"\"\"\n\n def __init__(self, target, msg):\n \"\"\"\n :param target: the target in question\n :param string msg: a 
description of the target misconfiguration\n \"\"\"\n super().__init__(f\"Invalid target {target}: {msg}\")\n\n\nclass BuildConfigurationError(Exception):\n \"\"\"Indicates an error in a pants installation's configuration.\"\"\"\n\n\nclass BackendConfigurationError(BuildConfigurationError):\n \"\"\"Indicates a plugin backend with a missing or malformed register module.\"\"\"\n\n\nclass MappingError(Exception):\n \"\"\"Indicates an error mapping addressable objects.\"\"\"\n\n\nclass NativeEngineFailure(Exception):\n \"\"\"A wrapper around a `Failure` instance.\n\n TODO: This type is defined in Python because pyo3 doesn't support declaring Exceptions with\n additional fields. See https://github.com/PyO3/pyo3/issues/295\n \"\"\"\n\n def __init__(self, msg: str, failure: PyFailure) -> None:\n super().__init__(msg)\n self.failure = failure\n", "path": "src/python/pants/base/exceptions.py"}]} | 942 | 243 |
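
The `NativeEngineFailure` added by the patch above is an instance of a common wrapper-exception pattern: when a binding layer (here pyo3) cannot attach extra fields to an exception, a Python-side subclass carries the structured failure object alongside the message. A generic sketch follows; the `Failure` dataclass is a stand-in for the engine's `PyFailure`, not the real pants type.

```python
from dataclasses import dataclass


@dataclass
class Failure:
    # Stand-in for the engine-side PyFailure; the field name is illustrative.
    details: str


class NativeEngineFailure(Exception):
    """Wraps a Failure so `except` handlers can recover structured details."""

    def __init__(self, msg: str, failure: Failure) -> None:
        super().__init__(msg)
        self.failure = failure


try:
    raise NativeEngineFailure("process failed", Failure(details="digest missing from store"))
except NativeEngineFailure as err:
    print(err, "->", err.failure.details)
```
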
gh_patches_debug_2986 | rasdani/github-patches | git_diff | coala__coala-3908 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fail to install and py.test on docker environment.
<!-- Hello! If you're filing a bug, please include every step so as to help us reproduce it on our machines. If you're unsure about how to file an issue, use the issue template. If you need any help regarding usage of coala, check out the documentation or hit us up on chat. You can ignore or delete this text, it is commented and won't appear when the issue is submitted or previewed.
Chat: https://coala.io/chat
Issue Template: https://github.com/coala/coala/blob/master/CONTRIBUTING.rst#filing-issues
Documentation: https://docs.coala.io
-->
When I try to install with `python setup.py install`, it fails with this message.
`UnicodeEncodeError: 'ascii' codec can't encode character '\xfc' in position 15224: ordinal not in range(128)`
The same thing happens when I try to run the unit tests locally.
It needs to be fixed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import datetime
4 import locale
5 import platform
6 import sys
7 from os import getenv
8 from subprocess import call
9
10 import setuptools.command.build_py
11 from setuptools import find_packages, setup
12 from setuptools.command.test import test as TestCommand
13
14 from coalib import VERSION, assert_supported_version, get_version
15 from coalib.misc.BuildManPage import BuildManPage
16
17 try:
18 locale.getlocale()
19 except (ValueError, UnicodeError):
20 locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
21
22
23 assert_supported_version()
24
25
26 class BuildPyCommand(setuptools.command.build_py.build_py):
27
28 def run(self):
29 if platform.system() != 'Windows':
30 self.run_command('build_manpage')
31 setuptools.command.build_py.build_py.run(self)
32
33
34 class PyTestCommand(TestCommand):
35
36 def run_tests(self):
37 # import here, cause outside the eggs aren't loaded
38 import pytest
39 errno = pytest.main([])
40 sys.exit(errno)
41
42
43 class BuildDocsCommand(setuptools.command.build_py.build_py):
44 apidoc_command = (
45 'sphinx-apidoc', '-f', '-o', 'docs', '--no-toc', 'coalib'
46 )
47 doc_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')
48
49 def run(self):
50 errOne = call(self.apidoc_command)
51 errTwo = call(self.doc_command)
52 sys.exit(errOne or errTwo)
53
54
55 # Generate API documentation only if we are running on readthedocs.io
56 on_rtd = getenv('READTHEDOCS', None) is not None
57 if on_rtd:
58 call(BuildDocsCommand.apidoc_command)
59 if 'dev' in VERSION:
60 current_version = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
61 call(['python3', '.misc/adjust_version_number.py', 'coalib/VERSION',
62 '-b {}'.format(current_version)])
63 VERSION = get_version()
64
65 with open('requirements.txt') as requirements:
66 required = requirements.read().splitlines()
67
68 with open('test-requirements.txt') as requirements:
69 test_required = requirements.read().splitlines()
70
71 with open('README.rst') as readme:
72 long_description = readme.read()
73
74
75 if __name__ == '__main__':
76 if platform.system() != 'Windows':
77 data_files = [('.', ['coala.1'])]
78 else:
79 data_files = [('.', [])]
80
81 setup(name='coala',
82 version=VERSION,
83 description='Linting and Fixing Code for All Languages',
84 author='The coala developers',
85 author_email='[email protected]',
86 maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\xfcger'
87 if not on_rtd else 'L.S., F.N., M.K.',
88 maintainer_email=('[email protected], '
89 '[email protected], '
90 '[email protected]'),
91 url='http://coala.io/',
92 platforms='any',
93 packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),
94 install_requires=required,
95 tests_require=test_required,
96 package_data={'coalib': ['default_coafile', 'VERSION',
97 'bearlib/languages/documentation/*.coalang']
98 },
99 license='AGPL-3.0',
100 data_files=data_files,
101 long_description=long_description,
102 entry_points={
103 'console_scripts': [
104 'coala = coalib.coala:main',
105 'coala-ci = coalib.coala_ci:main',
106 'coala-json = coalib.coala_json:main',
107 'coala-format = coalib.coala_format:main',
108 'coala-delete-orig = coalib.coala_delete_orig:main']},
109 # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
110 classifiers=[
111 'Development Status :: 4 - Beta',
112
113 'Environment :: Console',
114 'Environment :: MacOS X',
115 'Environment :: Win32 (MS Windows)',
116 'Environment :: X11 Applications :: Gnome',
117
118 'Intended Audience :: Science/Research',
119 'Intended Audience :: Developers',
120
121 'License :: OSI Approved :: GNU Affero General Public License '
122 'v3 or later (AGPLv3+)',
123
124 'Operating System :: OS Independent',
125
126 'Programming Language :: Python :: Implementation :: CPython',
127 'Programming Language :: Python :: 3.4',
128 'Programming Language :: Python :: 3.5',
129 'Programming Language :: Python :: 3 :: Only',
130
131 'Topic :: Scientific/Engineering :: Information Analysis',
132 'Topic :: Software Development :: Quality Assurance',
133 'Topic :: Text Processing :: Linguistic'],
134 cmdclass={'build_manpage': BuildManPage,
135 'build_py': BuildPyCommand,
136 'docs': BuildDocsCommand,
137 'test': PyTestCommand})
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,10 @@
from coalib.misc.BuildManPage import BuildManPage
try:
- locale.getlocale()
+ lc = locale.getlocale()
+ pf = platform.system()
+ if pf != 'Windows' and lc == (None, None):
+ locale.setlocale(locale.LC_ALL, 'C.UTF-8')
except (ValueError, UnicodeError):
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,10 @@\n from coalib.misc.BuildManPage import BuildManPage\n \n try:\n- locale.getlocale()\n+ lc = locale.getlocale()\n+ pf = platform.system()\n+ if pf != 'Windows' and lc == (None, None):\n+ locale.setlocale(locale.LC_ALL, 'C.UTF-8')\n except (ValueError, UnicodeError):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n", "issue": "Fail to install and py.test on docker environment.\n<!-- Hello! If you're filing a bug, please include every step so as to help us reproduce it on our machines. If you're unsure about how to file an issue, use the issue template. If you need any help regarding usage of coala, check out the documentation or hit us up on chat. You can ignore or delete this text, it is commented and won't appear when the issue is submitted or previewed.\r\n\r\nChat: https://coala.io/chat\r\nIssue Template: https://github.com/coala/coala/blob/master/CONTRIBUTING.rst#filing-issues\r\nDocumentation: https://docs.coala.io\r\n-->\r\nWhen I try to install by `python setup.py install`, it is failed with this message.\r\n`UnicodeEncodeError: 'ascii' codec can't encode character '\\xfc' in position 15224: ordinal not in range(128)`\r\nAlso, the same happening when I try to run unit test on local.\r\nIt needs to be fixed.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport datetime\nimport locale\nimport platform\nimport sys\nfrom os import getenv\nfrom subprocess import call\n\nimport setuptools.command.build_py\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\nfrom coalib import VERSION, assert_supported_version, get_version\nfrom coalib.misc.BuildManPage import BuildManPage\n\ntry:\n locale.getlocale()\nexcept (ValueError, UnicodeError):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\n\nassert_supported_version()\n\n\nclass BuildPyCommand(setuptools.command.build_py.build_py):\n\n def run(self):\n if platform.system() != 'Windows':\n self.run_command('build_manpage')\n setuptools.command.build_py.build_py.run(self)\n\n\nclass PyTestCommand(TestCommand):\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main([])\n sys.exit(errno)\n\n\nclass BuildDocsCommand(setuptools.command.build_py.build_py):\n apidoc_command = (\n 'sphinx-apidoc', '-f', '-o', 'docs', '--no-toc', 'coalib'\n )\n doc_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')\n\n def run(self):\n errOne = call(self.apidoc_command)\n errTwo = call(self.doc_command)\n sys.exit(errOne or errTwo)\n\n\n# Generate API documentation only if we are running on readthedocs.io\non_rtd = getenv('READTHEDOCS', None) is not None\nif on_rtd:\n call(BuildDocsCommand.apidoc_command)\n if 'dev' in VERSION:\n current_version = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n call(['python3', '.misc/adjust_version_number.py', 'coalib/VERSION',\n '-b {}'.format(current_version)])\n VERSION = get_version()\n\nwith open('requirements.txt') as requirements:\n required = requirements.read().splitlines()\n\nwith open('test-requirements.txt') as requirements:\n test_required = requirements.read().splitlines()\n\nwith open('README.rst') as readme:\n long_description = readme.read()\n\n\nif __name__ == '__main__':\n if platform.system() != 'Windows':\n data_files = [('.', ['coala.1'])]\n else:\n data_files = [('.', [])]\n\n setup(name='coala',\n version=VERSION,\n description='Linting and Fixing Code for All 
Languages',\n author='The coala developers',\n author_email='[email protected]',\n maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\\xfcger'\n if not on_rtd else 'L.S., F.N., M.K.',\n maintainer_email=('[email protected], '\n '[email protected], '\n '[email protected]'),\n url='http://coala.io/',\n platforms='any',\n packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),\n install_requires=required,\n tests_require=test_required,\n package_data={'coalib': ['default_coafile', 'VERSION',\n 'bearlib/languages/documentation/*.coalang']\n },\n license='AGPL-3.0',\n data_files=data_files,\n long_description=long_description,\n entry_points={\n 'console_scripts': [\n 'coala = coalib.coala:main',\n 'coala-ci = coalib.coala_ci:main',\n 'coala-json = coalib.coala_json:main',\n 'coala-format = coalib.coala_format:main',\n 'coala-delete-orig = coalib.coala_delete_orig:main']},\n # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 4 - Beta',\n\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications :: Gnome',\n\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n\n 'License :: OSI Approved :: GNU Affero General Public License '\n 'v3 or later (AGPLv3+)',\n\n 'Operating System :: OS Independent',\n\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3 :: Only',\n\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Text Processing :: Linguistic'],\n cmdclass={'build_manpage': BuildManPage,\n 'build_py': BuildPyCommand,\n 'docs': BuildDocsCommand,\n 'test': PyTestCommand})\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport datetime\nimport locale\nimport platform\nimport sys\nfrom os import getenv\nfrom subprocess import call\n\nimport setuptools.command.build_py\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\nfrom coalib import VERSION, assert_supported_version, get_version\nfrom coalib.misc.BuildManPage import BuildManPage\n\ntry:\n lc = locale.getlocale()\n pf = platform.system()\n if pf != 'Windows' and lc == (None, None):\n locale.setlocale(locale.LC_ALL, 'C.UTF-8')\nexcept (ValueError, UnicodeError):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\n\nassert_supported_version()\n\n\nclass BuildPyCommand(setuptools.command.build_py.build_py):\n\n def run(self):\n if platform.system() != 'Windows':\n self.run_command('build_manpage')\n setuptools.command.build_py.build_py.run(self)\n\n\nclass PyTestCommand(TestCommand):\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main([])\n sys.exit(errno)\n\n\nclass BuildDocsCommand(setuptools.command.build_py.build_py):\n apidoc_command = (\n 'sphinx-apidoc', '-f', '-o', 'docs', '--no-toc', 'coalib'\n )\n doc_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')\n\n def run(self):\n errOne = call(self.apidoc_command)\n errTwo = call(self.doc_command)\n sys.exit(errOne or errTwo)\n\n\n# Generate API documentation only if we are running on readthedocs.io\non_rtd = getenv('READTHEDOCS', None) is not None\nif on_rtd:\n call(BuildDocsCommand.apidoc_command)\n if 'dev' in VERSION:\n current_version = 
datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n call(['python3', '.misc/adjust_version_number.py', 'coalib/VERSION',\n '-b {}'.format(current_version)])\n VERSION = get_version()\n\nwith open('requirements.txt') as requirements:\n required = requirements.read().splitlines()\n\nwith open('test-requirements.txt') as requirements:\n test_required = requirements.read().splitlines()\n\nwith open('README.rst') as readme:\n long_description = readme.read()\n\n\nif __name__ == '__main__':\n if platform.system() != 'Windows':\n data_files = [('.', ['coala.1'])]\n else:\n data_files = [('.', [])]\n\n setup(name='coala',\n version=VERSION,\n description='Linting and Fixing Code for All Languages',\n author='The coala developers',\n author_email='[email protected]',\n maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\\xfcger'\n if not on_rtd else 'L.S., F.N., M.K.',\n maintainer_email=('[email protected], '\n '[email protected], '\n '[email protected]'),\n url='http://coala.io/',\n platforms='any',\n packages=find_packages(exclude=['build.*', 'tests', 'tests.*']),\n install_requires=required,\n tests_require=test_required,\n package_data={'coalib': ['default_coafile', 'VERSION',\n 'bearlib/languages/documentation/*.coalang']\n },\n license='AGPL-3.0',\n data_files=data_files,\n long_description=long_description,\n entry_points={\n 'console_scripts': [\n 'coala = coalib.coala:main',\n 'coala-ci = coalib.coala_ci:main',\n 'coala-json = coalib.coala_json:main',\n 'coala-format = coalib.coala_format:main',\n 'coala-delete-orig = coalib.coala_delete_orig:main']},\n # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 4 - Beta',\n\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications :: Gnome',\n\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n\n 'License :: OSI Approved :: GNU Affero General Public License '\n 'v3 or later (AGPLv3+)',\n\n 'Operating System :: OS Independent',\n\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3 :: Only',\n\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Text Processing :: Linguistic'],\n cmdclass={'build_manpage': BuildManPage,\n 'build_py': BuildPyCommand,\n 'docs': BuildDocsCommand,\n 'test': PyTestCommand})\n", "path": "setup.py"}]} | 1,853 | 124 |
gh_patches_debug_28687 | rasdani/github-patches | git_diff | iterative__dvc-2853 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gs: support directories as external dependencies/outputs
I have a similar issue to #2678 but for GS.
I have a bucket with the following structure
```
my_bucket
├── data
│ ├── img1.png
│ ├── img2.png
│ ├── ...
└── cache
```
I have then created a clean project
```
$ git init
$ dvc init
$ dvc remote add gscache gs://my_bucket/cache
$ dvc config cache.gs gscache
$ dvc add gs://my_bucket/data
```
The output is as follows:
```
100%|██████████|Add 1/1 [00:00<00:00, 1.21file/s]
ERROR: output 'gs://my_bucket/data' does not exist
```
Adding a single file works (`dvc add gs://my_bucket/data/img1.png`).
A more verbose version:
```
$ dvc add gs://my_bucket/data -v
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
100%|██████████|Add 1/1 [00:01<00:00, 1.63s/file]
DEBUG: SELECT count from state_info WHERE rowid=?
DEBUG: fetched: [(0,)]
DEBUG: UPDATE state_info SET count = ? WHERE rowid = ?
ERROR: output 'gs://my_bucket/data' does not exist
------------------------------------------------------------
Traceback (most recent call last):
File "/home/egnyte/anaconda3/envs/dvc/lib/python3.7/site-packages/dvc/command/add.py", line 25, in run
fname=self.args.file,
File "/home/egnyte/anaconda3/envs/dvc/lib/python3.7/site-packages/dvc/repo/__init__.py", line 35, in wrapper
ret = f(repo, *args, **kwargs)
File "/home/egnyte/anaconda3/envs/dvc/lib/python3.7/site-packages/dvc/repo/scm_context.py", line 4, in run
result = method(repo, *args, **kw)
File "/home/egnyte/anaconda3/envs/dvc/lib/python3.7/site-packages/dvc/repo/add.py", line 53, in add
stage.save()
File "/home/egnyte/anaconda3/envs/dvc/lib/python3.7/site-packages/dvc/stage.py", line 716, in save
out.save()
File "/home/egnyte/anaconda3/envs/dvc/lib/python3.7/site-packages/dvc/output/base.py", line 219, in save
raise self.DoesNotExistError(self)
dvc.output.base.OutputDoesNotExistError: output 'gs://my_bucket/data' does not exist
------------------------------------------------------------
```
`dvc --version` = 0.68.1. I am using Ubuntu; I installed using conda, with Python 3.7.5.
--- END ISSUE ---
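The trace suggests the GS remote only treats exact object keys as existing, so a bucket "directory" such as `data/` (which exists only as a key prefix) fails the output-existence check. As a hedged sketch of the general approach — not DVC's actual code, and assuming the `google-cloud-storage` client plus the bucket layout from the issue — directory-style existence can be probed by listing blobs under the prefix:
```python
from google.cloud import storage


def gs_isfile(client, bucket_name, key):
    # True only if an object exists at exactly this key.
    return storage.Blob(key, client.bucket(bucket_name)).exists(client)


def gs_isdir(client, bucket_name, key):
    # True if any object exists under the key treated as a prefix ("directory").
    prefix = key.rstrip("/") + "/"
    return any(True for _ in client.list_blobs(bucket_name, prefix=prefix, max_results=1))


client = storage.Client()
print(gs_isfile(client, "my_bucket", "data"))  # False: no object named exactly "data"
print(gs_isdir(client, "my_bucket", "data"))   # True: "data/img1.png" etc. share the prefix
```
An output path would then count as existing if either probe succeeds, which is essentially what the patch further below adds to `RemoteGS`.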
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/remote/gs.py`
Content:
```
1 from __future__ import unicode_literals, division
2
3 import logging
4 from datetime import timedelta
5 from functools import wraps
6 import io
7 import os.path
8 import threading
9
10 from funcy import cached_property, wrap_prop
11
12 from dvc.config import Config
13 from dvc.exceptions import DvcException
14 from dvc.path_info import CloudURLInfo
15 from dvc.progress import Tqdm
16 from dvc.remote.base import RemoteBASE
17 from dvc.scheme import Schemes
18 from dvc.utils.compat import FileNotFoundError # skipcq: PYL-W0622
19
20 logger = logging.getLogger(__name__)
21
22
23 def dynamic_chunk_size(func):
24 @wraps(func)
25 def wrapper(*args, **kwargs):
26 import requests
27 from google.cloud.storage.blob import Blob
28
29 # `ConnectionError` may be due to too large `chunk_size`
30 # (see [#2572]) so try halving on error.
31 # Note: start with 40 * [default: 256K] = 10M.
32 # Note: must be multiple of 256K.
33 #
34 # [#2572]: https://github.com/iterative/dvc/issues/2572
35
36 # skipcq: PYL-W0212
37 multiplier = 40
38 while True:
39 try:
40 # skipcq: PYL-W0212
41 chunk_size = Blob._CHUNK_SIZE_MULTIPLE * multiplier
42 return func(*args, chunk_size=chunk_size, **kwargs)
43 except requests.exceptions.ConnectionError:
44 multiplier //= 2
45 if not multiplier:
46 raise
47
48 return wrapper
49
50
51 @dynamic_chunk_size
52 def _upload_to_bucket(
53 bucket,
54 from_file,
55 to_info,
56 chunk_size=None,
57 name=None,
58 no_progress_bar=True,
59 ):
60 blob = bucket.blob(to_info.path, chunk_size=chunk_size)
61 with Tqdm(
62 desc=name or to_info.path,
63 total=os.path.getsize(from_file),
64 bytes=True,
65 disable=no_progress_bar,
66 ) as pbar:
67 with io.open(from_file, mode="rb") as fobj:
68 raw_read = fobj.read
69
70 def read(size=chunk_size):
71 res = raw_read(size)
72 if res:
73 pbar.update(len(res))
74 return res
75
76 fobj.read = read
77 blob.upload_from_file(fobj)
78
79
80 class RemoteGS(RemoteBASE):
81 scheme = Schemes.GS
82 path_cls = CloudURLInfo
83 REQUIRES = {"google-cloud-storage": "google.cloud.storage"}
84 PARAM_CHECKSUM = "md5"
85
86 def __init__(self, repo, config):
87 super(RemoteGS, self).__init__(repo, config)
88
89 url = config.get(Config.SECTION_REMOTE_URL, "gs:///")
90 self.path_info = self.path_cls(url)
91
92 self.projectname = config.get(Config.SECTION_GCP_PROJECTNAME, None)
93 self.credentialpath = config.get(Config.SECTION_GCP_CREDENTIALPATH)
94
95 @wrap_prop(threading.Lock())
96 @cached_property
97 def gs(self):
98 from google.cloud.storage import Client
99
100 return (
101 Client.from_service_account_json(self.credentialpath)
102 if self.credentialpath
103 else Client(self.projectname)
104 )
105
106 def get_file_checksum(self, path_info):
107 import base64
108 import codecs
109
110 bucket = path_info.bucket
111 path = path_info.path
112 blob = self.gs.bucket(bucket).get_blob(path)
113 if not blob:
114 return None
115
116 b64_md5 = blob.md5_hash
117 md5 = base64.b64decode(b64_md5)
118 return codecs.getencoder("hex")(md5)[0].decode("utf-8")
119
120 def copy(self, from_info, to_info):
121 from_bucket = self.gs.bucket(from_info.bucket)
122 blob = from_bucket.get_blob(from_info.path)
123 if not blob:
124 msg = "'{}' doesn't exist in the cloud".format(from_info.path)
125 raise DvcException(msg)
126
127 to_bucket = self.gs.bucket(to_info.bucket)
128 from_bucket.copy_blob(blob, to_bucket, new_name=to_info.path)
129
130 def remove(self, path_info):
131 if path_info.scheme != "gs":
132 raise NotImplementedError
133
134 logger.debug("Removing gs://{}".format(path_info))
135 blob = self.gs.bucket(path_info.bucket).get_blob(path_info.path)
136 if not blob:
137 return
138
139 blob.delete()
140
141 def _list_paths(self, bucket, prefix):
142 for blob in self.gs.bucket(bucket).list_blobs(prefix=prefix):
143 yield blob.name
144
145 def list_cache_paths(self):
146 return self._list_paths(self.path_info.bucket, self.path_info.path)
147
148 def exists(self, path_info):
149 paths = set(self._list_paths(path_info.bucket, path_info.path))
150 return any(path_info.path == path for path in paths)
151
152 def _upload(self, from_file, to_info, name=None, no_progress_bar=True):
153 bucket = self.gs.bucket(to_info.bucket)
154 _upload_to_bucket(
155 bucket,
156 from_file,
157 to_info,
158 name=name,
159 no_progress_bar=no_progress_bar,
160 )
161
162 def _download(self, from_info, to_file, name=None, no_progress_bar=True):
163 bucket = self.gs.bucket(from_info.bucket)
164 blob = bucket.get_blob(from_info.path)
165 with Tqdm(
166 desc=name or from_info.path,
167 total=blob.size,
168 bytes=True,
169 disable=no_progress_bar,
170 ) as pbar:
171 with io.open(to_file, mode="wb") as fobj:
172 raw_write = fobj.write
173
174 def write(byte_string):
175 raw_write(byte_string)
176 pbar.update(len(byte_string))
177
178 fobj.write = write
179 blob.download_to_file(fobj)
180
181 def _generate_download_url(self, path_info, expires=3600):
182 expiration = timedelta(seconds=int(expires))
183
184 bucket = self.gs.bucket(path_info.bucket)
185 blob = bucket.get_blob(path_info.path)
186 if blob is None:
187 raise FileNotFoundError
188 return blob.generate_signed_url(expiration=expiration)
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/remote/gs.py b/dvc/remote/gs.py
--- a/dvc/remote/gs.py
+++ b/dvc/remote/gs.py
@@ -138,16 +138,45 @@
blob.delete()
- def _list_paths(self, bucket, prefix):
- for blob in self.gs.bucket(bucket).list_blobs(prefix=prefix):
+ def _list_paths(self, path_info, max_items=None):
+ for blob in self.gs.bucket(path_info.bucket).list_blobs(
+ prefix=path_info.path, max_results=max_items
+ ):
yield blob.name
def list_cache_paths(self):
- return self._list_paths(self.path_info.bucket, self.path_info.path)
+ return self._list_paths(self.path_info)
+
+ def walk_files(self, path_info):
+ for fname in self._list_paths(path_info / ""):
+ # skip nested empty directories
+ if fname.endswith("/"):
+ continue
+ yield path_info.replace(fname)
+
+ def makedirs(self, path_info):
+ self.gs.bucket(path_info.bucket).blob(
+ (path_info / "").path
+ ).upload_from_string("")
+
+ def isdir(self, path_info):
+ dir_path = path_info / ""
+ return bool(list(self._list_paths(dir_path, max_items=1)))
+
+ def isfile(self, path_info):
+ if path_info.path.endswith("/"):
+ return False
+
+ blob = self.gs.bucket(path_info.bucket).blob(path_info.path)
+ return blob.exists()
def exists(self, path_info):
- paths = set(self._list_paths(path_info.bucket, path_info.path))
- return any(path_info.path == path for path in paths)
+ """Check if the blob exists. If it does not exist,
+ it could be a part of a directory path.
+
+ eg: if `data/file.txt` exists, check for `data` should return True
+ """
+ return self.isfile(path_info) or self.isdir(path_info)
def _upload(self, from_file, to_info, name=None, no_progress_bar=True):
bucket = self.gs.bucket(to_info.bucket)
| {"golden_diff": "diff --git a/dvc/remote/gs.py b/dvc/remote/gs.py\n--- a/dvc/remote/gs.py\n+++ b/dvc/remote/gs.py\n@@ -138,16 +138,45 @@\n \n blob.delete()\n \n- def _list_paths(self, bucket, prefix):\n- for blob in self.gs.bucket(bucket).list_blobs(prefix=prefix):\n+ def _list_paths(self, path_info, max_items=None):\n+ for blob in self.gs.bucket(path_info.bucket).list_blobs(\n+ prefix=path_info.path, max_results=max_items\n+ ):\n yield blob.name\n \n def list_cache_paths(self):\n- return self._list_paths(self.path_info.bucket, self.path_info.path)\n+ return self._list_paths(self.path_info)\n+\n+ def walk_files(self, path_info):\n+ for fname in self._list_paths(path_info / \"\"):\n+ # skip nested empty directories\n+ if fname.endswith(\"/\"):\n+ continue\n+ yield path_info.replace(fname)\n+\n+ def makedirs(self, path_info):\n+ self.gs.bucket(path_info.bucket).blob(\n+ (path_info / \"\").path\n+ ).upload_from_string(\"\")\n+\n+ def isdir(self, path_info):\n+ dir_path = path_info / \"\"\n+ return bool(list(self._list_paths(dir_path, max_items=1)))\n+\n+ def isfile(self, path_info):\n+ if path_info.path.endswith(\"/\"):\n+ return False\n+\n+ blob = self.gs.bucket(path_info.bucket).blob(path_info.path)\n+ return blob.exists()\n \n def exists(self, path_info):\n- paths = set(self._list_paths(path_info.bucket, path_info.path))\n- return any(path_info.path == path for path in paths)\n+ \"\"\"Check if the blob exists. If it does not exist,\n+ it could be a part of a directory path.\n+\n+ eg: if `data/file.txt` exists, check for `data` should return True\n+ \"\"\"\n+ return self.isfile(path_info) or self.isdir(path_info)\n \n def _upload(self, from_file, to_info, name=None, no_progress_bar=True):\n bucket = self.gs.bucket(to_info.bucket)\n", "issue": "gs: support directories as external dependencies/outputs\nI have a similar issue to #2678 but for GS.\r\n\r\nI have a bucket with the following structure\r\n```\r\nmy_bucket\r\n \u251c\u2500\u2500 data\r\n \u2502 \u251c\u2500\u2500 img1.png\r\n \u2502 \u251c\u2500\u2500 img2.png\r\n \u2502 \u251c\u2500\u2500 ...\r\n \u2514\u2500\u2500 cache\r\n```\r\n\r\nI have then created a clean project\r\n\r\n```\r\n$ git init\r\n$ dvc init\r\n$ dvc remote add gscache gs://my_bucket/cache\r\n$ dvc config cache.gs gscache\r\n$ dvc add gs://my_bucket/data\r\n```\r\n\r\nThe output is as follows:\r\n```\r\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588|Add 1/1 [00:00<00:00, 1.21file/s]\r\nERROR: output 'gs://my_bucket/data' does not exist\r\n```\r\n\r\nAdding a single file works (`dvc add gs://my_bucket/data/img1.png`).\r\n\r\nA more verbose version:\r\n```\r\n$ dvc add gs://my_bucket/data -v \r\nDEBUG: PRAGMA user_version;\r\nDEBUG: fetched: [(3,)]\r\nDEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)\r\nDEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)\r\nDEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)\r\nDEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)\r\nDEBUG: PRAGMA user_version = 3;\r\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588|Add 1/1 [00:01<00:00, 1.63s/file]\r\nDEBUG: SELECT count from state_info WHERE rowid=?\r\nDEBUG: fetched: [(0,)]\r\nDEBUG: UPDATE state_info SET count = ? 
WHERE rowid = ?\r\nERROR: output 'gs://my_bucket/data' does not exist\r\n------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/home/egnyte/anaconda3/envs/dvc/lib/python3.7/site-packages/dvc/command/add.py\", line 25, in run\r\n fname=self.args.file,\r\n File \"/home/egnyte/anaconda3/envs/dvc/lib/python3.7/site-packages/dvc/repo/__init__.py\", line 35, in wrapper\r\n ret = f(repo, *args, **kwargs)\r\n File \"/home/egnyte/anaconda3/envs/dvc/lib/python3.7/site-packages/dvc/repo/scm_context.py\", line 4, in run\r\n result = method(repo, *args, **kw)\r\n File \"/home/egnyte/anaconda3/envs/dvc/lib/python3.7/site-packages/dvc/repo/add.py\", line 53, in add\r\n stage.save()\r\n File \"/home/egnyte/anaconda3/envs/dvc/lib/python3.7/site-packages/dvc/stage.py\", line 716, in save\r\n out.save()\r\n File \"/home/egnyte/anaconda3/envs/dvc/lib/python3.7/site-packages/dvc/output/base.py\", line 219, in save\r\n raise self.DoesNotExistError(self)\r\ndvc.output.base.OutputDoesNotExistError: output 'gs://my_bucket/data' does not exist\r\n------------------------------------------------------------\r\n```\r\n\r\n\r\n`dvc --version = 0.68.1`. I am using ubuntu, I installed using conda, python 3.7.5.\n", "before_files": [{"content": "from __future__ import unicode_literals, division\n\nimport logging\nfrom datetime import timedelta\nfrom functools import wraps\nimport io\nimport os.path\nimport threading\n\nfrom funcy import cached_property, wrap_prop\n\nfrom dvc.config import Config\nfrom dvc.exceptions import DvcException\nfrom dvc.path_info import CloudURLInfo\nfrom dvc.progress import Tqdm\nfrom dvc.remote.base import RemoteBASE\nfrom dvc.scheme import Schemes\nfrom dvc.utils.compat import FileNotFoundError # skipcq: PYL-W0622\n\nlogger = logging.getLogger(__name__)\n\n\ndef dynamic_chunk_size(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n import requests\n from google.cloud.storage.blob import Blob\n\n # `ConnectionError` may be due to too large `chunk_size`\n # (see [#2572]) so try halving on error.\n # Note: start with 40 * [default: 256K] = 10M.\n # Note: must be multiple of 256K.\n #\n # [#2572]: https://github.com/iterative/dvc/issues/2572\n\n # skipcq: PYL-W0212\n multiplier = 40\n while True:\n try:\n # skipcq: PYL-W0212\n chunk_size = Blob._CHUNK_SIZE_MULTIPLE * multiplier\n return func(*args, chunk_size=chunk_size, **kwargs)\n except requests.exceptions.ConnectionError:\n multiplier //= 2\n if not multiplier:\n raise\n\n return wrapper\n\n\n@dynamic_chunk_size\ndef _upload_to_bucket(\n bucket,\n from_file,\n to_info,\n chunk_size=None,\n name=None,\n no_progress_bar=True,\n):\n blob = bucket.blob(to_info.path, chunk_size=chunk_size)\n with Tqdm(\n desc=name or to_info.path,\n total=os.path.getsize(from_file),\n bytes=True,\n disable=no_progress_bar,\n ) as pbar:\n with io.open(from_file, mode=\"rb\") as fobj:\n raw_read = fobj.read\n\n def read(size=chunk_size):\n res = raw_read(size)\n if res:\n pbar.update(len(res))\n return res\n\n fobj.read = read\n blob.upload_from_file(fobj)\n\n\nclass RemoteGS(RemoteBASE):\n scheme = Schemes.GS\n path_cls = CloudURLInfo\n REQUIRES = {\"google-cloud-storage\": \"google.cloud.storage\"}\n PARAM_CHECKSUM = \"md5\"\n\n def __init__(self, repo, config):\n super(RemoteGS, self).__init__(repo, config)\n\n url = config.get(Config.SECTION_REMOTE_URL, \"gs:///\")\n self.path_info = self.path_cls(url)\n\n self.projectname = config.get(Config.SECTION_GCP_PROJECTNAME, None)\n self.credentialpath = 
config.get(Config.SECTION_GCP_CREDENTIALPATH)\n\n @wrap_prop(threading.Lock())\n @cached_property\n def gs(self):\n from google.cloud.storage import Client\n\n return (\n Client.from_service_account_json(self.credentialpath)\n if self.credentialpath\n else Client(self.projectname)\n )\n\n def get_file_checksum(self, path_info):\n import base64\n import codecs\n\n bucket = path_info.bucket\n path = path_info.path\n blob = self.gs.bucket(bucket).get_blob(path)\n if not blob:\n return None\n\n b64_md5 = blob.md5_hash\n md5 = base64.b64decode(b64_md5)\n return codecs.getencoder(\"hex\")(md5)[0].decode(\"utf-8\")\n\n def copy(self, from_info, to_info):\n from_bucket = self.gs.bucket(from_info.bucket)\n blob = from_bucket.get_blob(from_info.path)\n if not blob:\n msg = \"'{}' doesn't exist in the cloud\".format(from_info.path)\n raise DvcException(msg)\n\n to_bucket = self.gs.bucket(to_info.bucket)\n from_bucket.copy_blob(blob, to_bucket, new_name=to_info.path)\n\n def remove(self, path_info):\n if path_info.scheme != \"gs\":\n raise NotImplementedError\n\n logger.debug(\"Removing gs://{}\".format(path_info))\n blob = self.gs.bucket(path_info.bucket).get_blob(path_info.path)\n if not blob:\n return\n\n blob.delete()\n\n def _list_paths(self, bucket, prefix):\n for blob in self.gs.bucket(bucket).list_blobs(prefix=prefix):\n yield blob.name\n\n def list_cache_paths(self):\n return self._list_paths(self.path_info.bucket, self.path_info.path)\n\n def exists(self, path_info):\n paths = set(self._list_paths(path_info.bucket, path_info.path))\n return any(path_info.path == path for path in paths)\n\n def _upload(self, from_file, to_info, name=None, no_progress_bar=True):\n bucket = self.gs.bucket(to_info.bucket)\n _upload_to_bucket(\n bucket,\n from_file,\n to_info,\n name=name,\n no_progress_bar=no_progress_bar,\n )\n\n def _download(self, from_info, to_file, name=None, no_progress_bar=True):\n bucket = self.gs.bucket(from_info.bucket)\n blob = bucket.get_blob(from_info.path)\n with Tqdm(\n desc=name or from_info.path,\n total=blob.size,\n bytes=True,\n disable=no_progress_bar,\n ) as pbar:\n with io.open(to_file, mode=\"wb\") as fobj:\n raw_write = fobj.write\n\n def write(byte_string):\n raw_write(byte_string)\n pbar.update(len(byte_string))\n\n fobj.write = write\n blob.download_to_file(fobj)\n\n def _generate_download_url(self, path_info, expires=3600):\n expiration = timedelta(seconds=int(expires))\n\n bucket = self.gs.bucket(path_info.bucket)\n blob = bucket.get_blob(path_info.path)\n if blob is None:\n raise FileNotFoundError\n return blob.generate_signed_url(expiration=expiration)\n", "path": "dvc/remote/gs.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division\n\nimport logging\nfrom datetime import timedelta\nfrom functools import wraps\nimport io\nimport os.path\nimport threading\n\nfrom funcy import cached_property, wrap_prop\n\nfrom dvc.config import Config\nfrom dvc.exceptions import DvcException\nfrom dvc.path_info import CloudURLInfo\nfrom dvc.progress import Tqdm\nfrom dvc.remote.base import RemoteBASE\nfrom dvc.scheme import Schemes\nfrom dvc.utils.compat import FileNotFoundError # skipcq: PYL-W0622\n\nlogger = logging.getLogger(__name__)\n\n\ndef dynamic_chunk_size(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n import requests\n from google.cloud.storage.blob import Blob\n\n # `ConnectionError` may be due to too large `chunk_size`\n # (see [#2572]) so try halving on error.\n # Note: start with 40 * [default: 256K] = 10M.\n # Note: must be 
multiple of 256K.\n #\n # [#2572]: https://github.com/iterative/dvc/issues/2572\n\n # skipcq: PYL-W0212\n multiplier = 40\n while True:\n try:\n # skipcq: PYL-W0212\n chunk_size = Blob._CHUNK_SIZE_MULTIPLE * multiplier\n return func(*args, chunk_size=chunk_size, **kwargs)\n except requests.exceptions.ConnectionError:\n multiplier //= 2\n if not multiplier:\n raise\n\n return wrapper\n\n\n@dynamic_chunk_size\ndef _upload_to_bucket(\n bucket,\n from_file,\n to_info,\n chunk_size=None,\n name=None,\n no_progress_bar=True,\n):\n blob = bucket.blob(to_info.path, chunk_size=chunk_size)\n with Tqdm(\n desc=name or to_info.path,\n total=os.path.getsize(from_file),\n bytes=True,\n disable=no_progress_bar,\n ) as pbar:\n with io.open(from_file, mode=\"rb\") as fobj:\n raw_read = fobj.read\n\n def read(size=chunk_size):\n res = raw_read(size)\n if res:\n pbar.update(len(res))\n return res\n\n fobj.read = read\n blob.upload_from_file(fobj)\n\n\nclass RemoteGS(RemoteBASE):\n scheme = Schemes.GS\n path_cls = CloudURLInfo\n REQUIRES = {\"google-cloud-storage\": \"google.cloud.storage\"}\n PARAM_CHECKSUM = \"md5\"\n\n def __init__(self, repo, config):\n super(RemoteGS, self).__init__(repo, config)\n\n url = config.get(Config.SECTION_REMOTE_URL, \"gs:///\")\n self.path_info = self.path_cls(url)\n\n self.projectname = config.get(Config.SECTION_GCP_PROJECTNAME, None)\n self.credentialpath = config.get(Config.SECTION_GCP_CREDENTIALPATH)\n\n @wrap_prop(threading.Lock())\n @cached_property\n def gs(self):\n from google.cloud.storage import Client\n\n return (\n Client.from_service_account_json(self.credentialpath)\n if self.credentialpath\n else Client(self.projectname)\n )\n\n def get_file_checksum(self, path_info):\n import base64\n import codecs\n\n bucket = path_info.bucket\n path = path_info.path\n blob = self.gs.bucket(bucket).get_blob(path)\n if not blob:\n return None\n\n b64_md5 = blob.md5_hash\n md5 = base64.b64decode(b64_md5)\n return codecs.getencoder(\"hex\")(md5)[0].decode(\"utf-8\")\n\n def copy(self, from_info, to_info):\n from_bucket = self.gs.bucket(from_info.bucket)\n blob = from_bucket.get_blob(from_info.path)\n if not blob:\n msg = \"'{}' doesn't exist in the cloud\".format(from_info.path)\n raise DvcException(msg)\n\n to_bucket = self.gs.bucket(to_info.bucket)\n from_bucket.copy_blob(blob, to_bucket, new_name=to_info.path)\n\n def remove(self, path_info):\n if path_info.scheme != \"gs\":\n raise NotImplementedError\n\n logger.debug(\"Removing gs://{}\".format(path_info))\n blob = self.gs.bucket(path_info.bucket).get_blob(path_info.path)\n if not blob:\n return\n\n blob.delete()\n\n def _list_paths(self, path_info, max_items=None):\n for blob in self.gs.bucket(path_info.bucket).list_blobs(\n prefix=path_info.path, max_results=max_items\n ):\n yield blob.name\n\n def list_cache_paths(self):\n return self._list_paths(self.path_info)\n\n def walk_files(self, path_info):\n for fname in self._list_paths(path_info / \"\"):\n # skip nested empty directories\n if fname.endswith(\"/\"):\n continue\n yield path_info.replace(fname)\n\n def makedirs(self, path_info):\n self.gs.bucket(path_info.bucket).blob(\n (path_info / \"\").path\n ).upload_from_string(\"\")\n\n def isdir(self, path_info):\n dir_path = path_info / \"\"\n return bool(list(self._list_paths(dir_path, max_items=1)))\n\n def isfile(self, path_info):\n if path_info.path.endswith(\"/\"):\n return False\n\n blob = self.gs.bucket(path_info.bucket).blob(path_info.path)\n return blob.exists()\n\n def exists(self, path_info):\n \"\"\"Check if 
the blob exists. If it does not exist,\n it could be a part of a directory path.\n\n eg: if `data/file.txt` exists, check for `data` should return True\n \"\"\"\n return self.isfile(path_info) or self.isdir(path_info)\n\n def _upload(self, from_file, to_info, name=None, no_progress_bar=True):\n bucket = self.gs.bucket(to_info.bucket)\n _upload_to_bucket(\n bucket,\n from_file,\n to_info,\n name=name,\n no_progress_bar=no_progress_bar,\n )\n\n def _download(self, from_info, to_file, name=None, no_progress_bar=True):\n bucket = self.gs.bucket(from_info.bucket)\n blob = bucket.get_blob(from_info.path)\n with Tqdm(\n desc=name or from_info.path,\n total=blob.size,\n bytes=True,\n disable=no_progress_bar,\n ) as pbar:\n with io.open(to_file, mode=\"wb\") as fobj:\n raw_write = fobj.write\n\n def write(byte_string):\n raw_write(byte_string)\n pbar.update(len(byte_string))\n\n fobj.write = write\n blob.download_to_file(fobj)\n\n def _generate_download_url(self, path_info, expires=3600):\n expiration = timedelta(seconds=int(expires))\n\n bucket = self.gs.bucket(path_info.bucket)\n blob = bucket.get_blob(path_info.path)\n if blob is None:\n raise FileNotFoundError\n return blob.generate_signed_url(expiration=expiration)\n", "path": "dvc/remote/gs.py"}]} | 2,846 | 489 |
gh_patches_debug_20763 | rasdani/github-patches | git_diff | nv-legate__cunumeric-278 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add some missing array methods
A few numpy array methods were discovered to be missing:
```
"loads",
"mafromtxt",
"matmul",
"ndfromtxt",
```
These should be copied over as necessary, and the comparison blocklist updated.
ref: https://github.com/nv-legate/cunumeric/pull/224#discussion_r830657282
--- END ISSUE ---
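One hedged way to see which public NumPy callables a cuNumeric build does not yet expose (and therefore which names to implement or to blocklist) is a small comparison script. The helper below is illustrative only — it is not part of the repository and assumes both packages import in the current environment:
```python
import numpy
import cunumeric


def missing_names():
    # Public NumPy callables with no counterpart attribute on cunumeric.
    for name in dir(numpy):
        if name.startswith("_") or not callable(getattr(numpy, name)):
            continue
        if not hasattr(cunumeric, name):
            yield name


print(sorted(missing_names()))  # per the issue, this once included e.g. "matmul"
```
Names that are genuinely not worth mirroring — or that lack valid intersphinx targets, as the fix below notes — belong in the comparison generator's blocklist instead.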
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/cunumeric/source/comparison/_comparison_generator.py`
Content:
```
1 import importlib
2
3 import numpy
4
5 blocklist = [
6 "abs",
7 "add_docstring",
8 "add_newdoc",
9 "add_newdoc_ufunc",
10 "alen",
11 "alltrue",
12 "bitwise_not",
13 "compare_chararrays",
14 "cumproduct",
15 "fastCopyAndTranspose",
16 "get_array_wrap",
17 "iterable",
18 "loads",
19 "mafromtxt",
20 "max",
21 "min",
22 "ndfromtxt",
23 "ndim",
24 "product",
25 "recfromcsv",
26 "recfromtxt",
27 "round",
28 "safe_eval",
29 "set_numeric_ops",
30 "size",
31 "sometrue",
32 "test",
33 ]
34
35
36 def check_ufunc(obj, n):
37 try:
38 return isinstance(getattr(obj, n), numpy.ufunc)
39 except: # noqa E722
40 return False
41
42
43 def _filter(obj, n, ufuncs=False):
44 is_ufunc = check_ufunc(obj, n)
45 if not ufuncs:
46 is_ufunc = not is_ufunc
47
48 try:
49 return (
50 n not in blocklist
51 and callable(getattr(obj, n)) # callable
52 and not isinstance(getattr(obj, n), type) # not class
53 and n[0].islower() # starts with lower char
54 and not n.startswith("__") # not special methods
55 and is_ufunc
56 )
57 except: # noqa: E722
58 return False
59
60
61 def _get_functions(obj, ufuncs=False):
62 return set([n for n in dir(obj) if (_filter(obj, n, ufuncs))])
63
64
65 def _import(mod, klass):
66 try:
67 obj = importlib.import_module(mod)
68 except ModuleNotFoundError:
69 return None, None
70
71 if klass:
72 obj = getattr(obj, klass)
73 return obj, ":meth:`{}.{}.{{}}`".format(mod, klass)
74 else:
75 # ufunc is not a function
76 return obj, ":obj:`{}.{{}}`".format(mod)
77
78
79 def _section(
80 header, mod_ext, other_lib, klass=None, exclude_mod=None, ufuncs=False
81 ):
82 base_mod = "numpy" + mod_ext
83 other_mod = other_lib + mod_ext
84
85 base_funcs = []
86 base_obj, base_fmt = _import(base_mod, klass)
87 base_funcs = _get_functions(base_obj, ufuncs)
88 lg_obj, lg_fmt = _import(other_mod, klass)
89
90 lg_funcs = []
91 for f in _get_functions(lg_obj):
92 obj = getattr(lg_obj, f)
93 if getattr(obj, "_cunumeric_implemented", False):
94 lg_funcs.append(f)
95 lg_funcs = set(lg_funcs)
96
97 if exclude_mod:
98 exclude_obj, _ = _import(exclude_mod, klass)
99 exclude_funcs = _get_functions(exclude_obj)
100 base_funcs -= exclude_funcs
101 lg_funcs -= exclude_funcs
102
103 buf = [
104 header,
105 "~" * len(header),
106 "",
107 ]
108
109 buf += [
110 ".. currentmodule:: cunumeric",
111 "",
112 ".. autosummary::",
113 " :toctree: generated/",
114 "",
115 ]
116
117 buf += [
118 ".. csv-table::",
119 " :header: NumPy, {}, single-GPU/CPU, multi-GPU/CPU".format(
120 other_mod
121 ),
122 "",
123 ]
124 for f in sorted(base_funcs):
125 base_cell = base_fmt.format(f)
126 lg_cell = r"\-"
127 single_gpu_cell = ""
128 multi_gpu_cell = ""
129 if f in lg_funcs:
130 lg_cell = lg_fmt.format(f)
131 obj = getattr(lg_obj, f)
132 if obj.__doc__ is not None and "Single GPU" in obj.__doc__:
133 multi_gpu_cell = "No"
134 single_gpu_cell = "Yes"
135 elif obj.__doc__ is not None and "Multiple GPUs" in obj.__doc__:
136 multi_gpu_cell = "Yes"
137 single_gpu_cell = "Yes"
138 if getattr(base_obj, f) is getattr(lg_obj, f):
139 lg_cell = "{} (*alias of* {})".format(lg_cell, base_cell)
140 line = " {}, {}, {}, {}".format(
141 base_cell, lg_cell, single_gpu_cell, multi_gpu_cell
142 )
143 buf.append(line)
144
145 buf += [
146 "",
147 ".. Summary:",
148 " Number of NumPy functions: {}".format(len(base_funcs)),
149 " Number of functions covered by "
150 f"{other_lib}: {len(lg_funcs & base_funcs)}",
151 ]
152 buf += [
153 "",
154 ]
155 return buf
156
157
158 def generate(other_lib):
159 buf = []
160 buf += [
161 "NumPy vs cuNumeric APIs",
162 "------------------------",
163 "",
164 ]
165 buf += _section("Module-Level", "", other_lib)
166 buf += _section("Ufuncs", "", other_lib, ufuncs=True)
167 buf += _section("Multi-Dimensional Array", "", other_lib, klass="ndarray")
168 buf += _section("Linear Algebra", ".linalg", other_lib)
169 buf += _section("Discrete Fourier Transform", ".fft", other_lib)
170 buf += _section("Random Sampling", ".random", other_lib)
171
172 return "\n".join(buf)
173
174
175 if __name__ == "__main__":
176 print(generate("cunumeric"))
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/cunumeric/source/comparison/_comparison_generator.py b/docs/cunumeric/source/comparison/_comparison_generator.py
--- a/docs/cunumeric/source/comparison/_comparison_generator.py
+++ b/docs/cunumeric/source/comparison/_comparison_generator.py
@@ -15,11 +15,8 @@
"fastCopyAndTranspose",
"get_array_wrap",
"iterable",
- "loads",
- "mafromtxt",
"max",
"min",
- "ndfromtxt",
"ndim",
"product",
"recfromcsv",
@@ -32,6 +29,13 @@
"test",
]
+# these do not have valid intersphinx references
+missing_numpy_refs = {
+ "loads",
+ "mafromtxt",
+ "ndfromtxt",
+}
+
def check_ufunc(obj, n):
try:
@@ -122,7 +126,10 @@
"",
]
for f in sorted(base_funcs):
- base_cell = base_fmt.format(f)
+ if f not in missing_numpy_refs:
+ base_cell = base_fmt.format(f)
+ else:
+ base_cell = f"``numpy.{f}``"
lg_cell = r"\-"
single_gpu_cell = ""
multi_gpu_cell = ""
| {"golden_diff": "diff --git a/docs/cunumeric/source/comparison/_comparison_generator.py b/docs/cunumeric/source/comparison/_comparison_generator.py\n--- a/docs/cunumeric/source/comparison/_comparison_generator.py\n+++ b/docs/cunumeric/source/comparison/_comparison_generator.py\n@@ -15,11 +15,8 @@\n \"fastCopyAndTranspose\",\n \"get_array_wrap\",\n \"iterable\",\n- \"loads\",\n- \"mafromtxt\",\n \"max\",\n \"min\",\n- \"ndfromtxt\",\n \"ndim\",\n \"product\",\n \"recfromcsv\",\n@@ -32,6 +29,13 @@\n \"test\",\n ]\n \n+# these do not have valid intersphinx references\n+missing_numpy_refs = {\n+ \"loads\",\n+ \"mafromtxt\",\n+ \"ndfromtxt\",\n+}\n+\n \n def check_ufunc(obj, n):\n try:\n@@ -122,7 +126,10 @@\n \"\",\n ]\n for f in sorted(base_funcs):\n- base_cell = base_fmt.format(f)\n+ if f not in missing_numpy_refs:\n+ base_cell = base_fmt.format(f)\n+ else:\n+ base_cell = f\"``numpy.{f}``\"\n lg_cell = r\"\\-\"\n single_gpu_cell = \"\"\n multi_gpu_cell = \"\"\n", "issue": "Add some missing array methods \nA few numpy array methods were discovered to be missing:\r\n```\r\n \"loads\",\r\n \"mafromtxt\",\r\n \"matmul\",\r\n \"ndfromtxt\",\r\n```\r\n\r\nThese should be copied over as necessary, and the comparison blocklist updated. \r\n\r\nref: https://github.com/nv-legate/cunumeric/pull/224#discussion_r830657282\r\n\n", "before_files": [{"content": "import importlib\n\nimport numpy\n\nblocklist = [\n \"abs\",\n \"add_docstring\",\n \"add_newdoc\",\n \"add_newdoc_ufunc\",\n \"alen\",\n \"alltrue\",\n \"bitwise_not\",\n \"compare_chararrays\",\n \"cumproduct\",\n \"fastCopyAndTranspose\",\n \"get_array_wrap\",\n \"iterable\",\n \"loads\",\n \"mafromtxt\",\n \"max\",\n \"min\",\n \"ndfromtxt\",\n \"ndim\",\n \"product\",\n \"recfromcsv\",\n \"recfromtxt\",\n \"round\",\n \"safe_eval\",\n \"set_numeric_ops\",\n \"size\",\n \"sometrue\",\n \"test\",\n]\n\n\ndef check_ufunc(obj, n):\n try:\n return isinstance(getattr(obj, n), numpy.ufunc)\n except: # noqa E722\n return False\n\n\ndef _filter(obj, n, ufuncs=False):\n is_ufunc = check_ufunc(obj, n)\n if not ufuncs:\n is_ufunc = not is_ufunc\n\n try:\n return (\n n not in blocklist\n and callable(getattr(obj, n)) # callable\n and not isinstance(getattr(obj, n), type) # not class\n and n[0].islower() # starts with lower char\n and not n.startswith(\"__\") # not special methods\n and is_ufunc\n )\n except: # noqa: E722\n return False\n\n\ndef _get_functions(obj, ufuncs=False):\n return set([n for n in dir(obj) if (_filter(obj, n, ufuncs))])\n\n\ndef _import(mod, klass):\n try:\n obj = importlib.import_module(mod)\n except ModuleNotFoundError:\n return None, None\n\n if klass:\n obj = getattr(obj, klass)\n return obj, \":meth:`{}.{}.{{}}`\".format(mod, klass)\n else:\n # ufunc is not a function\n return obj, \":obj:`{}.{{}}`\".format(mod)\n\n\ndef _section(\n header, mod_ext, other_lib, klass=None, exclude_mod=None, ufuncs=False\n):\n base_mod = \"numpy\" + mod_ext\n other_mod = other_lib + mod_ext\n\n base_funcs = []\n base_obj, base_fmt = _import(base_mod, klass)\n base_funcs = _get_functions(base_obj, ufuncs)\n lg_obj, lg_fmt = _import(other_mod, klass)\n\n lg_funcs = []\n for f in _get_functions(lg_obj):\n obj = getattr(lg_obj, f)\n if getattr(obj, \"_cunumeric_implemented\", False):\n lg_funcs.append(f)\n lg_funcs = set(lg_funcs)\n\n if exclude_mod:\n exclude_obj, _ = _import(exclude_mod, klass)\n exclude_funcs = _get_functions(exclude_obj)\n base_funcs -= exclude_funcs\n lg_funcs -= exclude_funcs\n\n buf = [\n header,\n \"~\" * len(header),\n 
\"\",\n ]\n\n buf += [\n \".. currentmodule:: cunumeric\",\n \"\",\n \".. autosummary::\",\n \" :toctree: generated/\",\n \"\",\n ]\n\n buf += [\n \".. csv-table::\",\n \" :header: NumPy, {}, single-GPU/CPU, multi-GPU/CPU\".format(\n other_mod\n ),\n \"\",\n ]\n for f in sorted(base_funcs):\n base_cell = base_fmt.format(f)\n lg_cell = r\"\\-\"\n single_gpu_cell = \"\"\n multi_gpu_cell = \"\"\n if f in lg_funcs:\n lg_cell = lg_fmt.format(f)\n obj = getattr(lg_obj, f)\n if obj.__doc__ is not None and \"Single GPU\" in obj.__doc__:\n multi_gpu_cell = \"No\"\n single_gpu_cell = \"Yes\"\n elif obj.__doc__ is not None and \"Multiple GPUs\" in obj.__doc__:\n multi_gpu_cell = \"Yes\"\n single_gpu_cell = \"Yes\"\n if getattr(base_obj, f) is getattr(lg_obj, f):\n lg_cell = \"{} (*alias of* {})\".format(lg_cell, base_cell)\n line = \" {}, {}, {}, {}\".format(\n base_cell, lg_cell, single_gpu_cell, multi_gpu_cell\n )\n buf.append(line)\n\n buf += [\n \"\",\n \".. Summary:\",\n \" Number of NumPy functions: {}\".format(len(base_funcs)),\n \" Number of functions covered by \"\n f\"{other_lib}: {len(lg_funcs & base_funcs)}\",\n ]\n buf += [\n \"\",\n ]\n return buf\n\n\ndef generate(other_lib):\n buf = []\n buf += [\n \"NumPy vs cuNumeric APIs\",\n \"------------------------\",\n \"\",\n ]\n buf += _section(\"Module-Level\", \"\", other_lib)\n buf += _section(\"Ufuncs\", \"\", other_lib, ufuncs=True)\n buf += _section(\"Multi-Dimensional Array\", \"\", other_lib, klass=\"ndarray\")\n buf += _section(\"Linear Algebra\", \".linalg\", other_lib)\n buf += _section(\"Discrete Fourier Transform\", \".fft\", other_lib)\n buf += _section(\"Random Sampling\", \".random\", other_lib)\n\n return \"\\n\".join(buf)\n\n\nif __name__ == \"__main__\":\n print(generate(\"cunumeric\"))\n", "path": "docs/cunumeric/source/comparison/_comparison_generator.py"}], "after_files": [{"content": "import importlib\n\nimport numpy\n\nblocklist = [\n \"abs\",\n \"add_docstring\",\n \"add_newdoc\",\n \"add_newdoc_ufunc\",\n \"alen\",\n \"alltrue\",\n \"bitwise_not\",\n \"compare_chararrays\",\n \"cumproduct\",\n \"fastCopyAndTranspose\",\n \"get_array_wrap\",\n \"iterable\",\n \"max\",\n \"min\",\n \"ndim\",\n \"product\",\n \"recfromcsv\",\n \"recfromtxt\",\n \"round\",\n \"safe_eval\",\n \"set_numeric_ops\",\n \"size\",\n \"sometrue\",\n \"test\",\n]\n\n# these do not have valid intersphinx references\nmissing_numpy_refs = {\n \"loads\",\n \"mafromtxt\",\n \"ndfromtxt\",\n}\n\n\ndef check_ufunc(obj, n):\n try:\n return isinstance(getattr(obj, n), numpy.ufunc)\n except: # noqa E722\n return False\n\n\ndef _filter(obj, n, ufuncs=False):\n is_ufunc = check_ufunc(obj, n)\n if not ufuncs:\n is_ufunc = not is_ufunc\n\n try:\n return (\n n not in blocklist\n and callable(getattr(obj, n)) # callable\n and not isinstance(getattr(obj, n), type) # not class\n and n[0].islower() # starts with lower char\n and not n.startswith(\"__\") # not special methods\n and is_ufunc\n )\n except: # noqa: E722\n return False\n\n\ndef _get_functions(obj, ufuncs=False):\n return set([n for n in dir(obj) if (_filter(obj, n, ufuncs))])\n\n\ndef _import(mod, klass):\n try:\n obj = importlib.import_module(mod)\n except ModuleNotFoundError:\n return None, None\n\n if klass:\n obj = getattr(obj, klass)\n return obj, \":meth:`{}.{}.{{}}`\".format(mod, klass)\n else:\n # ufunc is not a function\n return obj, \":obj:`{}.{{}}`\".format(mod)\n\n\ndef _section(\n header, mod_ext, other_lib, klass=None, exclude_mod=None, ufuncs=False\n):\n base_mod = \"numpy\" + mod_ext\n 
other_mod = other_lib + mod_ext\n\n base_funcs = []\n base_obj, base_fmt = _import(base_mod, klass)\n base_funcs = _get_functions(base_obj, ufuncs)\n lg_obj, lg_fmt = _import(other_mod, klass)\n\n lg_funcs = []\n for f in _get_functions(lg_obj):\n obj = getattr(lg_obj, f)\n if getattr(obj, \"_cunumeric_implemented\", False):\n lg_funcs.append(f)\n lg_funcs = set(lg_funcs)\n\n if exclude_mod:\n exclude_obj, _ = _import(exclude_mod, klass)\n exclude_funcs = _get_functions(exclude_obj)\n base_funcs -= exclude_funcs\n lg_funcs -= exclude_funcs\n\n buf = [\n header,\n \"~\" * len(header),\n \"\",\n ]\n\n buf += [\n \".. currentmodule:: cunumeric\",\n \"\",\n \".. autosummary::\",\n \" :toctree: generated/\",\n \"\",\n ]\n\n buf += [\n \".. csv-table::\",\n \" :header: NumPy, {}, single-GPU/CPU, multi-GPU/CPU\".format(\n other_mod\n ),\n \"\",\n ]\n for f in sorted(base_funcs):\n if f not in missing_numpy_refs:\n base_cell = base_fmt.format(f)\n else:\n base_cell = f\"``numpy.{f}``\"\n lg_cell = r\"\\-\"\n single_gpu_cell = \"\"\n multi_gpu_cell = \"\"\n if f in lg_funcs:\n lg_cell = lg_fmt.format(f)\n obj = getattr(lg_obj, f)\n if obj.__doc__ is not None and \"Single GPU\" in obj.__doc__:\n multi_gpu_cell = \"No\"\n single_gpu_cell = \"Yes\"\n elif obj.__doc__ is not None and \"Multiple GPUs\" in obj.__doc__:\n multi_gpu_cell = \"Yes\"\n single_gpu_cell = \"Yes\"\n if getattr(base_obj, f) is getattr(lg_obj, f):\n lg_cell = \"{} (*alias of* {})\".format(lg_cell, base_cell)\n line = \" {}, {}, {}, {}\".format(\n base_cell, lg_cell, single_gpu_cell, multi_gpu_cell\n )\n buf.append(line)\n\n buf += [\n \"\",\n \".. Summary:\",\n \" Number of NumPy functions: {}\".format(len(base_funcs)),\n \" Number of functions covered by \"\n f\"{other_lib}: {len(lg_funcs & base_funcs)}\",\n ]\n buf += [\n \"\",\n ]\n return buf\n\n\ndef generate(other_lib):\n buf = []\n buf += [\n \"NumPy vs cuNumeric APIs\",\n \"------------------------\",\n \"\",\n ]\n buf += _section(\"Module-Level\", \"\", other_lib)\n buf += _section(\"Ufuncs\", \"\", other_lib, ufuncs=True)\n buf += _section(\"Multi-Dimensional Array\", \"\", other_lib, klass=\"ndarray\")\n buf += _section(\"Linear Algebra\", \".linalg\", other_lib)\n buf += _section(\"Discrete Fourier Transform\", \".fft\", other_lib)\n buf += _section(\"Random Sampling\", \".random\", other_lib)\n\n return \"\\n\".join(buf)\n\n\nif __name__ == \"__main__\":\n print(generate(\"cunumeric\"))\n", "path": "docs/cunumeric/source/comparison/_comparison_generator.py"}]} | 1,970 | 294 |
gh_patches_debug_17995 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3822 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Plone site id is a reserved id and can't be used anywhere in Zope as a short name
## BUG/PROBLEM REPORT (OR OTHER COMMON ISSUE)
### What I did:
Add a Plone site with the id example.
Add another Plone site with any other name.
In the second Plone site, try to create a Document with the title example.
You'll get a trace:
```
Traceback (innermost last):
Module ZPublisher.WSGIPublisher, line 181, in transaction_pubevents
Module ZPublisher.WSGIPublisher, line 390, in publish_module
Module ZPublisher.WSGIPublisher, line 285, in publish
Module ZPublisher.mapply, line 85, in mapply
Module ZPublisher.WSGIPublisher, line 68, in call_object
Module plone.z3cform.layout, line 61, in __call__
Module plone.z3cform.layout, line 45, in update
Module plone.dexterity.browser.add, line 136, in update
Module plone.z3cform.fieldsets.extensible, line 62, in update
Module plone.z3cform.patch, line 31, in GroupForm_update
Module z3c.form.group, line 145, in update
Module plone.app.z3cform.csrf, line 21, in execute
Module z3c.form.action, line 98, in execute
Module z3c.form.button, line 301, in __call__
Module z3c.form.button, line 159, in __call__
Module plone.dexterity.browser.add, line 114, in handleAdd
Module z3c.form.form, line 265, in createAndAdd
Module plone.dexterity.browser.add, line 89, in add
Module plone.dexterity.utils, line 174, in addContentToContainer
Module Products.BTreeFolder2.BTreeFolder2, line 434, in _setObject
Module plone.folder.ordered, line 253, in _checkId
Module Products.CMFCore.PortalFolder, line 318, in _checkId
Module OFS.ObjectManager, line 130, in checkValidId
zExceptions.BadRequest: The id "example" is reserved.
```
### What I expect to happen:
Having a page with the id example.
### What actually happened:
Got the trace above.
A Plone site id should not be blocked from being used elsewhere as an object id.
### What version of Plone/ Addons I am using:
Plone 6.0.6
--- END ISSUE ---
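For context on why Zope reports the id as "reserved" rather than "already in use": `OFS.ObjectManager.checkValidId` also rejects ids that are merely *acquirable* from the container when the acquired object carries a "unique" flag, and `PloneSite` mixes in `Products.CMFCore.utils.UniqueObject`, which marks the site with that flag. The sketch below is a hedged paraphrase of that branch — the flag constants are illustrative and the exact mechanics should be treated as an assumption, not Zope's verbatim code:
```python
from zExceptions import BadRequest

NOT_REPLACEABLE, REPLACEABLE, UNIQUE = 0, 1, 2  # illustrative flag values


def check_valid_id(new_id, acquired_obj, really_contained):
    # acquired_obj: whatever getattr(container, new_id, None) finds -- via
    # acquisition this can be a sibling Plone site higher up under the Zope root.
    if acquired_obj is None:
        return
    flags = getattr(acquired_obj, "__replaceable__", NOT_REPLACEABLE)
    if really_contained:  # an object with this id truly lives in this folder
        if not flags & REPLACEABLE:
            raise BadRequest(f'The id "{new_id}" is invalid - it is already in use.')
    elif flags & UNIQUE:  # only found by acquisition, but marked unique
        raise BadRequest(f'The id "{new_id}" is reserved.')
```
Because every Plone site below the same Zope root is acquirable everywhere under that root, its id becomes unusable for ordinary content; dropping the `UniqueObject` mixin from `PloneSite` (as the patch at the end of this entry does) removes the flag and with it the restriction.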
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/Portal.py`
Content:
```
1 from AccessControl import ClassSecurityInfo
2 from AccessControl import Unauthorized
3 from AccessControl.class_init import InitializeClass
4 from Acquisition import aq_base
5 from ComputedAttribute import ComputedAttribute
6 from five.localsitemanager.registry import PersistentComponents
7 from OFS.ObjectManager import REPLACEABLE
8 from plone.base.interfaces.siteroot import IPloneSiteRoot
9 from plone.base.interfaces.syndication import ISyndicatable
10 from plone.base.permissions import AddPortalContent
11 from plone.base.permissions import AddPortalFolders
12 from plone.base.permissions import ListPortalMembers
13 from plone.base.permissions import ModifyPortalContent
14 from plone.base.permissions import ReplyToItem
15 from plone.base.permissions import View
16 from plone.dexterity.content import Container
17 from Products.CMFCore import permissions
18 from Products.CMFCore.interfaces import IContentish
19 from Products.CMFCore.interfaces import ISiteRoot
20 from Products.CMFCore.permissions import AccessContentsInformation
21 from Products.CMFCore.permissions import AddPortalMember
22 from Products.CMFCore.permissions import MailForgottenPassword
23 from Products.CMFCore.permissions import RequestReview
24 from Products.CMFCore.permissions import ReviewPortalContent
25 from Products.CMFCore.permissions import SetOwnPassword
26 from Products.CMFCore.permissions import SetOwnProperties
27 from Products.CMFCore.PortalFolder import PortalFolderBase
28 from Products.CMFCore.PortalObject import PortalObjectBase
29 from Products.CMFCore.Skinnable import SkinnableObjectManager
30 from Products.CMFCore.utils import _checkPermission
31 from Products.CMFCore.utils import getToolByName
32 from Products.CMFCore.utils import UniqueObject
33 from Products.CMFPlone import bbb
34 from Products.Five.component.interfaces import IObjectManagerSite
35 from zope.event import notify
36 from zope.interface import classImplementsOnly
37 from zope.interface import implementedBy
38 from zope.interface import implementer
39 from zope.interface.interfaces import ComponentLookupError
40 from zope.traversing.interfaces import BeforeTraverseEvent
41
42
43 if bbb.HAS_ZSERVER:
44 from webdav.NullResource import NullResource
45
46
47 @implementer(IPloneSiteRoot, ISiteRoot, ISyndicatable, IObjectManagerSite)
48 class PloneSite(Container, SkinnableObjectManager, UniqueObject):
49 """The Plone site object."""
50
51 security = ClassSecurityInfo()
52 meta_type = portal_type = "Plone Site"
53
54 # Ensure certain attributes come from the correct base class.
55 _checkId = SkinnableObjectManager._checkId
56 manage_main = PortalFolderBase.manage_main
57
58 def __getattr__(self, name):
59 try:
60 # Try DX
61 return super().__getattr__(name)
62 except AttributeError:
63 # Check portal_skins
64 return SkinnableObjectManager.__getattr__(self, name)
65
66 def __setattr__(self, name, obj):
67 # handle re setting an item as an attribute
68 if self._tree is not None and name in self:
69 del self[name]
70 self[name] = obj
71 else:
72 super().__setattr__(name, obj)
73
74 def __delattr__(self, name):
75 try:
76 return super().__delattr__(name)
77 except AttributeError:
78 return self.__delitem__(name)
79
80 # Removes the 'Components Folder'
81
82 manage_options = Container.manage_options[:2] + Container.manage_options[3:]
83
84 __ac_permissions__ = (
85 (AccessContentsInformation, ()),
86 (AddPortalMember, ()),
87 (SetOwnPassword, ()),
88 (SetOwnProperties, ()),
89 (MailForgottenPassword, ()),
90 (RequestReview, ()),
91 (ReviewPortalContent, ()),
92 (AddPortalContent, ()),
93 (AddPortalFolders, ()),
94 (ListPortalMembers, ()),
95 (ReplyToItem, ()),
96 (View, ("isEffective",)),
97 (
98 ModifyPortalContent,
99 (
100 "manage_cutObjects",
101 "manage_pasteObjects",
102 "manage_renameForm",
103 "manage_renameObject",
104 "manage_renameObjects",
105 ),
106 ),
107 )
108
109 # Switch off ZMI ordering interface as it assumes a slightly
110 # different functionality
111 has_order_support = 0
112 management_page_charset = "utf-8"
113 _default_sort_key = "id"
114 _properties = (
115 {"id": "title", "type": "string", "mode": "w"},
116 {"id": "description", "type": "text", "mode": "w"},
117 )
118 title = ""
119 description = ""
120 icon = "misc_/CMFPlone/tool.gif"
121
122 # From PortalObjectBase
123 def __init__(self, id, title=""):
124 super().__init__(id, title=title)
125 components = PersistentComponents("++etc++site")
126 components.__parent__ = self
127 self.setSiteManager(components)
128
129 # From PortalObjectBase
130 def __before_publishing_traverse__(self, arg1, arg2=None):
131 """Pre-traversal hook."""
132 # XXX hack around a bug(?) in BeforeTraverse.MultiHook
133 REQUEST = arg2 or arg1
134
135 try:
136 notify(BeforeTraverseEvent(self, REQUEST))
137 except ComponentLookupError:
138 # allow ZMI access, even if the portal's site manager is missing
139 pass
140 self.setupCurrentSkin(REQUEST)
141
142 super().__before_publishing_traverse__(arg1, arg2)
143
144 # Concept from OFS.OrderSupport
145 @security.protected(permissions.AccessContentsInformation)
146 def tpValues(self):
147 # Return a list of subobjects, used by ZMI tree tag (and only there).
148 # see also https://github.com/plone/Products.CMFPlone/issues/3323
149 return sorted(
150 (
151 obj
152 for obj in self.objectValues()
153 if getattr(aq_base(obj), "isPrincipiaFolderish", False)
154 ),
155 key=lambda obj: obj.getId(),
156 )
157
158 def __browser_default__(self, request):
159 """Set default so we can return whatever we want instead
160 of index_html"""
161 return getToolByName(self, "plone_utils").browserDefault(self)
162
163 def index_html(self):
164 """Acquire if not present."""
165 request = getattr(self, "REQUEST", None)
166 if (
167 request is not None
168 and "REQUEST_METHOD" in request
169 and request.maybe_webdav_client
170 ):
171 method = request["REQUEST_METHOD"]
172 if bbb.HAS_ZSERVER and method in ("PUT",):
173 # Very likely a WebDAV client trying to create something
174 result = NullResource(self, "index_html")
175 setattr(result, "__replaceable__", REPLACEABLE)
176 return result
177 elif method not in ("GET", "HEAD", "POST"):
178 raise AttributeError("index_html")
179 # Acquire from skin.
180 _target = self.__getattr__("index_html")
181 result = aq_base(_target).__of__(self)
182 setattr(result, "__replaceable__", REPLACEABLE)
183 return result
184
185 index_html = ComputedAttribute(index_html, 1)
186
187 def manage_beforeDelete(self, container, item):
188 # Should send out an Event before Site is being deleted.
189 self.removal_inprogress = 1
190 PloneSite.inheritedAttribute("manage_beforeDelete")(self, container, item)
191
192 @security.protected(permissions.DeleteObjects)
193 def manage_delObjects(self, ids=None, REQUEST=None):
194 """We need to enforce security."""
195 if ids is None:
196 ids = []
197 if isinstance(ids, str):
198 ids = [ids]
199 for id in ids:
200 item = self._getOb(id)
201 if not _checkPermission(permissions.DeleteObjects, item):
202 raise Unauthorized("Do not have permissions to remove this object")
203 return PortalObjectBase.manage_delObjects(self, ids, REQUEST=REQUEST)
204
205 def view(self):
206 """Ensure that we get a plain view of the object, via a delegation to
207 __call__(), which is defined in BrowserDefaultMixin
208 """
209 return self()
210
211 @security.protected(permissions.AccessContentsInformation)
212 def folderlistingFolderContents(self, contentFilter=None):
213 """Calls listFolderContents in protected only by ACI so that
214 folder_listing can work without the List folder contents permission.
215
216 This is copied from Archetypes Basefolder and is needed by the
217 reference browser.
218 """
219 return self.listFolderContents(contentFilter)
220
221 def isEffective(self, date):
222 # Override DefaultDublinCoreImpl's test, since we are always viewable.
223 return 1
224
225
226 # Remove the IContentish interface so we don't listen to events that won't
227 # apply to the site root, ie handleUidAnnotationEvent
228 classImplementsOnly(PloneSite, implementedBy(PloneSite) - IContentish)
229
230 InitializeClass(PloneSite)
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/Portal.py b/Products/CMFPlone/Portal.py
--- a/Products/CMFPlone/Portal.py
+++ b/Products/CMFPlone/Portal.py
@@ -29,7 +29,6 @@
from Products.CMFCore.Skinnable import SkinnableObjectManager
from Products.CMFCore.utils import _checkPermission
from Products.CMFCore.utils import getToolByName
-from Products.CMFCore.utils import UniqueObject
from Products.CMFPlone import bbb
from Products.Five.component.interfaces import IObjectManagerSite
from zope.event import notify
@@ -45,7 +44,7 @@
@implementer(IPloneSiteRoot, ISiteRoot, ISyndicatable, IObjectManagerSite)
-class PloneSite(Container, SkinnableObjectManager, UniqueObject):
+class PloneSite(Container, SkinnableObjectManager):
"""The Plone site object."""
security = ClassSecurityInfo()
| {"golden_diff": "diff --git a/Products/CMFPlone/Portal.py b/Products/CMFPlone/Portal.py\n--- a/Products/CMFPlone/Portal.py\n+++ b/Products/CMFPlone/Portal.py\n@@ -29,7 +29,6 @@\n from Products.CMFCore.Skinnable import SkinnableObjectManager\n from Products.CMFCore.utils import _checkPermission\n from Products.CMFCore.utils import getToolByName\n-from Products.CMFCore.utils import UniqueObject\n from Products.CMFPlone import bbb\n from Products.Five.component.interfaces import IObjectManagerSite\n from zope.event import notify\n@@ -45,7 +44,7 @@\n \n \n @implementer(IPloneSiteRoot, ISiteRoot, ISyndicatable, IObjectManagerSite)\n-class PloneSite(Container, SkinnableObjectManager, UniqueObject):\n+class PloneSite(Container, SkinnableObjectManager):\n \"\"\"The Plone site object.\"\"\"\n \n security = ClassSecurityInfo()\n", "issue": "Plone site id is a reserved id and can't be used any where in Zope as an short name \n## BUG/PROBLEM REPORT (OR OTHER COMMON ISSUE)\r\n\r\n### What I did:\r\n\r\nAdd a Plone site with the id example.\r\nAdd another Plone side with any other name.\r\nIn the second Plone side, try to create a Document with the title example.\r\n\r\nYou'll get a trace:\r\n\r\n```\r\nTraceback (innermost last):\r\n Module ZPublisher.WSGIPublisher, line 181, in transaction_pubevents\r\n Module ZPublisher.WSGIPublisher, line 390, in publish_module\r\n Module ZPublisher.WSGIPublisher, line 285, in publish\r\n Module ZPublisher.mapply, line 85, in mapply\r\n Module ZPublisher.WSGIPublisher, line 68, in call_object\r\n Module plone.z3cform.layout, line 61, in __call__\r\n Module plone.z3cform.layout, line 45, in update\r\n Module plone.dexterity.browser.add, line 136, in update\r\n Module plone.z3cform.fieldsets.extensible, line 62, in update\r\n Module plone.z3cform.patch, line 31, in GroupForm_update\r\n Module z3c.form.group, line 145, in update\r\n Module plone.app.z3cform.csrf, line 21, in execute\r\n Module z3c.form.action, line 98, in execute\r\n Module z3c.form.button, line 301, in __call__\r\n Module z3c.form.button, line 159, in __call__\r\n Module plone.dexterity.browser.add, line 114, in handleAdd\r\n Module z3c.form.form, line 265, in createAndAdd\r\n Module plone.dexterity.browser.add, line 89, in add\r\n Module plone.dexterity.utils, line 174, in addContentToContainer\r\n Module Products.BTreeFolder2.BTreeFolder2, line 434, in _setObject\r\n Module plone.folder.ordered, line 253, in _checkId\r\n Module Products.CMFCore.PortalFolder, line 318, in _checkId\r\n Module OFS.ObjectManager, line 130, in checkValidId\r\nzExceptions.BadRequest: The id \"example\" is reserved.\r\n```\r\n\r\n### What I expect to happen:\r\n\r\nHaving a page with the id example.\r\n\r\n### What actually happened:\r\n\r\nGot the trace above.\r\nA Plone site id should not be blocked to be used in other places for object id's.\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\nPlone 6.0.6\n", "before_files": [{"content": "from AccessControl import ClassSecurityInfo\nfrom AccessControl import Unauthorized\nfrom AccessControl.class_init import InitializeClass\nfrom Acquisition import aq_base\nfrom ComputedAttribute import ComputedAttribute\nfrom five.localsitemanager.registry import PersistentComponents\nfrom OFS.ObjectManager import REPLACEABLE\nfrom plone.base.interfaces.siteroot import IPloneSiteRoot\nfrom plone.base.interfaces.syndication import ISyndicatable\nfrom plone.base.permissions import AddPortalContent\nfrom plone.base.permissions import AddPortalFolders\nfrom plone.base.permissions 
import ListPortalMembers\nfrom plone.base.permissions import ModifyPortalContent\nfrom plone.base.permissions import ReplyToItem\nfrom plone.base.permissions import View\nfrom plone.dexterity.content import Container\nfrom Products.CMFCore import permissions\nfrom Products.CMFCore.interfaces import IContentish\nfrom Products.CMFCore.interfaces import ISiteRoot\nfrom Products.CMFCore.permissions import AccessContentsInformation\nfrom Products.CMFCore.permissions import AddPortalMember\nfrom Products.CMFCore.permissions import MailForgottenPassword\nfrom Products.CMFCore.permissions import RequestReview\nfrom Products.CMFCore.permissions import ReviewPortalContent\nfrom Products.CMFCore.permissions import SetOwnPassword\nfrom Products.CMFCore.permissions import SetOwnProperties\nfrom Products.CMFCore.PortalFolder import PortalFolderBase\nfrom Products.CMFCore.PortalObject import PortalObjectBase\nfrom Products.CMFCore.Skinnable import SkinnableObjectManager\nfrom Products.CMFCore.utils import _checkPermission\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFCore.utils import UniqueObject\nfrom Products.CMFPlone import bbb\nfrom Products.Five.component.interfaces import IObjectManagerSite\nfrom zope.event import notify\nfrom zope.interface import classImplementsOnly\nfrom zope.interface import implementedBy\nfrom zope.interface import implementer\nfrom zope.interface.interfaces import ComponentLookupError\nfrom zope.traversing.interfaces import BeforeTraverseEvent\n\n\nif bbb.HAS_ZSERVER:\n from webdav.NullResource import NullResource\n\n\n@implementer(IPloneSiteRoot, ISiteRoot, ISyndicatable, IObjectManagerSite)\nclass PloneSite(Container, SkinnableObjectManager, UniqueObject):\n \"\"\"The Plone site object.\"\"\"\n\n security = ClassSecurityInfo()\n meta_type = portal_type = \"Plone Site\"\n\n # Ensure certain attributes come from the correct base class.\n _checkId = SkinnableObjectManager._checkId\n manage_main = PortalFolderBase.manage_main\n\n def __getattr__(self, name):\n try:\n # Try DX\n return super().__getattr__(name)\n except AttributeError:\n # Check portal_skins\n return SkinnableObjectManager.__getattr__(self, name)\n\n def __setattr__(self, name, obj):\n # handle re setting an item as an attribute\n if self._tree is not None and name in self:\n del self[name]\n self[name] = obj\n else:\n super().__setattr__(name, obj)\n\n def __delattr__(self, name):\n try:\n return super().__delattr__(name)\n except AttributeError:\n return self.__delitem__(name)\n\n # Removes the 'Components Folder'\n\n manage_options = Container.manage_options[:2] + Container.manage_options[3:]\n\n __ac_permissions__ = (\n (AccessContentsInformation, ()),\n (AddPortalMember, ()),\n (SetOwnPassword, ()),\n (SetOwnProperties, ()),\n (MailForgottenPassword, ()),\n (RequestReview, ()),\n (ReviewPortalContent, ()),\n (AddPortalContent, ()),\n (AddPortalFolders, ()),\n (ListPortalMembers, ()),\n (ReplyToItem, ()),\n (View, (\"isEffective\",)),\n (\n ModifyPortalContent,\n (\n \"manage_cutObjects\",\n \"manage_pasteObjects\",\n \"manage_renameForm\",\n \"manage_renameObject\",\n \"manage_renameObjects\",\n ),\n ),\n )\n\n # Switch off ZMI ordering interface as it assumes a slightly\n # different functionality\n has_order_support = 0\n management_page_charset = \"utf-8\"\n _default_sort_key = \"id\"\n _properties = (\n {\"id\": \"title\", \"type\": \"string\", \"mode\": \"w\"},\n {\"id\": \"description\", \"type\": \"text\", \"mode\": \"w\"},\n )\n title = \"\"\n description = \"\"\n icon = 
\"misc_/CMFPlone/tool.gif\"\n\n # From PortalObjectBase\n def __init__(self, id, title=\"\"):\n super().__init__(id, title=title)\n components = PersistentComponents(\"++etc++site\")\n components.__parent__ = self\n self.setSiteManager(components)\n\n # From PortalObjectBase\n def __before_publishing_traverse__(self, arg1, arg2=None):\n \"\"\"Pre-traversal hook.\"\"\"\n # XXX hack around a bug(?) in BeforeTraverse.MultiHook\n REQUEST = arg2 or arg1\n\n try:\n notify(BeforeTraverseEvent(self, REQUEST))\n except ComponentLookupError:\n # allow ZMI access, even if the portal's site manager is missing\n pass\n self.setupCurrentSkin(REQUEST)\n\n super().__before_publishing_traverse__(arg1, arg2)\n\n # Concept from OFS.OrderSupport\n @security.protected(permissions.AccessContentsInformation)\n def tpValues(self):\n # Return a list of subobjects, used by ZMI tree tag (and only there).\n # see also https://github.com/plone/Products.CMFPlone/issues/3323\n return sorted(\n (\n obj\n for obj in self.objectValues()\n if getattr(aq_base(obj), \"isPrincipiaFolderish\", False)\n ),\n key=lambda obj: obj.getId(),\n )\n\n def __browser_default__(self, request):\n \"\"\"Set default so we can return whatever we want instead\n of index_html\"\"\"\n return getToolByName(self, \"plone_utils\").browserDefault(self)\n\n def index_html(self):\n \"\"\"Acquire if not present.\"\"\"\n request = getattr(self, \"REQUEST\", None)\n if (\n request is not None\n and \"REQUEST_METHOD\" in request\n and request.maybe_webdav_client\n ):\n method = request[\"REQUEST_METHOD\"]\n if bbb.HAS_ZSERVER and method in (\"PUT\",):\n # Very likely a WebDAV client trying to create something\n result = NullResource(self, \"index_html\")\n setattr(result, \"__replaceable__\", REPLACEABLE)\n return result\n elif method not in (\"GET\", \"HEAD\", \"POST\"):\n raise AttributeError(\"index_html\")\n # Acquire from skin.\n _target = self.__getattr__(\"index_html\")\n result = aq_base(_target).__of__(self)\n setattr(result, \"__replaceable__\", REPLACEABLE)\n return result\n\n index_html = ComputedAttribute(index_html, 1)\n\n def manage_beforeDelete(self, container, item):\n # Should send out an Event before Site is being deleted.\n self.removal_inprogress = 1\n PloneSite.inheritedAttribute(\"manage_beforeDelete\")(self, container, item)\n\n @security.protected(permissions.DeleteObjects)\n def manage_delObjects(self, ids=None, REQUEST=None):\n \"\"\"We need to enforce security.\"\"\"\n if ids is None:\n ids = []\n if isinstance(ids, str):\n ids = [ids]\n for id in ids:\n item = self._getOb(id)\n if not _checkPermission(permissions.DeleteObjects, item):\n raise Unauthorized(\"Do not have permissions to remove this object\")\n return PortalObjectBase.manage_delObjects(self, ids, REQUEST=REQUEST)\n\n def view(self):\n \"\"\"Ensure that we get a plain view of the object, via a delegation to\n __call__(), which is defined in BrowserDefaultMixin\n \"\"\"\n return self()\n\n @security.protected(permissions.AccessContentsInformation)\n def folderlistingFolderContents(self, contentFilter=None):\n \"\"\"Calls listFolderContents in protected only by ACI so that\n folder_listing can work without the List folder contents permission.\n\n This is copied from Archetypes Basefolder and is needed by the\n reference browser.\n \"\"\"\n return self.listFolderContents(contentFilter)\n\n def isEffective(self, date):\n # Override DefaultDublinCoreImpl's test, since we are always viewable.\n return 1\n\n\n# Remove the IContentish interface so we don't listen to events 
that won't\n# apply to the site root, ie handleUidAnnotationEvent\nclassImplementsOnly(PloneSite, implementedBy(PloneSite) - IContentish)\n\nInitializeClass(PloneSite)\n", "path": "Products/CMFPlone/Portal.py"}], "after_files": [{"content": "from AccessControl import ClassSecurityInfo\nfrom AccessControl import Unauthorized\nfrom AccessControl.class_init import InitializeClass\nfrom Acquisition import aq_base\nfrom ComputedAttribute import ComputedAttribute\nfrom five.localsitemanager.registry import PersistentComponents\nfrom OFS.ObjectManager import REPLACEABLE\nfrom plone.base.interfaces.siteroot import IPloneSiteRoot\nfrom plone.base.interfaces.syndication import ISyndicatable\nfrom plone.base.permissions import AddPortalContent\nfrom plone.base.permissions import AddPortalFolders\nfrom plone.base.permissions import ListPortalMembers\nfrom plone.base.permissions import ModifyPortalContent\nfrom plone.base.permissions import ReplyToItem\nfrom plone.base.permissions import View\nfrom plone.dexterity.content import Container\nfrom Products.CMFCore import permissions\nfrom Products.CMFCore.interfaces import IContentish\nfrom Products.CMFCore.interfaces import ISiteRoot\nfrom Products.CMFCore.permissions import AccessContentsInformation\nfrom Products.CMFCore.permissions import AddPortalMember\nfrom Products.CMFCore.permissions import MailForgottenPassword\nfrom Products.CMFCore.permissions import RequestReview\nfrom Products.CMFCore.permissions import ReviewPortalContent\nfrom Products.CMFCore.permissions import SetOwnPassword\nfrom Products.CMFCore.permissions import SetOwnProperties\nfrom Products.CMFCore.PortalFolder import PortalFolderBase\nfrom Products.CMFCore.PortalObject import PortalObjectBase\nfrom Products.CMFCore.Skinnable import SkinnableObjectManager\nfrom Products.CMFCore.utils import _checkPermission\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import bbb\nfrom Products.Five.component.interfaces import IObjectManagerSite\nfrom zope.event import notify\nfrom zope.interface import classImplementsOnly\nfrom zope.interface import implementedBy\nfrom zope.interface import implementer\nfrom zope.interface.interfaces import ComponentLookupError\nfrom zope.traversing.interfaces import BeforeTraverseEvent\n\n\nif bbb.HAS_ZSERVER:\n from webdav.NullResource import NullResource\n\n\n@implementer(IPloneSiteRoot, ISiteRoot, ISyndicatable, IObjectManagerSite)\nclass PloneSite(Container, SkinnableObjectManager):\n \"\"\"The Plone site object.\"\"\"\n\n security = ClassSecurityInfo()\n meta_type = portal_type = \"Plone Site\"\n\n # Ensure certain attributes come from the correct base class.\n _checkId = SkinnableObjectManager._checkId\n manage_main = PortalFolderBase.manage_main\n\n def __getattr__(self, name):\n try:\n # Try DX\n return super().__getattr__(name)\n except AttributeError:\n # Check portal_skins\n return SkinnableObjectManager.__getattr__(self, name)\n\n def __setattr__(self, name, obj):\n # handle re setting an item as an attribute\n if self._tree is not None and name in self:\n del self[name]\n self[name] = obj\n else:\n super().__setattr__(name, obj)\n\n def __delattr__(self, name):\n try:\n return super().__delattr__(name)\n except AttributeError:\n return self.__delitem__(name)\n\n # Removes the 'Components Folder'\n\n manage_options = Container.manage_options[:2] + Container.manage_options[3:]\n\n __ac_permissions__ = (\n (AccessContentsInformation, ()),\n (AddPortalMember, ()),\n (SetOwnPassword, ()),\n (SetOwnProperties, ()),\n 
(MailForgottenPassword, ()),\n (RequestReview, ()),\n (ReviewPortalContent, ()),\n (AddPortalContent, ()),\n (AddPortalFolders, ()),\n (ListPortalMembers, ()),\n (ReplyToItem, ()),\n (View, (\"isEffective\",)),\n (\n ModifyPortalContent,\n (\n \"manage_cutObjects\",\n \"manage_pasteObjects\",\n \"manage_renameForm\",\n \"manage_renameObject\",\n \"manage_renameObjects\",\n ),\n ),\n )\n\n # Switch off ZMI ordering interface as it assumes a slightly\n # different functionality\n has_order_support = 0\n management_page_charset = \"utf-8\"\n _default_sort_key = \"id\"\n _properties = (\n {\"id\": \"title\", \"type\": \"string\", \"mode\": \"w\"},\n {\"id\": \"description\", \"type\": \"text\", \"mode\": \"w\"},\n )\n title = \"\"\n description = \"\"\n icon = \"misc_/CMFPlone/tool.gif\"\n\n # From PortalObjectBase\n def __init__(self, id, title=\"\"):\n super().__init__(id, title=title)\n components = PersistentComponents(\"++etc++site\")\n components.__parent__ = self\n self.setSiteManager(components)\n\n # From PortalObjectBase\n def __before_publishing_traverse__(self, arg1, arg2=None):\n \"\"\"Pre-traversal hook.\"\"\"\n # XXX hack around a bug(?) in BeforeTraverse.MultiHook\n REQUEST = arg2 or arg1\n\n try:\n notify(BeforeTraverseEvent(self, REQUEST))\n except ComponentLookupError:\n # allow ZMI access, even if the portal's site manager is missing\n pass\n self.setupCurrentSkin(REQUEST)\n\n super().__before_publishing_traverse__(arg1, arg2)\n\n # Concept from OFS.OrderSupport\n @security.protected(permissions.AccessContentsInformation)\n def tpValues(self):\n # Return a list of subobjects, used by ZMI tree tag (and only there).\n # see also https://github.com/plone/Products.CMFPlone/issues/3323\n return sorted(\n (\n obj\n for obj in self.objectValues()\n if getattr(aq_base(obj), \"isPrincipiaFolderish\", False)\n ),\n key=lambda obj: obj.getId(),\n )\n\n def __browser_default__(self, request):\n \"\"\"Set default so we can return whatever we want instead\n of index_html\"\"\"\n return getToolByName(self, \"plone_utils\").browserDefault(self)\n\n def index_html(self):\n \"\"\"Acquire if not present.\"\"\"\n request = getattr(self, \"REQUEST\", None)\n if (\n request is not None\n and \"REQUEST_METHOD\" in request\n and request.maybe_webdav_client\n ):\n method = request[\"REQUEST_METHOD\"]\n if bbb.HAS_ZSERVER and method in (\"PUT\",):\n # Very likely a WebDAV client trying to create something\n result = NullResource(self, \"index_html\")\n setattr(result, \"__replaceable__\", REPLACEABLE)\n return result\n elif method not in (\"GET\", \"HEAD\", \"POST\"):\n raise AttributeError(\"index_html\")\n # Acquire from skin.\n _target = self.__getattr__(\"index_html\")\n result = aq_base(_target).__of__(self)\n setattr(result, \"__replaceable__\", REPLACEABLE)\n return result\n\n index_html = ComputedAttribute(index_html, 1)\n\n def manage_beforeDelete(self, container, item):\n # Should send out an Event before Site is being deleted.\n self.removal_inprogress = 1\n PloneSite.inheritedAttribute(\"manage_beforeDelete\")(self, container, item)\n\n @security.protected(permissions.DeleteObjects)\n def manage_delObjects(self, ids=None, REQUEST=None):\n \"\"\"We need to enforce security.\"\"\"\n if ids is None:\n ids = []\n if isinstance(ids, str):\n ids = [ids]\n for id in ids:\n item = self._getOb(id)\n if not _checkPermission(permissions.DeleteObjects, item):\n raise Unauthorized(\"Do not have permissions to remove this object\")\n return PortalObjectBase.manage_delObjects(self, ids, 
REQUEST=REQUEST)\n\n def view(self):\n \"\"\"Ensure that we get a plain view of the object, via a delegation to\n __call__(), which is defined in BrowserDefaultMixin\n \"\"\"\n return self()\n\n @security.protected(permissions.AccessContentsInformation)\n def folderlistingFolderContents(self, contentFilter=None):\n \"\"\"Calls listFolderContents in protected only by ACI so that\n folder_listing can work without the List folder contents permission.\n\n This is copied from Archetypes Basefolder and is needed by the\n reference browser.\n \"\"\"\n return self.listFolderContents(contentFilter)\n\n def isEffective(self, date):\n # Override DefaultDublinCoreImpl's test, since we are always viewable.\n return 1\n\n\n# Remove the IContentish interface so we don't listen to events that won't\n# apply to the site root, ie handleUidAnnotationEvent\nclassImplementsOnly(PloneSite, implementedBy(PloneSite) - IContentish)\n\nInitializeClass(PloneSite)\n", "path": "Products/CMFPlone/Portal.py"}]} | 3,355 | 219 |
gh_patches_debug_26080 | rasdani/github-patches | git_diff | mirumee__ariadne-246 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exceptions are never delivered to extensions
If you add an extension that implements the `def request_finished(self, context: ContextValue, error: Optional[Exception] = None)` method and raise any kind of Exception in a resolver, the extension's `request_finished` method is always called with `error=None`.
A stack trace shows that the request context manager in `extensions.py` never catches the exception, so the `else` statement is always executed no matter what kind of exception I raise.
--- END ISSUE ---
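A plausible reading of the report above (not confirmed anywhere in this text): graphql-core catches exceptions raised inside field resolvers and reports them on `result.errors`, so nothing propagates out of query execution and into the `except` branch of `ExtensionManager.request`. Below is a minimal standalone sketch of that behaviour; the schema, `failing_resolver` and the `request` context manager are illustrative stand-ins, not code from ariadne.

```python
# Sketch only: assumes graphql-core 3.x is installed.
from contextlib import contextmanager

from graphql import (
    GraphQLField,
    GraphQLObjectType,
    GraphQLSchema,
    GraphQLString,
    graphql_sync,
)


def failing_resolver(obj, info):
    raise RuntimeError("boom")  # raised inside a resolver


schema = GraphQLSchema(
    query=GraphQLObjectType(
        "Query", {"hello": GraphQLField(GraphQLString, resolve=failing_resolver)}
    )
)


@contextmanager
def request():
    try:
        yield
    except Exception as exc:  # never reached for resolver failures
        print("request_finished with error:", exc)
        raise
    else:
        print("request_finished with error: None")


with request():
    result = graphql_sync(schema, "{ hello }")

# The resolver exception is swallowed by graphql-core and surfaces here instead:
print(result.errors)  # [GraphQLError('boom', ...)]
```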
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/contrib/tracing/opentracing.py`
Content:
```
1 from copy import deepcopy
2 from functools import partial
3 from inspect import isawaitable
4 from typing import Any, Callable, Dict, Optional
5
6 from graphql import GraphQLResolveInfo
7 from opentracing import Scope, Tracer, global_tracer
8 from opentracing.ext import tags
9
10 from ...types import ContextValue, Extension, Resolver
11 from .utils import format_path, should_trace
12
13 ArgFilter = Callable[[Dict[str, Any], GraphQLResolveInfo], Dict[str, Any]]
14
15
16 class OpenTracingExtension(Extension):
17 _arg_filter: Optional[ArgFilter]
18 _root_scope: Scope
19 _tracer: Tracer
20
21 def __init__(self, *, arg_filter: Optional[ArgFilter] = None):
22 self._arg_filter = arg_filter
23 self._tracer = global_tracer()
24 self._root_scope = None
25
26 def request_started(self, context: ContextValue):
27 self._root_scope = self._tracer.start_active_span("GraphQL Query")
28 self._root_scope.span.set_tag(tags.COMPONENT, "graphql")
29
30 def request_finished(
31 self, context: ContextValue, error: Optional[Exception] = None
32 ):
33 self._root_scope.close()
34
35 async def resolve(
36 self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs
37 ):
38 if not should_trace(info):
39 result = next_(parent, info, **kwargs)
40 if isawaitable(result):
41 result = await result
42 return result
43
44 with self._tracer.start_active_span(info.field_name) as scope:
45 span = scope.span
46 span.set_tag(tags.COMPONENT, "graphql")
47 span.set_tag("graphql.parentType", info.parent_type.name)
48
49 graphql_path = ".".join(
50 map(str, format_path(info.path)) # pylint: disable=bad-builtin
51 )
52 span.set_tag("graphql.path", graphql_path)
53
54 if kwargs:
55 filtered_kwargs = self.filter_resolver_args(kwargs, info)
56 for kwarg, value in filtered_kwargs.items():
57 span.set_tag(f"graphql.param.{kwarg}", value)
58
59 result = next_(parent, info, **kwargs)
60 if isawaitable(result):
61 result = await result
62 return result
63
64 def filter_resolver_args(
65 self, args: Dict[str, Any], info: GraphQLResolveInfo
66 ) -> Dict[str, Any]:
67 if not self._arg_filter:
68 return args
69
70 return self._arg_filter(deepcopy(args), info)
71
72
73 class OpenTracingExtensionSync(OpenTracingExtension):
74 def resolve(self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs):
75 if not should_trace(info):
76 result = next_(parent, info, **kwargs)
77 return result
78
79 with self._tracer.start_active_span(info.field_name) as scope:
80 span = scope.span
81 span.set_tag(tags.COMPONENT, "graphql")
82 span.set_tag("graphql.parentType", info.parent_type.name)
83
84 graphql_path = ".".join(
85 map(str, format_path(info.path)) # pylint: disable=bad-builtin
86 )
87 span.set_tag("graphql.path", graphql_path)
88
89 if kwargs:
90 filtered_kwargs = self.filter_resolver_args(kwargs, info)
91 for kwarg, value in filtered_kwargs.items():
92 span.set_tag(f"graphql.param.{kwarg}", value)
93
94 result = next_(parent, info, **kwargs)
95 return result
96
97
98 def opentracing_extension(*, arg_filter: Optional[ArgFilter] = None):
99 return partial(OpenTracingExtension, arg_filter=arg_filter)
100
101
102 def opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None):
103 return partial(OpenTracingExtensionSync, arg_filter=arg_filter)
104
```
Path: `ariadne/types.py`
Content:
```
1 from inspect import isawaitable
2 from typing import Any, AsyncGenerator, Callable, List, Optional, Tuple, Union
3 from typing_extensions import Protocol
4
5 from graphql import (
6 DocumentNode,
7 ExecutionResult,
8 GraphQLError,
9 GraphQLResolveInfo,
10 GraphQLSchema,
11 )
12
13 # Note: this should be [Any, GraphQLResolveInfo, **kwargs],
14 # but this is not achieveable with python types yet:
15 # https://github.com/mirumee/ariadne/pull/79
16 Resolver = Callable[..., Any]
17 GraphQLResult = Tuple[bool, dict]
18 SubscriptionResult = Tuple[
19 bool, Union[List[dict], AsyncGenerator[ExecutionResult, None]]
20 ]
21 Subscriber = Callable[..., AsyncGenerator]
22 ErrorFormatter = Callable[[GraphQLError, bool], dict]
23
24 ContextValue = Union[Any, Callable[[Any], Any]]
25 RootValue = Union[Any, Callable[[Optional[Any], DocumentNode], Any]]
26
27
28 class Extension(Protocol):
29 def request_started(self, context: ContextValue):
30 pass # pragma: no cover
31
32 def request_finished(
33 self, context: ContextValue, error: Optional[Exception] = None
34 ):
35 pass # pragma: no cover
36
37 async def resolve(
38 self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs
39 ):
40 result = next_(parent, info, **kwargs)
41 if isawaitable(result):
42 result = await result
43 return result
44
45 def has_errors(self, errors: List[GraphQLError]):
46 pass # pragma: no cover
47
48 def format(self) -> dict:
49 return {} # pragma: no cover
50
51
52 class ExtensionSync(Extension):
53 def resolve(self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs):
54 return next_(parent, info, **kwargs)
55
56
57 class SchemaBindable(Protocol):
58 def bind_to_schema(self, schema: GraphQLSchema) -> None:
59 pass # pragma: no cover
60
```
Path: `ariadne/extensions.py`
Content:
```
1 from contextlib import contextmanager
2 from typing import List, Optional, Type
3
4 from graphql import GraphQLError
5 from graphql.execution import MiddlewareManager
6
7 from .types import ContextValue, Extension
8
9
10 class ExtensionManager:
11 __slots__ = ("extensions", "extensions_reversed")
12
13 def __init__(self, extensions: Optional[List[Type[Extension]]] = None):
14 if extensions:
15 self.extensions = tuple(ext() for ext in extensions)
16 self.extensions_reversed = tuple(reversed(self.extensions))
17 else:
18 self.extensions_reversed = self.extensions = tuple()
19
20 def as_middleware_manager(
21 self, manager: Optional[MiddlewareManager]
22 ) -> MiddlewareManager:
23 if manager and manager.middlewares:
24 return MiddlewareManager(*manager.middlewares, *self.extensions)
25 return MiddlewareManager(*self.extensions)
26
27 @contextmanager
28 def request(self, context: ContextValue):
29 for ext in self.extensions:
30 ext.request_started(context)
31 try:
32 yield
33 except Exception as e:
34 for ext in self.extensions_reversed:
35 ext.request_finished(context, e)
36 raise
37 else:
38 for ext in self.extensions_reversed:
39 ext.request_finished(context)
40
41 def has_errors(self, errors: List[GraphQLError]):
42 for ext in self.extensions:
43 ext.has_errors(errors)
44
45 def format(self) -> dict:
46 data = {}
47 for ext in self.extensions:
48 ext_data = ext.format()
49 if ext_data:
50 data.update(ext_data)
51 return data
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
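The reference patch that follows replaces the `except`/`else` pair with a single `finally` block. As a quick standalone illustration of why that shape is safer here (plain Python, no ariadne imports; the prints stand in for the real `request_started`/`request_finished` hooks): `finally` runs on both the success and the error path, and it does not suppress an in-flight exception.

```python
from contextlib import contextmanager


@contextmanager
def request():
    print("request_started")
    try:
        yield
    finally:
        # Reached whether the body succeeds or raises; an exception raised in
        # the body keeps propagating after this block runs.
        print("request_finished")


with request():
    pass  # success path -> both hooks fire

try:
    with request():
        raise RuntimeError("boom")  # error path -> request_finished still fires
except RuntimeError:
    print("caller still sees the exception")
```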
| diff --git a/ariadne/contrib/tracing/opentracing.py b/ariadne/contrib/tracing/opentracing.py
--- a/ariadne/contrib/tracing/opentracing.py
+++ b/ariadne/contrib/tracing/opentracing.py
@@ -27,9 +27,7 @@
self._root_scope = self._tracer.start_active_span("GraphQL Query")
self._root_scope.span.set_tag(tags.COMPONENT, "graphql")
- def request_finished(
- self, context: ContextValue, error: Optional[Exception] = None
- ):
+ def request_finished(self, context: ContextValue):
self._root_scope.close()
async def resolve(
diff --git a/ariadne/extensions.py b/ariadne/extensions.py
--- a/ariadne/extensions.py
+++ b/ariadne/extensions.py
@@ -30,11 +30,7 @@
ext.request_started(context)
try:
yield
- except Exception as e:
- for ext in self.extensions_reversed:
- ext.request_finished(context, e)
- raise
- else:
+ finally:
for ext in self.extensions_reversed:
ext.request_finished(context)
diff --git a/ariadne/types.py b/ariadne/types.py
--- a/ariadne/types.py
+++ b/ariadne/types.py
@@ -29,9 +29,7 @@
def request_started(self, context: ContextValue):
pass # pragma: no cover
- def request_finished(
- self, context: ContextValue, error: Optional[Exception] = None
- ):
+ def request_finished(self, context: ContextValue):
pass # pragma: no cover
async def resolve(
| {"golden_diff": "diff --git a/ariadne/contrib/tracing/opentracing.py b/ariadne/contrib/tracing/opentracing.py\n--- a/ariadne/contrib/tracing/opentracing.py\n+++ b/ariadne/contrib/tracing/opentracing.py\n@@ -27,9 +27,7 @@\n self._root_scope = self._tracer.start_active_span(\"GraphQL Query\")\n self._root_scope.span.set_tag(tags.COMPONENT, \"graphql\")\n \n- def request_finished(\n- self, context: ContextValue, error: Optional[Exception] = None\n- ):\n+ def request_finished(self, context: ContextValue):\n self._root_scope.close()\n \n async def resolve(\ndiff --git a/ariadne/extensions.py b/ariadne/extensions.py\n--- a/ariadne/extensions.py\n+++ b/ariadne/extensions.py\n@@ -30,11 +30,7 @@\n ext.request_started(context)\n try:\n yield\n- except Exception as e:\n- for ext in self.extensions_reversed:\n- ext.request_finished(context, e)\n- raise\n- else:\n+ finally:\n for ext in self.extensions_reversed:\n ext.request_finished(context)\n \ndiff --git a/ariadne/types.py b/ariadne/types.py\n--- a/ariadne/types.py\n+++ b/ariadne/types.py\n@@ -29,9 +29,7 @@\n def request_started(self, context: ContextValue):\n pass # pragma: no cover\n \n- def request_finished(\n- self, context: ContextValue, error: Optional[Exception] = None\n- ):\n+ def request_finished(self, context: ContextValue):\n pass # pragma: no cover\n \n async def resolve(\n", "issue": "Exceptions are never delivered to extensions\nIf you add an extension that implements the `def request_finished(self, context: ContextValue, error: Optional[Exception] = None)` method, and raise any kind of Exception in the resolver, the `request_finished` method of the extension always gets called without any errors.\r\n\r\nA stack trace shows that the request context manager in `extensions.py` never catches the exception, so the `else` statement is always executed no matter what kind of exception I raise.\n", "before_files": [{"content": "from copy import deepcopy\nfrom functools import partial\nfrom inspect import isawaitable\nfrom typing import Any, Callable, Dict, Optional\n\nfrom graphql import GraphQLResolveInfo\nfrom opentracing import Scope, Tracer, global_tracer\nfrom opentracing.ext import tags\n\nfrom ...types import ContextValue, Extension, Resolver\nfrom .utils import format_path, should_trace\n\nArgFilter = Callable[[Dict[str, Any], GraphQLResolveInfo], Dict[str, Any]]\n\n\nclass OpenTracingExtension(Extension):\n _arg_filter: Optional[ArgFilter]\n _root_scope: Scope\n _tracer: Tracer\n\n def __init__(self, *, arg_filter: Optional[ArgFilter] = None):\n self._arg_filter = arg_filter\n self._tracer = global_tracer()\n self._root_scope = None\n\n def request_started(self, context: ContextValue):\n self._root_scope = self._tracer.start_active_span(\"GraphQL Query\")\n self._root_scope.span.set_tag(tags.COMPONENT, \"graphql\")\n\n def request_finished(\n self, context: ContextValue, error: Optional[Exception] = None\n ):\n self._root_scope.close()\n\n async def resolve(\n self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs\n ):\n if not should_trace(info):\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n with self._tracer.start_active_span(info.field_name) as scope:\n span = scope.span\n span.set_tag(tags.COMPONENT, \"graphql\")\n span.set_tag(\"graphql.parentType\", info.parent_type.name)\n\n graphql_path = \".\".join(\n map(str, format_path(info.path)) # pylint: disable=bad-builtin\n )\n span.set_tag(\"graphql.path\", graphql_path)\n\n if kwargs:\n 
filtered_kwargs = self.filter_resolver_args(kwargs, info)\n for kwarg, value in filtered_kwargs.items():\n span.set_tag(f\"graphql.param.{kwarg}\", value)\n\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n def filter_resolver_args(\n self, args: Dict[str, Any], info: GraphQLResolveInfo\n ) -> Dict[str, Any]:\n if not self._arg_filter:\n return args\n\n return self._arg_filter(deepcopy(args), info)\n\n\nclass OpenTracingExtensionSync(OpenTracingExtension):\n def resolve(self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs):\n if not should_trace(info):\n result = next_(parent, info, **kwargs)\n return result\n\n with self._tracer.start_active_span(info.field_name) as scope:\n span = scope.span\n span.set_tag(tags.COMPONENT, \"graphql\")\n span.set_tag(\"graphql.parentType\", info.parent_type.name)\n\n graphql_path = \".\".join(\n map(str, format_path(info.path)) # pylint: disable=bad-builtin\n )\n span.set_tag(\"graphql.path\", graphql_path)\n\n if kwargs:\n filtered_kwargs = self.filter_resolver_args(kwargs, info)\n for kwarg, value in filtered_kwargs.items():\n span.set_tag(f\"graphql.param.{kwarg}\", value)\n\n result = next_(parent, info, **kwargs)\n return result\n\n\ndef opentracing_extension(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtension, arg_filter=arg_filter)\n\n\ndef opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtensionSync, arg_filter=arg_filter)\n", "path": "ariadne/contrib/tracing/opentracing.py"}, {"content": "from inspect import isawaitable\nfrom typing import Any, AsyncGenerator, Callable, List, Optional, Tuple, Union\nfrom typing_extensions import Protocol\n\nfrom graphql import (\n DocumentNode,\n ExecutionResult,\n GraphQLError,\n GraphQLResolveInfo,\n GraphQLSchema,\n)\n\n# Note: this should be [Any, GraphQLResolveInfo, **kwargs],\n# but this is not achieveable with python types yet:\n# https://github.com/mirumee/ariadne/pull/79\nResolver = Callable[..., Any]\nGraphQLResult = Tuple[bool, dict]\nSubscriptionResult = Tuple[\n bool, Union[List[dict], AsyncGenerator[ExecutionResult, None]]\n]\nSubscriber = Callable[..., AsyncGenerator]\nErrorFormatter = Callable[[GraphQLError, bool], dict]\n\nContextValue = Union[Any, Callable[[Any], Any]]\nRootValue = Union[Any, Callable[[Optional[Any], DocumentNode], Any]]\n\n\nclass Extension(Protocol):\n def request_started(self, context: ContextValue):\n pass # pragma: no cover\n\n def request_finished(\n self, context: ContextValue, error: Optional[Exception] = None\n ):\n pass # pragma: no cover\n\n async def resolve(\n self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs\n ):\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n def has_errors(self, errors: List[GraphQLError]):\n pass # pragma: no cover\n\n def format(self) -> dict:\n return {} # pragma: no cover\n\n\nclass ExtensionSync(Extension):\n def resolve(self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs):\n return next_(parent, info, **kwargs)\n\n\nclass SchemaBindable(Protocol):\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n pass # pragma: no cover\n", "path": "ariadne/types.py"}, {"content": "from contextlib import contextmanager\nfrom typing import List, Optional, Type\n\nfrom graphql import GraphQLError\nfrom graphql.execution import MiddlewareManager\n\nfrom .types import ContextValue, 
Extension\n\n\nclass ExtensionManager:\n __slots__ = (\"extensions\", \"extensions_reversed\")\n\n def __init__(self, extensions: Optional[List[Type[Extension]]] = None):\n if extensions:\n self.extensions = tuple(ext() for ext in extensions)\n self.extensions_reversed = tuple(reversed(self.extensions))\n else:\n self.extensions_reversed = self.extensions = tuple()\n\n def as_middleware_manager(\n self, manager: Optional[MiddlewareManager]\n ) -> MiddlewareManager:\n if manager and manager.middlewares:\n return MiddlewareManager(*manager.middlewares, *self.extensions)\n return MiddlewareManager(*self.extensions)\n\n @contextmanager\n def request(self, context: ContextValue):\n for ext in self.extensions:\n ext.request_started(context)\n try:\n yield\n except Exception as e:\n for ext in self.extensions_reversed:\n ext.request_finished(context, e)\n raise\n else:\n for ext in self.extensions_reversed:\n ext.request_finished(context)\n\n def has_errors(self, errors: List[GraphQLError]):\n for ext in self.extensions:\n ext.has_errors(errors)\n\n def format(self) -> dict:\n data = {}\n for ext in self.extensions:\n ext_data = ext.format()\n if ext_data:\n data.update(ext_data)\n return data\n", "path": "ariadne/extensions.py"}], "after_files": [{"content": "from copy import deepcopy\nfrom functools import partial\nfrom inspect import isawaitable\nfrom typing import Any, Callable, Dict, Optional\n\nfrom graphql import GraphQLResolveInfo\nfrom opentracing import Scope, Tracer, global_tracer\nfrom opentracing.ext import tags\n\nfrom ...types import ContextValue, Extension, Resolver\nfrom .utils import format_path, should_trace\n\nArgFilter = Callable[[Dict[str, Any], GraphQLResolveInfo], Dict[str, Any]]\n\n\nclass OpenTracingExtension(Extension):\n _arg_filter: Optional[ArgFilter]\n _root_scope: Scope\n _tracer: Tracer\n\n def __init__(self, *, arg_filter: Optional[ArgFilter] = None):\n self._arg_filter = arg_filter\n self._tracer = global_tracer()\n self._root_scope = None\n\n def request_started(self, context: ContextValue):\n self._root_scope = self._tracer.start_active_span(\"GraphQL Query\")\n self._root_scope.span.set_tag(tags.COMPONENT, \"graphql\")\n\n def request_finished(self, context: ContextValue):\n self._root_scope.close()\n\n async def resolve(\n self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs\n ):\n if not should_trace(info):\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n with self._tracer.start_active_span(info.field_name) as scope:\n span = scope.span\n span.set_tag(tags.COMPONENT, \"graphql\")\n span.set_tag(\"graphql.parentType\", info.parent_type.name)\n\n graphql_path = \".\".join(\n map(str, format_path(info.path)) # pylint: disable=bad-builtin\n )\n span.set_tag(\"graphql.path\", graphql_path)\n\n if kwargs:\n filtered_kwargs = self.filter_resolver_args(kwargs, info)\n for kwarg, value in filtered_kwargs.items():\n span.set_tag(f\"graphql.param.{kwarg}\", value)\n\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n def filter_resolver_args(\n self, args: Dict[str, Any], info: GraphQLResolveInfo\n ) -> Dict[str, Any]:\n if not self._arg_filter:\n return args\n\n return self._arg_filter(deepcopy(args), info)\n\n\nclass OpenTracingExtensionSync(OpenTracingExtension):\n def resolve(self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs):\n if not should_trace(info):\n result = next_(parent, info, **kwargs)\n return 
result\n\n with self._tracer.start_active_span(info.field_name) as scope:\n span = scope.span\n span.set_tag(tags.COMPONENT, \"graphql\")\n span.set_tag(\"graphql.parentType\", info.parent_type.name)\n\n graphql_path = \".\".join(\n map(str, format_path(info.path)) # pylint: disable=bad-builtin\n )\n span.set_tag(\"graphql.path\", graphql_path)\n\n if kwargs:\n filtered_kwargs = self.filter_resolver_args(kwargs, info)\n for kwarg, value in filtered_kwargs.items():\n span.set_tag(f\"graphql.param.{kwarg}\", value)\n\n result = next_(parent, info, **kwargs)\n return result\n\n\ndef opentracing_extension(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtension, arg_filter=arg_filter)\n\n\ndef opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtensionSync, arg_filter=arg_filter)\n", "path": "ariadne/contrib/tracing/opentracing.py"}, {"content": "from inspect import isawaitable\nfrom typing import Any, AsyncGenerator, Callable, List, Optional, Tuple, Union\nfrom typing_extensions import Protocol\n\nfrom graphql import (\n DocumentNode,\n ExecutionResult,\n GraphQLError,\n GraphQLResolveInfo,\n GraphQLSchema,\n)\n\n# Note: this should be [Any, GraphQLResolveInfo, **kwargs],\n# but this is not achieveable with python types yet:\n# https://github.com/mirumee/ariadne/pull/79\nResolver = Callable[..., Any]\nGraphQLResult = Tuple[bool, dict]\nSubscriptionResult = Tuple[\n bool, Union[List[dict], AsyncGenerator[ExecutionResult, None]]\n]\nSubscriber = Callable[..., AsyncGenerator]\nErrorFormatter = Callable[[GraphQLError, bool], dict]\n\nContextValue = Union[Any, Callable[[Any], Any]]\nRootValue = Union[Any, Callable[[Optional[Any], DocumentNode], Any]]\n\n\nclass Extension(Protocol):\n def request_started(self, context: ContextValue):\n pass # pragma: no cover\n\n def request_finished(self, context: ContextValue):\n pass # pragma: no cover\n\n async def resolve(\n self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs\n ):\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n def has_errors(self, errors: List[GraphQLError]):\n pass # pragma: no cover\n\n def format(self) -> dict:\n return {} # pragma: no cover\n\n\nclass ExtensionSync(Extension):\n def resolve(self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs):\n return next_(parent, info, **kwargs)\n\n\nclass SchemaBindable(Protocol):\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n pass # pragma: no cover\n", "path": "ariadne/types.py"}, {"content": "from contextlib import contextmanager\nfrom typing import List, Optional, Type\n\nfrom graphql import GraphQLError\nfrom graphql.execution import MiddlewareManager\n\nfrom .types import ContextValue, Extension\n\n\nclass ExtensionManager:\n __slots__ = (\"extensions\", \"extensions_reversed\")\n\n def __init__(self, extensions: Optional[List[Type[Extension]]] = None):\n if extensions:\n self.extensions = tuple(ext() for ext in extensions)\n self.extensions_reversed = tuple(reversed(self.extensions))\n else:\n self.extensions_reversed = self.extensions = tuple()\n\n def as_middleware_manager(\n self, manager: Optional[MiddlewareManager]\n ) -> MiddlewareManager:\n if manager and manager.middlewares:\n return MiddlewareManager(*manager.middlewares, *self.extensions)\n return MiddlewareManager(*self.extensions)\n\n @contextmanager\n def request(self, context: ContextValue):\n for ext in self.extensions:\n 
ext.request_started(context)\n try:\n yield\n finally:\n for ext in self.extensions_reversed:\n ext.request_finished(context)\n\n def has_errors(self, errors: List[GraphQLError]):\n for ext in self.extensions:\n ext.has_errors(errors)\n\n def format(self) -> dict:\n data = {}\n for ext in self.extensions:\n ext_data = ext.format()\n if ext_data:\n data.update(ext_data)\n return data\n", "path": "ariadne/extensions.py"}]} | 2,373 | 392 |
gh_patches_debug_13589 | rasdani/github-patches | git_diff | freedomofpress__securedrop-582 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Large files can be downloaded before the file is completely processed
Scenario:
1. As a source, upload a >250 MB file (something large).
2. While this is uploading, from the journalist view, reload the page of “files received from the source”.
3. Once the file shows up, immediately click on the filename in order to download it.
Result:
You will download part of the encrypted file, but possibly not all of it. It can take a while for large files to be encrypted, and in the interim you will get confusing partial copies of the encrypted file.
Expected behavior:
We should either get the whole file, or none of the file. (i.e.: it shouldn’t show up if it is not in a state available for downloading).
--- END ISSUE ---
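A hedged sketch of what appears to be happening, consistent with the reference patch for this issue: the journalist view builds its document list straight from `os.listdir(store.path(sid))`, so a large submission whose encrypted file is still being written already shows up, while its `Submission` row only exists once processing has finished. Filtering the filesystem listing against that table therefore hides partially processed files. The helper name `get_completed_docs` below is invented for illustration and does not exist in the codebase; the imports mirror the ones at the top of `securedrop/journalist.py`.

```python
import os
from datetime import datetime

import store               # as imported in securedrop/journalist.py
from db import Submission  # as imported in securedrop/journalist.py


def get_completed_docs(sid, source_id):
    """Like get_docs(), but only list files that are safe to download."""
    submitted = {
        s.filename
        for s in Submission.query.filter(Submission.source_id == source_id).all()
    }
    docs = []
    for filename in os.listdir(store.path(sid)):
        # Journalist replies are written synchronously and have no Submission
        # row, so they are listed unconditionally.
        if filename not in submitted and not filename.endswith("reply.gpg"):
            continue  # still being uploaded/encrypted -- skip it for now
        os_stat = os.stat(store.path(sid, filename))
        docs.append(dict(
            name=filename,
            date=datetime.fromtimestamp(os_stat.st_mtime),
            size=os_stat.st_size,
        ))
    docs.sort(key=lambda x: int(x['name'].split('-')[0]))
    return docs
```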
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/journalist.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import config
3 import version
4 import crypto_util
5 import store
6 from db import db_session, Source, Submission, SourceStar, get_one_or_else
7
8 import os
9 from datetime import datetime
10 from flask import (Flask, request, render_template, send_file, redirect, flash, url_for, g, abort)
11 from flask_wtf.csrf import CsrfProtect
12 from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
13
14 import background
15
16 app = Flask(__name__, template_folder=config.JOURNALIST_TEMPLATES_DIR)
17 app.config.from_object(config.FlaskConfig)
18 CsrfProtect(app)
19
20 app.jinja_env.globals['version'] = version.__version__
21 if getattr(config, 'CUSTOM_HEADER_IMAGE', None):
22 app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE
23 app.jinja_env.globals['use_custom_header_image'] = True
24 else:
25 app.jinja_env.globals['header_image'] = 'logo.png'
26 app.jinja_env.globals['use_custom_header_image'] = False
27
28
29 @app.template_filter('datetimeformat')
30 def _jinja2_datetimeformat(dt, fmt=None):
31 """Template filter for readable formatting of datetime.datetime"""
32 fmt = fmt or '%b %d, %Y %I:%M %p'
33 return dt.strftime(fmt)
34
35
36 @app.teardown_appcontext
37 def shutdown_session(exception=None):
38 """Automatically remove database sessions at the end of the request, or
39 when the application shuts down"""
40 db_session.remove()
41
42
43 def get_source(sid):
44 """Return a Source object, representing the database row, for the source
45 with id `sid`"""
46 source = None
47 query = Source.query.filter(Source.filesystem_id == sid)
48 source = get_one_or_else(query, app.logger, abort)
49
50 return source
51
52
53 @app.before_request
54 def setup_g():
55 """Store commonly used values in Flask's special g object"""
56 if request.method == 'POST':
57 sid = request.form.get('sid')
58 if sid:
59 g.sid = sid
60 g.source = get_source(sid)
61
62
63 def get_docs(sid):
64 """Get docs associated with source id `sid`, sorted by submission date"""
65 docs = []
66 for filename in os.listdir(store.path(sid)):
67 os_stat = os.stat(store.path(sid, filename))
68 docs.append(dict(
69 name=filename,
70 date=datetime.fromtimestamp(os_stat.st_mtime),
71 size=os_stat.st_size,
72 ))
73 # sort in chronological order
74 docs.sort(key=lambda x: int(x['name'].split('-')[0]))
75 return docs
76
77
78 def make_star_true(sid):
79 source = get_source(sid)
80 if source.star:
81 source.star.starred = True
82 else:
83 source_star = SourceStar(source)
84 db_session.add(source_star)
85
86
87 def make_star_false(sid):
88 source = get_source(sid)
89 source.star.starred = False
90
91
92 @app.route('/col/add_star/<sid>', methods=('POST',))
93 def add_star(sid):
94 make_star_true(sid)
95 db_session.commit()
96 return redirect(url_for('index'))
97
98
99 @app.route("/col/remove_star/<sid>", methods=('POST',))
100 def remove_star(sid):
101 make_star_false(sid)
102 db_session.commit()
103 return redirect(url_for('index'))
104
105
106 @app.route('/')
107 def index():
108 unstarred = []
109 starred = []
110 for source in Source.query.filter_by(pending=False).order_by(Source.last_updated.desc()).all():
111 star = SourceStar.query.filter(SourceStar.source_id == source.id).first()
112 if star and star.starred:
113 starred.append(source)
114 else:
115 unstarred.append(source)
116 source.num_unread = len(
117 Submission.query.filter(Submission.source_id == source.id, Submission.downloaded == False).all())
118
119 return render_template('index.html', unstarred=unstarred, starred=starred)
120
121
122 @app.route('/col/<sid>')
123 def col(sid):
124 source = get_source(sid)
125 docs = get_docs(sid)
126 haskey = crypto_util.getkey(sid)
127 return render_template("col.html", sid=sid,
128 codename=source.journalist_designation, docs=docs, haskey=haskey,
129 flagged=source.flagged)
130
131
132 def delete_collection(source_id):
133 # Delete the source's collection of submissions
134 store.delete_source_directory(source_id)
135
136 # Delete the source's reply keypair
137 crypto_util.delete_reply_keypair(source_id)
138
139 # Delete their entry in the db
140 source = get_source(source_id)
141 db_session.delete(source)
142 db_session.commit()
143
144
145 @app.route('/col/process', methods=('POST',))
146 def col_process():
147 actions = {'delete': col_delete, 'star': col_star, 'un-star': col_un_star}
148 if 'cols_selected' not in request.form:
149 return redirect(url_for('index'))
150
151 cols_selected = request.form.getlist('cols_selected') # getlist is cgi.FieldStorage.getlist
152 action = request.form['action']
153
154 if action not in actions:
155 return abort(500)
156
157 method = actions[action]
158 return method(cols_selected)
159
160
161 def col_star(cols_selected):
162 for sid in cols_selected:
163 make_star_true(sid)
164
165 db_session.commit()
166 return redirect(url_for('index'))
167
168
169 def col_un_star(cols_selected):
170 for source_id in cols_selected:
171 make_star_false(source_id)
172
173 db_session.commit()
174 return redirect(url_for('index'))
175
176
177 @app.route('/col/delete/<sid>', methods=('POST',))
178 def col_delete_single(sid):
179 """deleting a single collection from its /col page"""
180 source = get_source(sid)
181 delete_collection(sid)
182 flash("%s's collection deleted" % (source.journalist_designation,), "notification")
183 return redirect(url_for('index'))
184
185
186 def col_delete(cols_selected):
187 """deleting multiple collections from the index"""
188 if len(cols_selected) < 1:
189 flash("No collections selected to delete!", "warning")
190 else:
191 for source_id in cols_selected:
192 delete_collection(source_id)
193 flash("%s %s deleted" % (
194 len(cols_selected),
195 "collection" if len(cols_selected) == 1 else "collections"
196 ), "notification")
197
198 return redirect(url_for('index'))
199
200
201 @app.route('/col/<sid>/<fn>')
202 def doc(sid, fn):
203 if '..' in fn or fn.startswith('/'):
204 abort(404)
205 try:
206 Submission.query.filter(Submission.filename == fn).one().downloaded = True
207 except NoResultFound as e:
208 app.logger.error("Could not mark " + fn + " as downloaded: %s" % (e,))
209 db_session.commit()
210 return send_file(store.path(sid, fn), mimetype="application/pgp-encrypted")
211
212
213 @app.route('/reply', methods=('POST',))
214 def reply():
215 msg = request.form['msg']
216 g.source.interaction_count += 1
217 filename = "{0}-reply.gpg".format(g.source.interaction_count)
218
219 crypto_util.encrypt(crypto_util.getkey(g.sid), msg, output=
220 store.path(g.sid, filename))
221
222 db_session.commit()
223 return render_template('reply.html', sid=g.sid,
224 codename=g.source.journalist_designation)
225
226
227 @app.route('/regenerate-code', methods=('POST',))
228 def generate_code():
229 g.source.journalist_designation = crypto_util.display_id()
230 db_session.commit()
231 return redirect('/col/' + g.sid)
232
233
234 @app.route('/download_unread/<sid>')
235 def download_unread(sid):
236 id = Source.query.filter(Source.filesystem_id == sid).one().id
237 docs = [doc.filename for doc in
238 Submission.query.filter(Submission.source_id == id, Submission.downloaded == False).all()]
239 return bulk_download(sid, docs)
240
241
242 @app.route('/bulk', methods=('POST',))
243 def bulk():
244 action = request.form['action']
245
246 doc_names_selected = request.form.getlist('doc_names_selected')
247 docs_selected = [
248 doc for doc in get_docs(g.sid) if doc['name'] in doc_names_selected]
249 filenames_selected = [
250 doc['name'] for doc in docs_selected]
251
252 if action == 'download':
253 return bulk_download(g.sid, filenames_selected)
254 elif action == 'delete':
255 return bulk_delete(g.sid, docs_selected)
256 else:
257 abort(400)
258
259
260 def bulk_delete(sid, docs_selected):
261 source = get_source(sid)
262 confirm_delete = bool(request.form.get('confirm_delete', False))
263 if confirm_delete:
264 for doc in docs_selected:
265 db_session.delete(Submission.query.filter(Submission.filename == doc['name']).one())
266 fn = store.path(sid, doc['name'])
267 store.secure_unlink(fn)
268 db_session.commit()
269 return render_template('delete.html', sid=sid,
270 codename=source.journalist_designation,
271 docs_selected=docs_selected, confirm_delete=confirm_delete)
272
273
274 def bulk_download(sid, docs_selected):
275 source = get_source(sid)
276 filenames = []
277 for doc in docs_selected:
278 filenames.append(store.path(sid, doc))
279 try:
280 Submission.query.filter(Submission.filename == doc).one().downloaded = True
281 except NoResultFound as e:
282 app.logger.error("Could not mark " + doc + " as downloaded: %s" % (e,))
283 db_session.commit()
284 zip = store.get_bulk_archive(filenames)
285 return send_file(zip.name, mimetype="application/zip",
286 attachment_filename=source.journalist_designation + ".zip",
287 as_attachment=True)
288
289
290 @app.route('/flag', methods=('POST',))
291 def flag():
292 g.source.flagged = True
293 db_session.commit()
294 return render_template('flag.html', sid=g.sid,
295 codename=g.source.journalist_designation)
296 def write_pidfile():
297 pid = str(os.getpid())
298 with open(config.JOURNALIST_PIDFILE, 'w') as fp:
299 fp.write(pid)
300
301 if __name__ == "__main__":
302 write_pidfile()
303 # TODO make sure debug=False in production
304 app.run(debug=True, host='0.0.0.0', port=8081)
305
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -123,6 +123,11 @@
def col(sid):
source = get_source(sid)
docs = get_docs(sid)
+ submissions = [submission.filename for submission in Submission.query.filter(Submission.source_id == source.id).all()]
+ # Only include documents loaded from the filesystem which are replies or which are also listed in the
+ # submissions table to avoid displaying partially uploaded files (#561).
+ docs = [doc for doc in docs if doc['name'] in submissions or doc['name'].endswith('reply.gpg')]
+
haskey = crypto_util.getkey(sid)
return render_template("col.html", sid=sid,
codename=source.journalist_designation, docs=docs, haskey=haskey,
| {"golden_diff": "diff --git a/securedrop/journalist.py b/securedrop/journalist.py\n--- a/securedrop/journalist.py\n+++ b/securedrop/journalist.py\n@@ -123,6 +123,11 @@\n def col(sid):\n source = get_source(sid)\n docs = get_docs(sid)\n+ submissions = [submission.filename for submission in Submission.query.filter(Submission.source_id == source.id).all()]\n+ # Only include documents loaded from the filesystem which are replies or which are also listed in the\n+ # submissions table to avoid displaying partially uploaded files (#561).\n+ docs = [doc for doc in docs if doc['name'] in submissions or doc['name'].endswith('reply.gpg')]\n+\n haskey = crypto_util.getkey(sid)\n return render_template(\"col.html\", sid=sid,\n codename=source.journalist_designation, docs=docs, haskey=haskey,\n", "issue": "Large files can be downloaded before the file is completely processed\nScenario:\n\nAs a source, upload a >250 MB file (something large)\nWhile this is uploading, from the journalist view, reload the page of \u201cfiles received from the source\u201d\nOnce the file shows up, immediately click on the filename in order to download it.\n\nResult:\n\nYou will download part of the encrypted file, but possibly not all of it. It can take a while for large files to be encrypted, and in the interim you will get confusing partial copies of the encrypted file.\n\nExpected behavior:\n\nWe should either get the whole file, or none of the file. (i.e.: it shouldn\u2019t show up if it is not in a state available for downloading).\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport config\nimport version\nimport crypto_util\nimport store\nfrom db import db_session, Source, Submission, SourceStar, get_one_or_else\n\nimport os\nfrom datetime import datetime\nfrom flask import (Flask, request, render_template, send_file, redirect, flash, url_for, g, abort)\nfrom flask_wtf.csrf import CsrfProtect\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nimport background\n\napp = Flask(__name__, template_folder=config.JOURNALIST_TEMPLATES_DIR)\napp.config.from_object(config.FlaskConfig)\nCsrfProtect(app)\n\napp.jinja_env.globals['version'] = version.__version__\nif getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\nelse:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n\[email protected]_filter('datetimeformat')\ndef _jinja2_datetimeformat(dt, fmt=None):\n \"\"\"Template filter for readable formatting of datetime.datetime\"\"\"\n fmt = fmt or '%b %d, %Y %I:%M %p'\n return dt.strftime(fmt)\n\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n\ndef get_source(sid):\n \"\"\"Return a Source object, representing the database row, for the source\n with id `sid`\"\"\"\n source = None\n query = Source.query.filter(Source.filesystem_id == sid)\n source = get_one_or_else(query, app.logger, abort)\n\n return source\n\n\[email protected]_request\ndef setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n if request.method == 'POST':\n sid = request.form.get('sid')\n if sid:\n g.sid = sid\n g.source = get_source(sid)\n\n\ndef get_docs(sid):\n \"\"\"Get docs associated with source id `sid`, sorted by submission date\"\"\"\n docs = []\n 
for filename in os.listdir(store.path(sid)):\n os_stat = os.stat(store.path(sid, filename))\n docs.append(dict(\n name=filename,\n date=datetime.fromtimestamp(os_stat.st_mtime),\n size=os_stat.st_size,\n ))\n # sort in chronological order\n docs.sort(key=lambda x: int(x['name'].split('-')[0]))\n return docs\n\n\ndef make_star_true(sid):\n source = get_source(sid)\n if source.star:\n source.star.starred = True\n else:\n source_star = SourceStar(source)\n db_session.add(source_star)\n\n\ndef make_star_false(sid):\n source = get_source(sid)\n source.star.starred = False\n\n\[email protected]('/col/add_star/<sid>', methods=('POST',))\ndef add_star(sid):\n make_star_true(sid)\n db_session.commit()\n return redirect(url_for('index'))\n\n\[email protected](\"/col/remove_star/<sid>\", methods=('POST',))\ndef remove_star(sid):\n make_star_false(sid)\n db_session.commit()\n return redirect(url_for('index'))\n\n\[email protected]('/')\ndef index():\n unstarred = []\n starred = []\n for source in Source.query.filter_by(pending=False).order_by(Source.last_updated.desc()).all():\n star = SourceStar.query.filter(SourceStar.source_id == source.id).first()\n if star and star.starred:\n starred.append(source)\n else:\n unstarred.append(source)\n source.num_unread = len(\n Submission.query.filter(Submission.source_id == source.id, Submission.downloaded == False).all())\n\n return render_template('index.html', unstarred=unstarred, starred=starred)\n\n\[email protected]('/col/<sid>')\ndef col(sid):\n source = get_source(sid)\n docs = get_docs(sid)\n haskey = crypto_util.getkey(sid)\n return render_template(\"col.html\", sid=sid,\n codename=source.journalist_designation, docs=docs, haskey=haskey,\n flagged=source.flagged)\n\n\ndef delete_collection(source_id):\n # Delete the source's collection of submissions\n store.delete_source_directory(source_id)\n\n # Delete the source's reply keypair\n crypto_util.delete_reply_keypair(source_id)\n\n # Delete their entry in the db\n source = get_source(source_id)\n db_session.delete(source)\n db_session.commit()\n\n\[email protected]('/col/process', methods=('POST',))\ndef col_process():\n actions = {'delete': col_delete, 'star': col_star, 'un-star': col_un_star}\n if 'cols_selected' not in request.form:\n return redirect(url_for('index'))\n\n cols_selected = request.form.getlist('cols_selected') # getlist is cgi.FieldStorage.getlist\n action = request.form['action']\n\n if action not in actions:\n return abort(500)\n\n method = actions[action]\n return method(cols_selected)\n\n\ndef col_star(cols_selected):\n for sid in cols_selected:\n make_star_true(sid)\n\n db_session.commit()\n return redirect(url_for('index'))\n\n\ndef col_un_star(cols_selected):\n for source_id in cols_selected:\n make_star_false(source_id)\n\n db_session.commit()\n return redirect(url_for('index'))\n\n\[email protected]('/col/delete/<sid>', methods=('POST',))\ndef col_delete_single(sid):\n \"\"\"deleting a single collection from its /col page\"\"\"\n source = get_source(sid)\n delete_collection(sid)\n flash(\"%s's collection deleted\" % (source.journalist_designation,), \"notification\")\n return redirect(url_for('index'))\n\n\ndef col_delete(cols_selected):\n \"\"\"deleting multiple collections from the index\"\"\"\n if len(cols_selected) < 1:\n flash(\"No collections selected to delete!\", \"warning\")\n else:\n for source_id in cols_selected:\n delete_collection(source_id)\n flash(\"%s %s deleted\" % (\n len(cols_selected),\n \"collection\" if len(cols_selected) == 1 else \"collections\"\n ), 
\"notification\")\n\n return redirect(url_for('index'))\n\n\[email protected]('/col/<sid>/<fn>')\ndef doc(sid, fn):\n if '..' in fn or fn.startswith('/'):\n abort(404)\n try:\n Submission.query.filter(Submission.filename == fn).one().downloaded = True\n except NoResultFound as e:\n app.logger.error(\"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n db_session.commit()\n return send_file(store.path(sid, fn), mimetype=\"application/pgp-encrypted\")\n\n\[email protected]('/reply', methods=('POST',))\ndef reply():\n msg = request.form['msg']\n g.source.interaction_count += 1\n filename = \"{0}-reply.gpg\".format(g.source.interaction_count)\n\n crypto_util.encrypt(crypto_util.getkey(g.sid), msg, output=\n store.path(g.sid, filename))\n\n db_session.commit()\n return render_template('reply.html', sid=g.sid,\n codename=g.source.journalist_designation)\n\n\[email protected]('/regenerate-code', methods=('POST',))\ndef generate_code():\n g.source.journalist_designation = crypto_util.display_id()\n db_session.commit()\n return redirect('/col/' + g.sid)\n\n\[email protected]('/download_unread/<sid>')\ndef download_unread(sid):\n id = Source.query.filter(Source.filesystem_id == sid).one().id\n docs = [doc.filename for doc in\n Submission.query.filter(Submission.source_id == id, Submission.downloaded == False).all()]\n return bulk_download(sid, docs)\n\n\[email protected]('/bulk', methods=('POST',))\ndef bulk():\n action = request.form['action']\n\n doc_names_selected = request.form.getlist('doc_names_selected')\n docs_selected = [\n doc for doc in get_docs(g.sid) if doc['name'] in doc_names_selected]\n filenames_selected = [\n doc['name'] for doc in docs_selected]\n\n if action == 'download':\n return bulk_download(g.sid, filenames_selected)\n elif action == 'delete':\n return bulk_delete(g.sid, docs_selected)\n else:\n abort(400)\n\n\ndef bulk_delete(sid, docs_selected):\n source = get_source(sid)\n confirm_delete = bool(request.form.get('confirm_delete', False))\n if confirm_delete:\n for doc in docs_selected:\n db_session.delete(Submission.query.filter(Submission.filename == doc['name']).one())\n fn = store.path(sid, doc['name'])\n store.secure_unlink(fn)\n db_session.commit()\n return render_template('delete.html', sid=sid,\n codename=source.journalist_designation,\n docs_selected=docs_selected, confirm_delete=confirm_delete)\n\n\ndef bulk_download(sid, docs_selected):\n source = get_source(sid)\n filenames = []\n for doc in docs_selected:\n filenames.append(store.path(sid, doc))\n try:\n Submission.query.filter(Submission.filename == doc).one().downloaded = True\n except NoResultFound as e:\n app.logger.error(\"Could not mark \" + doc + \" as downloaded: %s\" % (e,))\n db_session.commit()\n zip = store.get_bulk_archive(filenames)\n return send_file(zip.name, mimetype=\"application/zip\",\n attachment_filename=source.journalist_designation + \".zip\",\n as_attachment=True)\n\n\[email protected]('/flag', methods=('POST',))\ndef flag():\n g.source.flagged = True\n db_session.commit()\n return render_template('flag.html', sid=g.sid,\n codename=g.source.journalist_designation)\ndef write_pidfile():\n pid = str(os.getpid())\n with open(config.JOURNALIST_PIDFILE, 'w') as fp:\n fp.write(pid)\n\nif __name__ == \"__main__\":\n write_pidfile()\n # TODO make sure debug=False in production\n app.run(debug=True, host='0.0.0.0', port=8081)\n", "path": "securedrop/journalist.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport config\nimport version\nimport crypto_util\nimport store\nfrom 
db import db_session, Source, Submission, SourceStar, get_one_or_else\n\nimport os\nfrom datetime import datetime\nfrom flask import (Flask, request, render_template, send_file, redirect, flash, url_for, g, abort)\nfrom flask_wtf.csrf import CsrfProtect\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nimport background\n\napp = Flask(__name__, template_folder=config.JOURNALIST_TEMPLATES_DIR)\napp.config.from_object(config.FlaskConfig)\nCsrfProtect(app)\n\napp.jinja_env.globals['version'] = version.__version__\nif getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\nelse:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n\[email protected]_filter('datetimeformat')\ndef _jinja2_datetimeformat(dt, fmt=None):\n \"\"\"Template filter for readable formatting of datetime.datetime\"\"\"\n fmt = fmt or '%b %d, %Y %I:%M %p'\n return dt.strftime(fmt)\n\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n\ndef get_source(sid):\n \"\"\"Return a Source object, representing the database row, for the source\n with id `sid`\"\"\"\n source = None\n query = Source.query.filter(Source.filesystem_id == sid)\n source = get_one_or_else(query, app.logger, abort)\n\n return source\n\n\[email protected]_request\ndef setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n if request.method == 'POST':\n sid = request.form.get('sid')\n if sid:\n g.sid = sid\n g.source = get_source(sid)\n\n\ndef get_docs(sid):\n \"\"\"Get docs associated with source id `sid`, sorted by submission date\"\"\"\n docs = []\n for filename in os.listdir(store.path(sid)):\n os_stat = os.stat(store.path(sid, filename))\n docs.append(dict(\n name=filename,\n date=datetime.fromtimestamp(os_stat.st_mtime),\n size=os_stat.st_size,\n ))\n # sort in chronological order\n docs.sort(key=lambda x: int(x['name'].split('-')[0]))\n return docs\n\n\ndef make_star_true(sid):\n source = get_source(sid)\n if source.star:\n source.star.starred = True\n else:\n source_star = SourceStar(source)\n db_session.add(source_star)\n\n\ndef make_star_false(sid):\n source = get_source(sid)\n source.star.starred = False\n\n\[email protected]('/col/add_star/<sid>', methods=('POST',))\ndef add_star(sid):\n make_star_true(sid)\n db_session.commit()\n return redirect(url_for('index'))\n\n\[email protected](\"/col/remove_star/<sid>\", methods=('POST',))\ndef remove_star(sid):\n make_star_false(sid)\n db_session.commit()\n return redirect(url_for('index'))\n\n\[email protected]('/')\ndef index():\n unstarred = []\n starred = []\n for source in Source.query.filter_by(pending=False).order_by(Source.last_updated.desc()).all():\n star = SourceStar.query.filter(SourceStar.source_id == source.id).first()\n if star and star.starred:\n starred.append(source)\n else:\n unstarred.append(source)\n source.num_unread = len(\n Submission.query.filter(Submission.source_id == source.id, Submission.downloaded == False).all())\n\n return render_template('index.html', unstarred=unstarred, starred=starred)\n\n\[email protected]('/col/<sid>')\ndef col(sid):\n source = get_source(sid)\n docs = get_docs(sid)\n submissions = [submission.filename for submission in Submission.query.filter(Submission.source_id == 
source.id).all()]\n # Only include documents loaded from the filesystem which are replies or which are also listed in the\n # submissions table to avoid displaying partially uploaded files (#561).\n docs = [doc for doc in docs if doc['name'] in submissions or doc['name'].endswith('reply.gpg')]\n\n haskey = crypto_util.getkey(sid)\n return render_template(\"col.html\", sid=sid,\n codename=source.journalist_designation, docs=docs, haskey=haskey,\n flagged=source.flagged)\n\n\ndef delete_collection(source_id):\n # Delete the source's collection of submissions\n store.delete_source_directory(source_id)\n\n # Delete the source's reply keypair\n crypto_util.delete_reply_keypair(source_id)\n\n # Delete their entry in the db\n source = get_source(source_id)\n db_session.delete(source)\n db_session.commit()\n\n\[email protected]('/col/process', methods=('POST',))\ndef col_process():\n actions = {'delete': col_delete, 'star': col_star, 'un-star': col_un_star}\n if 'cols_selected' not in request.form:\n return redirect(url_for('index'))\n\n cols_selected = request.form.getlist('cols_selected') # getlist is cgi.FieldStorage.getlist\n action = request.form['action']\n\n if action not in actions:\n return abort(500)\n\n method = actions[action]\n return method(cols_selected)\n\n\ndef col_star(cols_selected):\n for sid in cols_selected:\n make_star_true(sid)\n\n db_session.commit()\n return redirect(url_for('index'))\n\n\ndef col_un_star(cols_selected):\n for source_id in cols_selected:\n make_star_false(source_id)\n\n db_session.commit()\n return redirect(url_for('index'))\n\n\[email protected]('/col/delete/<sid>', methods=('POST',))\ndef col_delete_single(sid):\n \"\"\"deleting a single collection from its /col page\"\"\"\n source = get_source(sid)\n delete_collection(sid)\n flash(\"%s's collection deleted\" % (source.journalist_designation,), \"notification\")\n return redirect(url_for('index'))\n\n\ndef col_delete(cols_selected):\n \"\"\"deleting multiple collections from the index\"\"\"\n if len(cols_selected) < 1:\n flash(\"No collections selected to delete!\", \"warning\")\n else:\n for source_id in cols_selected:\n delete_collection(source_id)\n flash(\"%s %s deleted\" % (\n len(cols_selected),\n \"collection\" if len(cols_selected) == 1 else \"collections\"\n ), \"notification\")\n\n return redirect(url_for('index'))\n\n\[email protected]('/col/<sid>/<fn>')\ndef doc(sid, fn):\n if '..' 
in fn or fn.startswith('/'):\n abort(404)\n try:\n Submission.query.filter(Submission.filename == fn).one().downloaded = True\n except NoResultFound as e:\n app.logger.error(\"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n db_session.commit()\n return send_file(store.path(sid, fn), mimetype=\"application/pgp-encrypted\")\n\n\[email protected]('/reply', methods=('POST',))\ndef reply():\n msg = request.form['msg']\n g.source.interaction_count += 1\n filename = \"{0}-reply.gpg\".format(g.source.interaction_count)\n\n crypto_util.encrypt(crypto_util.getkey(g.sid), msg, output=\n store.path(g.sid, filename))\n\n db_session.commit()\n return render_template('reply.html', sid=g.sid,\n codename=g.source.journalist_designation)\n\n\[email protected]('/regenerate-code', methods=('POST',))\ndef generate_code():\n g.source.journalist_designation = crypto_util.display_id()\n db_session.commit()\n return redirect('/col/' + g.sid)\n\n\[email protected]('/download_unread/<sid>')\ndef download_unread(sid):\n id = Source.query.filter(Source.filesystem_id == sid).one().id\n docs = [doc.filename for doc in\n Submission.query.filter(Submission.source_id == id, Submission.downloaded == False).all()]\n return bulk_download(sid, docs)\n\n\[email protected]('/bulk', methods=('POST',))\ndef bulk():\n action = request.form['action']\n\n doc_names_selected = request.form.getlist('doc_names_selected')\n docs_selected = [\n doc for doc in get_docs(g.sid) if doc['name'] in doc_names_selected]\n filenames_selected = [\n doc['name'] for doc in docs_selected]\n\n if action == 'download':\n return bulk_download(g.sid, filenames_selected)\n elif action == 'delete':\n return bulk_delete(g.sid, docs_selected)\n else:\n abort(400)\n\n\ndef bulk_delete(sid, docs_selected):\n source = get_source(sid)\n confirm_delete = bool(request.form.get('confirm_delete', False))\n if confirm_delete:\n for doc in docs_selected:\n db_session.delete(Submission.query.filter(Submission.filename == doc['name']).one())\n fn = store.path(sid, doc['name'])\n store.secure_unlink(fn)\n db_session.commit()\n return render_template('delete.html', sid=sid,\n codename=source.journalist_designation,\n docs_selected=docs_selected, confirm_delete=confirm_delete)\n\n\ndef bulk_download(sid, docs_selected):\n source = get_source(sid)\n filenames = []\n for doc in docs_selected:\n filenames.append(store.path(sid, doc))\n try:\n Submission.query.filter(Submission.filename == doc).one().downloaded = True\n except NoResultFound as e:\n app.logger.error(\"Could not mark \" + doc + \" as downloaded: %s\" % (e,))\n db_session.commit()\n zip = store.get_bulk_archive(filenames)\n return send_file(zip.name, mimetype=\"application/zip\",\n attachment_filename=source.journalist_designation + \".zip\",\n as_attachment=True)\n\n\[email protected]('/flag', methods=('POST',))\ndef flag():\n g.source.flagged = True\n db_session.commit()\n return render_template('flag.html', sid=g.sid,\n codename=g.source.journalist_designation)\ndef write_pidfile():\n pid = str(os.getpid())\n with open(config.JOURNALIST_PIDFILE, 'w') as fp:\n fp.write(pid)\n\nif __name__ == \"__main__\":\n write_pidfile()\n # TODO make sure debug=False in production\n app.run(debug=True, host='0.0.0.0', port=8081)\n", "path": "securedrop/journalist.py"}]} | 3,431 | 206 |