problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64 271-4.1k) | num_tokens_diff (int64 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_27450 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-3512 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Disable thumbnails conversion
### Checklist
- [X] I'm reporting a feature request
- [X] I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
- [X] I've verified that I'm running yt-dlp version **2022.03.08.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
### Description
When embedding thumbnails using `--embed-thumbnail`, they get automatically converted to `.png`
People like me would like to keep them in the original format. (i.e. `webp`), which is compatible with the `.mkv` container when inserted as attachment.
actual output
```
[ThumbnailsConvertor] Converting thumbnail "/path/to/thumbnail.webp" to png
[EmbedThumbnail] ffmpeg: Adding thumbnail to "/path/to/video.mkv"
```
expected output
```
[EmbedThumbnail] ffmpeg: Adding thumbnail to "/path/to/thumbnail.mkv"
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/postprocessor/embedthumbnail.py`
Content:
```
1 import base64
2 import imghdr
3 import os
4 import re
5 import subprocess
6
7 from .common import PostProcessor
8 from .ffmpeg import FFmpegPostProcessor, FFmpegThumbnailsConvertorPP
9 from ..dependencies import mutagen
10 from ..utils import (
11 Popen,
12 PostProcessingError,
13 check_executable,
14 encodeArgument,
15 encodeFilename,
16 error_to_compat_str,
17 prepend_extension,
18 shell_quote,
19 )
20
21 if mutagen:
22 from mutagen.flac import FLAC, Picture
23 from mutagen.mp4 import MP4, MP4Cover
24 from mutagen.oggopus import OggOpus
25 from mutagen.oggvorbis import OggVorbis
26
27
28 class EmbedThumbnailPPError(PostProcessingError):
29 pass
30
31
32 class EmbedThumbnailPP(FFmpegPostProcessor):
33
34 def __init__(self, downloader=None, already_have_thumbnail=False):
35 FFmpegPostProcessor.__init__(self, downloader)
36 self._already_have_thumbnail = already_have_thumbnail
37
38 def _get_thumbnail_resolution(self, filename, thumbnail_dict):
39 def guess():
40 width, height = thumbnail_dict.get('width'), thumbnail_dict.get('height')
41 if width and height:
42 return width, height
43
44 try:
45 size_regex = r',\s*(?P<w>\d+)x(?P<h>\d+)\s*[,\[]'
46 size_result = self.run_ffmpeg(filename, None, ['-hide_banner'], expected_retcodes=(1,))
47 mobj = re.search(size_regex, size_result)
48 if mobj is None:
49 return guess()
50 except PostProcessingError as err:
51 self.report_warning('unable to find the thumbnail resolution; %s' % error_to_compat_str(err))
52 return guess()
53 return int(mobj.group('w')), int(mobj.group('h'))
54
55 def _report_run(self, exe, filename):
56 self.to_screen(f'{exe}: Adding thumbnail to "{filename}"')
57
58 @PostProcessor._restrict_to(images=False)
59 def run(self, info):
60 filename = info['filepath']
61 temp_filename = prepend_extension(filename, 'temp')
62
63 if not info.get('thumbnails'):
64 self.to_screen('There aren\'t any thumbnails to embed')
65 return [], info
66
67 idx = next((-i for i, t in enumerate(info['thumbnails'][::-1], 1) if t.get('filepath')), None)
68 if idx is None:
69 self.to_screen('There are no thumbnails on disk')
70 return [], info
71 thumbnail_filename = info['thumbnails'][idx]['filepath']
72 if not os.path.exists(encodeFilename(thumbnail_filename)):
73 self.report_warning('Skipping embedding the thumbnail because the file is missing.')
74 return [], info
75
76 # Correct extension for WebP file with wrong extension (see #25687, #25717)
77 convertor = FFmpegThumbnailsConvertorPP(self._downloader)
78 convertor.fixup_webp(info, idx)
79
80 original_thumbnail = thumbnail_filename = info['thumbnails'][idx]['filepath']
81
82 # Convert unsupported thumbnail formats to PNG (see #25687, #25717)
83 # Original behavior was to convert to JPG, but since JPG is a lossy
84 # format, there will be some additional data loss.
85 # PNG, on the other hand, is lossless.
86 thumbnail_ext = os.path.splitext(thumbnail_filename)[1][1:]
87 if thumbnail_ext not in ('jpg', 'jpeg', 'png'):
88 thumbnail_filename = convertor.convert_thumbnail(thumbnail_filename, 'png')
89 thumbnail_ext = 'png'
90
91 mtime = os.stat(encodeFilename(filename)).st_mtime
92
93 success = True
94 if info['ext'] == 'mp3':
95 options = [
96 '-c', 'copy', '-map', '0:0', '-map', '1:0', '-write_id3v1', '1', '-id3v2_version', '3',
97 '-metadata:s:v', 'title="Album cover"', '-metadata:s:v', 'comment="Cover (front)"']
98
99 self._report_run('ffmpeg', filename)
100 self.run_ffmpeg_multiple_files([filename, thumbnail_filename], temp_filename, options)
101
102 elif info['ext'] in ['mkv', 'mka']:
103 options = list(self.stream_copy_opts())
104
105 mimetype = 'image/%s' % ('png' if thumbnail_ext == 'png' else 'jpeg')
106 old_stream, new_stream = self.get_stream_number(
107 filename, ('tags', 'mimetype'), mimetype)
108 if old_stream is not None:
109 options.extend(['-map', '-0:%d' % old_stream])
110 new_stream -= 1
111 options.extend([
112 '-attach', thumbnail_filename,
113 '-metadata:s:%d' % new_stream, 'mimetype=%s' % mimetype,
114 '-metadata:s:%d' % new_stream, 'filename=cover.%s' % thumbnail_ext])
115
116 self._report_run('ffmpeg', filename)
117 self.run_ffmpeg(filename, temp_filename, options)
118
119 elif info['ext'] in ['m4a', 'mp4', 'mov']:
120 prefer_atomicparsley = 'embed-thumbnail-atomicparsley' in self.get_param('compat_opts', [])
121 # Method 1: Use mutagen
122 if not mutagen or prefer_atomicparsley:
123 success = False
124 else:
125 try:
126 self._report_run('mutagen', filename)
127 meta = MP4(filename)
128 # NOTE: the 'covr' atom is a non-standard MPEG-4 atom,
129 # Apple iTunes 'M4A' files include the 'moov.udta.meta.ilst' atom.
130 f = {'jpeg': MP4Cover.FORMAT_JPEG, 'png': MP4Cover.FORMAT_PNG}[imghdr.what(thumbnail_filename)]
131 with open(thumbnail_filename, 'rb') as thumbfile:
132 thumb_data = thumbfile.read()
133 meta.tags['covr'] = [MP4Cover(data=thumb_data, imageformat=f)]
134 meta.save()
135 temp_filename = filename
136 except Exception as err:
137 self.report_warning('unable to embed using mutagen; %s' % error_to_compat_str(err))
138 success = False
139
140 # Method 2: Use AtomicParsley
141 if not success:
142 success = True
143 atomicparsley = next((
144 x for x in ['AtomicParsley', 'atomicparsley']
145 if check_executable(x, ['-v'])), None)
146 if atomicparsley is None:
147 self.to_screen('Neither mutagen nor AtomicParsley was found. Falling back to ffmpeg')
148 success = False
149 else:
150 if not prefer_atomicparsley:
151 self.to_screen('mutagen was not found. Falling back to AtomicParsley')
152 cmd = [encodeFilename(atomicparsley, True),
153 encodeFilename(filename, True),
154 encodeArgument('--artwork'),
155 encodeFilename(thumbnail_filename, True),
156 encodeArgument('-o'),
157 encodeFilename(temp_filename, True)]
158 cmd += [encodeArgument(o) for o in self._configuration_args('AtomicParsley')]
159
160 self._report_run('atomicparsley', filename)
161 self.write_debug('AtomicParsley command line: %s' % shell_quote(cmd))
162 p = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
163 stdout, stderr = p.communicate_or_kill()
164 if p.returncode != 0:
165 msg = stderr.decode('utf-8', 'replace').strip()
166 self.report_warning(f'Unable to embed thumbnails using AtomicParsley; {msg}')
167 # for formats that don't support thumbnails (like 3gp) AtomicParsley
168 # won't create to the temporary file
169 if b'No changes' in stdout:
170 self.report_warning('The file format doesn\'t support embedding a thumbnail')
171 success = False
172
173 # Method 3: Use ffmpeg+ffprobe
174 # Thumbnails attached using this method doesn't show up as cover in some cases
175 # See https://github.com/yt-dlp/yt-dlp/issues/2125, https://github.com/yt-dlp/yt-dlp/issues/411
176 if not success:
177 success = True
178 try:
179 options = [*self.stream_copy_opts(), '-map', '1']
180
181 old_stream, new_stream = self.get_stream_number(
182 filename, ('disposition', 'attached_pic'), 1)
183 if old_stream is not None:
184 options.extend(['-map', '-0:%d' % old_stream])
185 new_stream -= 1
186 options.extend(['-disposition:%s' % new_stream, 'attached_pic'])
187
188 self._report_run('ffmpeg', filename)
189 self.run_ffmpeg_multiple_files([filename, thumbnail_filename], temp_filename, options)
190 except PostProcessingError as err:
191 success = False
192 raise EmbedThumbnailPPError(f'Unable to embed using ffprobe & ffmpeg; {err}')
193
194 elif info['ext'] in ['ogg', 'opus', 'flac']:
195 if not mutagen:
196 raise EmbedThumbnailPPError('module mutagen was not found. Please install using `python -m pip install mutagen`')
197
198 self._report_run('mutagen', filename)
199 f = {'opus': OggOpus, 'flac': FLAC, 'ogg': OggVorbis}[info['ext']](filename)
200
201 pic = Picture()
202 pic.mime = 'image/%s' % imghdr.what(thumbnail_filename)
203 with open(thumbnail_filename, 'rb') as thumbfile:
204 pic.data = thumbfile.read()
205 pic.type = 3 # front cover
206 res = self._get_thumbnail_resolution(thumbnail_filename, info['thumbnails'][idx])
207 if res is not None:
208 pic.width, pic.height = res
209
210 if info['ext'] == 'flac':
211 f.add_picture(pic)
212 else:
213 # https://wiki.xiph.org/VorbisComment#METADATA_BLOCK_PICTURE
214 f['METADATA_BLOCK_PICTURE'] = base64.b64encode(pic.write()).decode('ascii')
215 f.save()
216 temp_filename = filename
217
218 else:
219 raise EmbedThumbnailPPError('Supported filetypes for thumbnail embedding are: mp3, mkv/mka, ogg/opus/flac, m4a/mp4/mov')
220
221 if success and temp_filename != filename:
222 os.replace(temp_filename, filename)
223
224 self.try_utime(filename, mtime, mtime)
225
226 files_to_delete = [thumbnail_filename]
227 if self._already_have_thumbnail:
228 if original_thumbnail == thumbnail_filename:
229 files_to_delete = []
230 elif original_thumbnail != thumbnail_filename:
231 files_to_delete.append(original_thumbnail)
232 return files_to_delete, info
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt_dlp/postprocessor/embedthumbnail.py b/yt_dlp/postprocessor/embedthumbnail.py
--- a/yt_dlp/postprocessor/embedthumbnail.py
+++ b/yt_dlp/postprocessor/embedthumbnail.py
@@ -79,12 +79,10 @@
original_thumbnail = thumbnail_filename = info['thumbnails'][idx]['filepath']
- # Convert unsupported thumbnail formats to PNG (see #25687, #25717)
- # Original behavior was to convert to JPG, but since JPG is a lossy
- # format, there will be some additional data loss.
- # PNG, on the other hand, is lossless.
thumbnail_ext = os.path.splitext(thumbnail_filename)[1][1:]
- if thumbnail_ext not in ('jpg', 'jpeg', 'png'):
+ # Convert unsupported thumbnail formats (see #25687, #25717)
+ # PNG is preferred since JPEG is lossy
+ if info['ext'] not in ('mkv', 'mka') and thumbnail_ext not in ('jpg', 'jpeg', 'png'):
thumbnail_filename = convertor.convert_thumbnail(thumbnail_filename, 'png')
thumbnail_ext = 'png'
@@ -102,7 +100,7 @@
elif info['ext'] in ['mkv', 'mka']:
options = list(self.stream_copy_opts())
- mimetype = 'image/%s' % ('png' if thumbnail_ext == 'png' else 'jpeg')
+ mimetype = 'image/%s' % ('jpeg' if thumbnail_ext in ('jpg', 'jpeg') else thumbnail_ext)
old_stream, new_stream = self.get_stream_number(
filename, ('tags', 'mimetype'), mimetype)
if old_stream is not None:
| {"golden_diff": "diff --git a/yt_dlp/postprocessor/embedthumbnail.py b/yt_dlp/postprocessor/embedthumbnail.py\n--- a/yt_dlp/postprocessor/embedthumbnail.py\n+++ b/yt_dlp/postprocessor/embedthumbnail.py\n@@ -79,12 +79,10 @@\n \n original_thumbnail = thumbnail_filename = info['thumbnails'][idx]['filepath']\n \n- # Convert unsupported thumbnail formats to PNG (see #25687, #25717)\n- # Original behavior was to convert to JPG, but since JPG is a lossy\n- # format, there will be some additional data loss.\n- # PNG, on the other hand, is lossless.\n thumbnail_ext = os.path.splitext(thumbnail_filename)[1][1:]\n- if thumbnail_ext not in ('jpg', 'jpeg', 'png'):\n+ # Convert unsupported thumbnail formats (see #25687, #25717)\n+ # PNG is preferred since JPEG is lossy\n+ if info['ext'] not in ('mkv', 'mka') and thumbnail_ext not in ('jpg', 'jpeg', 'png'):\n thumbnail_filename = convertor.convert_thumbnail(thumbnail_filename, 'png')\n thumbnail_ext = 'png'\n \n@@ -102,7 +100,7 @@\n elif info['ext'] in ['mkv', 'mka']:\n options = list(self.stream_copy_opts())\n \n- mimetype = 'image/%s' % ('png' if thumbnail_ext == 'png' else 'jpeg')\n+ mimetype = 'image/%s' % ('jpeg' if thumbnail_ext in ('jpg', 'jpeg') else thumbnail_ext)\n old_stream, new_stream = self.get_stream_number(\n filename, ('tags', 'mimetype'), mimetype)\n if old_stream is not None:\n", "issue": "Disable thumbnails conversion\n### Checklist\r\n\r\n- [X] I'm reporting a feature request\r\n- [X] I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)\r\n- [X] I've verified that I'm running yt-dlp version **2022.03.08.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))\r\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates\r\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\r\n\r\n### Description\r\n\r\nWhen embedding thumbnails using `--embed-thumbnail`, they get automatically converted to `.png`\r\nPeople like me would like to keep them in the original format. (i.e. 
`webp`), which is compatible with the `.mkv` container when inserted as attachment.\r\n\r\nactual output\r\n```\r\n[ThumbnailsConvertor] Converting thumbnail \"/path/to/thumbnail.webp\" to png\r\n[EmbedThumbnail] ffmpeg: Adding thumbnail to \"/path/to/video.mkv\"\r\n```\r\n\r\nexpected output\r\n```\r\n[EmbedThumbnail] ffmpeg: Adding thumbnail to \"/path/to/thumbnail.mkv\"\r\n```\n", "before_files": [{"content": "import base64\nimport imghdr\nimport os\nimport re\nimport subprocess\n\nfrom .common import PostProcessor\nfrom .ffmpeg import FFmpegPostProcessor, FFmpegThumbnailsConvertorPP\nfrom ..dependencies import mutagen\nfrom ..utils import (\n Popen,\n PostProcessingError,\n check_executable,\n encodeArgument,\n encodeFilename,\n error_to_compat_str,\n prepend_extension,\n shell_quote,\n)\n\nif mutagen:\n from mutagen.flac import FLAC, Picture\n from mutagen.mp4 import MP4, MP4Cover\n from mutagen.oggopus import OggOpus\n from mutagen.oggvorbis import OggVorbis\n\n\nclass EmbedThumbnailPPError(PostProcessingError):\n pass\n\n\nclass EmbedThumbnailPP(FFmpegPostProcessor):\n\n def __init__(self, downloader=None, already_have_thumbnail=False):\n FFmpegPostProcessor.__init__(self, downloader)\n self._already_have_thumbnail = already_have_thumbnail\n\n def _get_thumbnail_resolution(self, filename, thumbnail_dict):\n def guess():\n width, height = thumbnail_dict.get('width'), thumbnail_dict.get('height')\n if width and height:\n return width, height\n\n try:\n size_regex = r',\\s*(?P<w>\\d+)x(?P<h>\\d+)\\s*[,\\[]'\n size_result = self.run_ffmpeg(filename, None, ['-hide_banner'], expected_retcodes=(1,))\n mobj = re.search(size_regex, size_result)\n if mobj is None:\n return guess()\n except PostProcessingError as err:\n self.report_warning('unable to find the thumbnail resolution; %s' % error_to_compat_str(err))\n return guess()\n return int(mobj.group('w')), int(mobj.group('h'))\n\n def _report_run(self, exe, filename):\n self.to_screen(f'{exe}: Adding thumbnail to \"{filename}\"')\n\n @PostProcessor._restrict_to(images=False)\n def run(self, info):\n filename = info['filepath']\n temp_filename = prepend_extension(filename, 'temp')\n\n if not info.get('thumbnails'):\n self.to_screen('There aren\\'t any thumbnails to embed')\n return [], info\n\n idx = next((-i for i, t in enumerate(info['thumbnails'][::-1], 1) if t.get('filepath')), None)\n if idx is None:\n self.to_screen('There are no thumbnails on disk')\n return [], info\n thumbnail_filename = info['thumbnails'][idx]['filepath']\n if not os.path.exists(encodeFilename(thumbnail_filename)):\n self.report_warning('Skipping embedding the thumbnail because the file is missing.')\n return [], info\n\n # Correct extension for WebP file with wrong extension (see #25687, #25717)\n convertor = FFmpegThumbnailsConvertorPP(self._downloader)\n convertor.fixup_webp(info, idx)\n\n original_thumbnail = thumbnail_filename = info['thumbnails'][idx]['filepath']\n\n # Convert unsupported thumbnail formats to PNG (see #25687, #25717)\n # Original behavior was to convert to JPG, but since JPG is a lossy\n # format, there will be some additional data loss.\n # PNG, on the other hand, is lossless.\n thumbnail_ext = os.path.splitext(thumbnail_filename)[1][1:]\n if thumbnail_ext not in ('jpg', 'jpeg', 'png'):\n thumbnail_filename = convertor.convert_thumbnail(thumbnail_filename, 'png')\n thumbnail_ext = 'png'\n\n mtime = os.stat(encodeFilename(filename)).st_mtime\n\n success = True\n if info['ext'] == 'mp3':\n options = [\n '-c', 'copy', '-map', '0:0', '-map', 
'1:0', '-write_id3v1', '1', '-id3v2_version', '3',\n '-metadata:s:v', 'title=\"Album cover\"', '-metadata:s:v', 'comment=\"Cover (front)\"']\n\n self._report_run('ffmpeg', filename)\n self.run_ffmpeg_multiple_files([filename, thumbnail_filename], temp_filename, options)\n\n elif info['ext'] in ['mkv', 'mka']:\n options = list(self.stream_copy_opts())\n\n mimetype = 'image/%s' % ('png' if thumbnail_ext == 'png' else 'jpeg')\n old_stream, new_stream = self.get_stream_number(\n filename, ('tags', 'mimetype'), mimetype)\n if old_stream is not None:\n options.extend(['-map', '-0:%d' % old_stream])\n new_stream -= 1\n options.extend([\n '-attach', thumbnail_filename,\n '-metadata:s:%d' % new_stream, 'mimetype=%s' % mimetype,\n '-metadata:s:%d' % new_stream, 'filename=cover.%s' % thumbnail_ext])\n\n self._report_run('ffmpeg', filename)\n self.run_ffmpeg(filename, temp_filename, options)\n\n elif info['ext'] in ['m4a', 'mp4', 'mov']:\n prefer_atomicparsley = 'embed-thumbnail-atomicparsley' in self.get_param('compat_opts', [])\n # Method 1: Use mutagen\n if not mutagen or prefer_atomicparsley:\n success = False\n else:\n try:\n self._report_run('mutagen', filename)\n meta = MP4(filename)\n # NOTE: the 'covr' atom is a non-standard MPEG-4 atom,\n # Apple iTunes 'M4A' files include the 'moov.udta.meta.ilst' atom.\n f = {'jpeg': MP4Cover.FORMAT_JPEG, 'png': MP4Cover.FORMAT_PNG}[imghdr.what(thumbnail_filename)]\n with open(thumbnail_filename, 'rb') as thumbfile:\n thumb_data = thumbfile.read()\n meta.tags['covr'] = [MP4Cover(data=thumb_data, imageformat=f)]\n meta.save()\n temp_filename = filename\n except Exception as err:\n self.report_warning('unable to embed using mutagen; %s' % error_to_compat_str(err))\n success = False\n\n # Method 2: Use AtomicParsley\n if not success:\n success = True\n atomicparsley = next((\n x for x in ['AtomicParsley', 'atomicparsley']\n if check_executable(x, ['-v'])), None)\n if atomicparsley is None:\n self.to_screen('Neither mutagen nor AtomicParsley was found. Falling back to ffmpeg')\n success = False\n else:\n if not prefer_atomicparsley:\n self.to_screen('mutagen was not found. 
Falling back to AtomicParsley')\n cmd = [encodeFilename(atomicparsley, True),\n encodeFilename(filename, True),\n encodeArgument('--artwork'),\n encodeFilename(thumbnail_filename, True),\n encodeArgument('-o'),\n encodeFilename(temp_filename, True)]\n cmd += [encodeArgument(o) for o in self._configuration_args('AtomicParsley')]\n\n self._report_run('atomicparsley', filename)\n self.write_debug('AtomicParsley command line: %s' % shell_quote(cmd))\n p = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = p.communicate_or_kill()\n if p.returncode != 0:\n msg = stderr.decode('utf-8', 'replace').strip()\n self.report_warning(f'Unable to embed thumbnails using AtomicParsley; {msg}')\n # for formats that don't support thumbnails (like 3gp) AtomicParsley\n # won't create to the temporary file\n if b'No changes' in stdout:\n self.report_warning('The file format doesn\\'t support embedding a thumbnail')\n success = False\n\n # Method 3: Use ffmpeg+ffprobe\n # Thumbnails attached using this method doesn't show up as cover in some cases\n # See https://github.com/yt-dlp/yt-dlp/issues/2125, https://github.com/yt-dlp/yt-dlp/issues/411\n if not success:\n success = True\n try:\n options = [*self.stream_copy_opts(), '-map', '1']\n\n old_stream, new_stream = self.get_stream_number(\n filename, ('disposition', 'attached_pic'), 1)\n if old_stream is not None:\n options.extend(['-map', '-0:%d' % old_stream])\n new_stream -= 1\n options.extend(['-disposition:%s' % new_stream, 'attached_pic'])\n\n self._report_run('ffmpeg', filename)\n self.run_ffmpeg_multiple_files([filename, thumbnail_filename], temp_filename, options)\n except PostProcessingError as err:\n success = False\n raise EmbedThumbnailPPError(f'Unable to embed using ffprobe & ffmpeg; {err}')\n\n elif info['ext'] in ['ogg', 'opus', 'flac']:\n if not mutagen:\n raise EmbedThumbnailPPError('module mutagen was not found. 
Please install using `python -m pip install mutagen`')\n\n self._report_run('mutagen', filename)\n f = {'opus': OggOpus, 'flac': FLAC, 'ogg': OggVorbis}[info['ext']](filename)\n\n pic = Picture()\n pic.mime = 'image/%s' % imghdr.what(thumbnail_filename)\n with open(thumbnail_filename, 'rb') as thumbfile:\n pic.data = thumbfile.read()\n pic.type = 3 # front cover\n res = self._get_thumbnail_resolution(thumbnail_filename, info['thumbnails'][idx])\n if res is not None:\n pic.width, pic.height = res\n\n if info['ext'] == 'flac':\n f.add_picture(pic)\n else:\n # https://wiki.xiph.org/VorbisComment#METADATA_BLOCK_PICTURE\n f['METADATA_BLOCK_PICTURE'] = base64.b64encode(pic.write()).decode('ascii')\n f.save()\n temp_filename = filename\n\n else:\n raise EmbedThumbnailPPError('Supported filetypes for thumbnail embedding are: mp3, mkv/mka, ogg/opus/flac, m4a/mp4/mov')\n\n if success and temp_filename != filename:\n os.replace(temp_filename, filename)\n\n self.try_utime(filename, mtime, mtime)\n\n files_to_delete = [thumbnail_filename]\n if self._already_have_thumbnail:\n if original_thumbnail == thumbnail_filename:\n files_to_delete = []\n elif original_thumbnail != thumbnail_filename:\n files_to_delete.append(original_thumbnail)\n return files_to_delete, info\n", "path": "yt_dlp/postprocessor/embedthumbnail.py"}], "after_files": [{"content": "import base64\nimport imghdr\nimport os\nimport re\nimport subprocess\n\nfrom .common import PostProcessor\nfrom .ffmpeg import FFmpegPostProcessor, FFmpegThumbnailsConvertorPP\nfrom ..dependencies import mutagen\nfrom ..utils import (\n Popen,\n PostProcessingError,\n check_executable,\n encodeArgument,\n encodeFilename,\n error_to_compat_str,\n prepend_extension,\n shell_quote,\n)\n\nif mutagen:\n from mutagen.flac import FLAC, Picture\n from mutagen.mp4 import MP4, MP4Cover\n from mutagen.oggopus import OggOpus\n from mutagen.oggvorbis import OggVorbis\n\n\nclass EmbedThumbnailPPError(PostProcessingError):\n pass\n\n\nclass EmbedThumbnailPP(FFmpegPostProcessor):\n\n def __init__(self, downloader=None, already_have_thumbnail=False):\n FFmpegPostProcessor.__init__(self, downloader)\n self._already_have_thumbnail = already_have_thumbnail\n\n def _get_thumbnail_resolution(self, filename, thumbnail_dict):\n def guess():\n width, height = thumbnail_dict.get('width'), thumbnail_dict.get('height')\n if width and height:\n return width, height\n\n try:\n size_regex = r',\\s*(?P<w>\\d+)x(?P<h>\\d+)\\s*[,\\[]'\n size_result = self.run_ffmpeg(filename, None, ['-hide_banner'], expected_retcodes=(1,))\n mobj = re.search(size_regex, size_result)\n if mobj is None:\n return guess()\n except PostProcessingError as err:\n self.report_warning('unable to find the thumbnail resolution; %s' % error_to_compat_str(err))\n return guess()\n return int(mobj.group('w')), int(mobj.group('h'))\n\n def _report_run(self, exe, filename):\n self.to_screen(f'{exe}: Adding thumbnail to \"{filename}\"')\n\n @PostProcessor._restrict_to(images=False)\n def run(self, info):\n filename = info['filepath']\n temp_filename = prepend_extension(filename, 'temp')\n\n if not info.get('thumbnails'):\n self.to_screen('There aren\\'t any thumbnails to embed')\n return [], info\n\n idx = next((-i for i, t in enumerate(info['thumbnails'][::-1], 1) if t.get('filepath')), None)\n if idx is None:\n self.to_screen('There are no thumbnails on disk')\n return [], info\n thumbnail_filename = info['thumbnails'][idx]['filepath']\n if not os.path.exists(encodeFilename(thumbnail_filename)):\n 
self.report_warning('Skipping embedding the thumbnail because the file is missing.')\n return [], info\n\n # Correct extension for WebP file with wrong extension (see #25687, #25717)\n convertor = FFmpegThumbnailsConvertorPP(self._downloader)\n convertor.fixup_webp(info, idx)\n\n original_thumbnail = thumbnail_filename = info['thumbnails'][idx]['filepath']\n\n thumbnail_ext = os.path.splitext(thumbnail_filename)[1][1:]\n # Convert unsupported thumbnail formats (see #25687, #25717)\n # PNG is preferred since JPEG is lossy\n if info['ext'] not in ('mkv', 'mka') and thumbnail_ext not in ('jpg', 'jpeg', 'png'):\n thumbnail_filename = convertor.convert_thumbnail(thumbnail_filename, 'png')\n thumbnail_ext = 'png'\n\n mtime = os.stat(encodeFilename(filename)).st_mtime\n\n success = True\n if info['ext'] == 'mp3':\n options = [\n '-c', 'copy', '-map', '0:0', '-map', '1:0', '-write_id3v1', '1', '-id3v2_version', '3',\n '-metadata:s:v', 'title=\"Album cover\"', '-metadata:s:v', 'comment=\"Cover (front)\"']\n\n self._report_run('ffmpeg', filename)\n self.run_ffmpeg_multiple_files([filename, thumbnail_filename], temp_filename, options)\n\n elif info['ext'] in ['mkv', 'mka']:\n options = list(self.stream_copy_opts())\n\n mimetype = 'image/%s' % ('jpeg' if thumbnail_ext in ('jpg', 'jpeg') else thumbnail_ext)\n old_stream, new_stream = self.get_stream_number(\n filename, ('tags', 'mimetype'), mimetype)\n if old_stream is not None:\n options.extend(['-map', '-0:%d' % old_stream])\n new_stream -= 1\n options.extend([\n '-attach', thumbnail_filename,\n '-metadata:s:%d' % new_stream, 'mimetype=%s' % mimetype,\n '-metadata:s:%d' % new_stream, 'filename=cover.%s' % thumbnail_ext])\n\n self._report_run('ffmpeg', filename)\n self.run_ffmpeg(filename, temp_filename, options)\n\n elif info['ext'] in ['m4a', 'mp4', 'mov']:\n prefer_atomicparsley = 'embed-thumbnail-atomicparsley' in self.get_param('compat_opts', [])\n # Method 1: Use mutagen\n if not mutagen or prefer_atomicparsley:\n success = False\n else:\n try:\n self._report_run('mutagen', filename)\n meta = MP4(filename)\n # NOTE: the 'covr' atom is a non-standard MPEG-4 atom,\n # Apple iTunes 'M4A' files include the 'moov.udta.meta.ilst' atom.\n f = {'jpeg': MP4Cover.FORMAT_JPEG, 'png': MP4Cover.FORMAT_PNG}[imghdr.what(thumbnail_filename)]\n with open(thumbnail_filename, 'rb') as thumbfile:\n thumb_data = thumbfile.read()\n meta.tags['covr'] = [MP4Cover(data=thumb_data, imageformat=f)]\n meta.save()\n temp_filename = filename\n except Exception as err:\n self.report_warning('unable to embed using mutagen; %s' % error_to_compat_str(err))\n success = False\n\n # Method 2: Use AtomicParsley\n if not success:\n success = True\n atomicparsley = next((\n x for x in ['AtomicParsley', 'atomicparsley']\n if check_executable(x, ['-v'])), None)\n if atomicparsley is None:\n self.to_screen('Neither mutagen nor AtomicParsley was found. Falling back to ffmpeg')\n success = False\n else:\n if not prefer_atomicparsley:\n self.to_screen('mutagen was not found. 
Falling back to AtomicParsley')\n cmd = [encodeFilename(atomicparsley, True),\n encodeFilename(filename, True),\n encodeArgument('--artwork'),\n encodeFilename(thumbnail_filename, True),\n encodeArgument('-o'),\n encodeFilename(temp_filename, True)]\n cmd += [encodeArgument(o) for o in self._configuration_args('AtomicParsley')]\n\n self._report_run('atomicparsley', filename)\n self.write_debug('AtomicParsley command line: %s' % shell_quote(cmd))\n p = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = p.communicate_or_kill()\n if p.returncode != 0:\n msg = stderr.decode('utf-8', 'replace').strip()\n self.report_warning(f'Unable to embed thumbnails using AtomicParsley; {msg}')\n # for formats that don't support thumbnails (like 3gp) AtomicParsley\n # won't create to the temporary file\n if b'No changes' in stdout:\n self.report_warning('The file format doesn\\'t support embedding a thumbnail')\n success = False\n\n # Method 3: Use ffmpeg+ffprobe\n # Thumbnails attached using this method doesn't show up as cover in some cases\n # See https://github.com/yt-dlp/yt-dlp/issues/2125, https://github.com/yt-dlp/yt-dlp/issues/411\n if not success:\n success = True\n try:\n options = [*self.stream_copy_opts(), '-map', '1']\n\n old_stream, new_stream = self.get_stream_number(\n filename, ('disposition', 'attached_pic'), 1)\n if old_stream is not None:\n options.extend(['-map', '-0:%d' % old_stream])\n new_stream -= 1\n options.extend(['-disposition:%s' % new_stream, 'attached_pic'])\n\n self._report_run('ffmpeg', filename)\n self.run_ffmpeg_multiple_files([filename, thumbnail_filename], temp_filename, options)\n except PostProcessingError as err:\n success = False\n raise EmbedThumbnailPPError(f'Unable to embed using ffprobe & ffmpeg; {err}')\n\n elif info['ext'] in ['ogg', 'opus', 'flac']:\n if not mutagen:\n raise EmbedThumbnailPPError('module mutagen was not found. Please install using `python -m pip install mutagen`')\n\n self._report_run('mutagen', filename)\n f = {'opus': OggOpus, 'flac': FLAC, 'ogg': OggVorbis}[info['ext']](filename)\n\n pic = Picture()\n pic.mime = 'image/%s' % imghdr.what(thumbnail_filename)\n with open(thumbnail_filename, 'rb') as thumbfile:\n pic.data = thumbfile.read()\n pic.type = 3 # front cover\n res = self._get_thumbnail_resolution(thumbnail_filename, info['thumbnails'][idx])\n if res is not None:\n pic.width, pic.height = res\n\n if info['ext'] == 'flac':\n f.add_picture(pic)\n else:\n # https://wiki.xiph.org/VorbisComment#METADATA_BLOCK_PICTURE\n f['METADATA_BLOCK_PICTURE'] = base64.b64encode(pic.write()).decode('ascii')\n f.save()\n temp_filename = filename\n\n else:\n raise EmbedThumbnailPPError('Supported filetypes for thumbnail embedding are: mp3, mkv/mka, ogg/opus/flac, m4a/mp4/mov')\n\n if success and temp_filename != filename:\n os.replace(temp_filename, filename)\n\n self.try_utime(filename, mtime, mtime)\n\n files_to_delete = [thumbnail_filename]\n if self._already_have_thumbnail:\n if original_thumbnail == thumbnail_filename:\n files_to_delete = []\n elif original_thumbnail != thumbnail_filename:\n files_to_delete.append(original_thumbnail)\n return files_to_delete, info\n", "path": "yt_dlp/postprocessor/embedthumbnail.py"}]} | 3,472 | 392 |
gh_patches_debug_11753 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2802 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Prevent admin journalists from deleting themselves
To prevent lockouts and other forms of usability degradation, I propose one of two things:
1) a full ban on any admin from deleting themselves
2) an admin may not delete themselves if they are the last remaining admin
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/journalist_app/admin.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import os
4
5 from flask import (Blueprint, render_template, request, url_for, redirect, g,
6 current_app, flash, abort)
7 from flask_babel import gettext
8 from sqlalchemy.exc import IntegrityError
9 from sqlalchemy.orm.exc import NoResultFound
10
11 from db import (db_session, Journalist, InvalidUsernameException,
12 PasswordError)
13 from journalist_app.decorators import admin_required
14 from journalist_app.utils import (make_password, commit_account_changes,
15 set_diceware_password)
16 from journalist_app.forms import LogoForm, NewUserForm
17
18
19 def make_blueprint(config):
20 view = Blueprint('admin', __name__)
21
22 @view.route('/', methods=('GET', 'POST'))
23 @admin_required
24 def index():
25 users = Journalist.query.all()
26 return render_template("admin.html", users=users)
27
28 @view.route('/config', methods=('GET', 'POST'))
29 @admin_required
30 def manage_config():
31 form = LogoForm()
32 if form.validate_on_submit():
33 f = form.logo.data
34 static_filepath = os.path.join(config.SECUREDROP_ROOT,
35 "static/i/logo.png")
36 f.save(static_filepath)
37 flash(gettext("Image updated."), "notification")
38 return redirect(url_for("admin.manage_config"))
39 else:
40 return render_template("config.html", form=form)
41
42 @view.route('/add', methods=('GET', 'POST'))
43 @admin_required
44 def add_user():
45 form = NewUserForm()
46 if form.validate_on_submit():
47 form_valid = True
48 username = request.form['username']
49 password = request.form['password']
50 is_admin = bool(request.form.get('is_admin'))
51
52 try:
53 otp_secret = None
54 if request.form.get('is_hotp', False):
55 otp_secret = request.form.get('otp_secret', '')
56 new_user = Journalist(username=username,
57 password=password,
58 is_admin=is_admin,
59 otp_secret=otp_secret)
60 db_session.add(new_user)
61 db_session.commit()
62 except PasswordError:
63 flash(gettext(
64 'There was an error with the autogenerated password. '
65 'User not created. Please try again.'), 'error')
66 form_valid = False
67 except InvalidUsernameException as e:
68 form_valid = False
69 flash('Invalid username: ' + str(e), "error")
70 except IntegrityError as e:
71 db_session.rollback()
72 form_valid = False
73 if "UNIQUE constraint failed: journalists.username" in str(e):
74 flash(gettext("That username is already in use"),
75 "error")
76 else:
77 flash(gettext("An error occurred saving this user"
78 " to the database."
79 " Please inform your administrator."),
80 "error")
81 current_app.logger.error("Adding user "
82 "'{}' failed: {}".format(
83 username, e))
84
85 if form_valid:
86 return redirect(url_for('admin.new_user_two_factor',
87 uid=new_user.id))
88
89 return render_template("admin_add_user.html",
90 password=make_password(config),
91 form=form)
92
93 @view.route('/2fa', methods=('GET', 'POST'))
94 @admin_required
95 def new_user_two_factor():
96 user = Journalist.query.get(request.args['uid'])
97
98 if request.method == 'POST':
99 token = request.form['token']
100 if user.verify_token(token):
101 flash(gettext(
102 "Token in two-factor authentication "
103 "accepted for user {user}.").format(
104 user=user.username),
105 "notification")
106 return redirect(url_for("admin.index"))
107 else:
108 flash(gettext(
109 "Could not verify token in two-factor authentication."),
110 "error")
111
112 return render_template("admin_new_user_two_factor.html", user=user)
113
114 @view.route('/reset-2fa-totp', methods=['POST'])
115 @admin_required
116 def reset_two_factor_totp():
117 uid = request.form['uid']
118 user = Journalist.query.get(uid)
119 user.is_totp = True
120 user.regenerate_totp_shared_secret()
121 db_session.commit()
122 return redirect(url_for('admin.new_user_two_factor', uid=uid))
123
124 @view.route('/reset-2fa-hotp', methods=['POST'])
125 @admin_required
126 def reset_two_factor_hotp():
127 uid = request.form['uid']
128 otp_secret = request.form.get('otp_secret', None)
129 if otp_secret:
130 user = Journalist.query.get(uid)
131 try:
132 user.set_hotp_secret(otp_secret)
133 except TypeError as e:
134 if "Non-hexadecimal digit found" in str(e):
135 flash(gettext(
136 "Invalid secret format: "
137 "please only submit letters A-F and numbers 0-9."),
138 "error")
139 elif "Odd-length string" in str(e):
140 flash(gettext(
141 "Invalid secret format: "
142 "odd-length secret. Did you mistype the secret?"),
143 "error")
144 else:
145 flash(gettext(
146 "An unexpected error occurred! "
147 "Please inform your administrator."), "error")
148 current_app.logger.error(
149 "set_hotp_secret '{}' (id {}) failed: {}".format(
150 otp_secret, uid, e))
151 return render_template('admin_edit_hotp_secret.html', uid=uid)
152 else:
153 db_session.commit()
154 return redirect(url_for('admin.new_user_two_factor', uid=uid))
155 else:
156 return render_template('admin_edit_hotp_secret.html', uid=uid)
157
158 @view.route('/edit/<int:user_id>', methods=('GET', 'POST'))
159 @admin_required
160 def edit_user(user_id):
161 user = Journalist.query.get(user_id)
162
163 if request.method == 'POST':
164 if request.form.get('username', None):
165 new_username = request.form['username']
166
167 try:
168 Journalist.check_username_acceptable(new_username)
169 except InvalidUsernameException as e:
170 flash('Invalid username: ' + str(e), 'error')
171 return redirect(url_for("admin.edit_user",
172 user_id=user_id))
173
174 if new_username == user.username:
175 pass
176 elif Journalist.query.filter_by(
177 username=new_username).one_or_none():
178 flash(gettext(
179 'Username "{user}" already taken.').format(
180 user=new_username),
181 "error")
182 return redirect(url_for("admin.edit_user",
183 user_id=user_id))
184 else:
185 user.username = new_username
186
187 user.is_admin = bool(request.form.get('is_admin'))
188
189 commit_account_changes(user)
190
191 password = make_password(config)
192 return render_template("edit_account.html", user=user,
193 password=password)
194
195 @view.route('/edit/<int:user_id>/new-password', methods=('POST',))
196 @admin_required
197 def set_password(user_id):
198 try:
199 user = Journalist.query.get(user_id)
200 except NoResultFound:
201 abort(404)
202
203 password = request.form.get('password')
204 set_diceware_password(user, password)
205 return redirect(url_for('admin.edit_user', user_id=user_id))
206
207 @view.route('/delete/<int:user_id>', methods=('POST',))
208 @admin_required
209 def delete_user(user_id):
210 user = Journalist.query.get(user_id)
211 if user:
212 db_session.delete(user)
213 db_session.commit()
214 flash(gettext("Deleted user '{user}'").format(
215 user=user.username), "notification")
216 else:
217 current_app.logger.error(
218 "Admin {} tried to delete nonexistent user with pk={}".format(
219 g.user.username, user_id))
220 abort(404)
221
222 return redirect(url_for('admin.index'))
223
224 @view.route('/edit/<int:user_id>/new-password', methods=('POST',))
225 @admin_required
226 def new_password(user_id):
227 try:
228 user = Journalist.query.get(user_id)
229 except NoResultFound:
230 abort(404)
231
232 password = request.form.get('password')
233 set_diceware_password(user, password)
234 return redirect(url_for('admin.edit_user', user_id=user_id))
235
236 @view.route('/ossec-test')
237 @admin_required
238 def ossec_test():
239 current_app.logger.error('This is a test OSSEC alert')
240 flash('Test alert sent. Check your email.', 'notification')
241 return redirect(url_for('admin.manage_config'))
242
243 return view
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/journalist_app/admin.py b/securedrop/journalist_app/admin.py
--- a/securedrop/journalist_app/admin.py
+++ b/securedrop/journalist_app/admin.py
@@ -208,7 +208,13 @@
@admin_required
def delete_user(user_id):
user = Journalist.query.get(user_id)
- if user:
+ if user_id == g.user.id:
+ # Do not flash because the interface already has safe guards.
+ # It can only happen by manually crafting a POST request
+ current_app.logger.error(
+ "Admin {} tried to delete itself".format(g.user.username))
+ abort(403)
+ elif user:
db_session.delete(user)
db_session.commit()
flash(gettext("Deleted user '{user}'").format(
| {"golden_diff": "diff --git a/securedrop/journalist_app/admin.py b/securedrop/journalist_app/admin.py\n--- a/securedrop/journalist_app/admin.py\n+++ b/securedrop/journalist_app/admin.py\n@@ -208,7 +208,13 @@\n @admin_required\n def delete_user(user_id):\n user = Journalist.query.get(user_id)\n- if user:\n+ if user_id == g.user.id:\n+ # Do not flash because the interface already has safe guards.\n+ # It can only happen by manually crafting a POST request\n+ current_app.logger.error(\n+ \"Admin {} tried to delete itself\".format(g.user.username))\n+ abort(403)\n+ elif user:\n db_session.delete(user)\n db_session.commit()\n flash(gettext(\"Deleted user '{user}'\").format(\n", "issue": "Prevent admin journalists from deleting themselves\nTo prevent lockouts and other forms of usability degradation, I propose one of two things:\r\n1) a full ban on any admin from deleting themselves\r\n2) an admin may not delete themselves if they are the last remaining admin\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\n\nfrom flask import (Blueprint, render_template, request, url_for, redirect, g,\n current_app, flash, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom db import (db_session, Journalist, InvalidUsernameException,\n PasswordError)\nfrom journalist_app.decorators import admin_required\nfrom journalist_app.utils import (make_password, commit_account_changes,\n set_diceware_password)\nfrom journalist_app.forms import LogoForm, NewUserForm\n\n\ndef make_blueprint(config):\n view = Blueprint('admin', __name__)\n\n @view.route('/', methods=('GET', 'POST'))\n @admin_required\n def index():\n users = Journalist.query.all()\n return render_template(\"admin.html\", users=users)\n\n @view.route('/config', methods=('GET', 'POST'))\n @admin_required\n def manage_config():\n form = LogoForm()\n if form.validate_on_submit():\n f = form.logo.data\n static_filepath = os.path.join(config.SECUREDROP_ROOT,\n \"static/i/logo.png\")\n f.save(static_filepath)\n flash(gettext(\"Image updated.\"), \"notification\")\n return redirect(url_for(\"admin.manage_config\"))\n else:\n return render_template(\"config.html\", form=form)\n\n @view.route('/add', methods=('GET', 'POST'))\n @admin_required\n def add_user():\n form = NewUserForm()\n if form.validate_on_submit():\n form_valid = True\n username = request.form['username']\n password = request.form['password']\n is_admin = bool(request.form.get('is_admin'))\n\n try:\n otp_secret = None\n if request.form.get('is_hotp', False):\n otp_secret = request.form.get('otp_secret', '')\n new_user = Journalist(username=username,\n password=password,\n is_admin=is_admin,\n otp_secret=otp_secret)\n db_session.add(new_user)\n db_session.commit()\n except PasswordError:\n flash(gettext(\n 'There was an error with the autogenerated password. '\n 'User not created. 
Please try again.'), 'error')\n form_valid = False\n except InvalidUsernameException as e:\n form_valid = False\n flash('Invalid username: ' + str(e), \"error\")\n except IntegrityError as e:\n db_session.rollback()\n form_valid = False\n if \"UNIQUE constraint failed: journalists.username\" in str(e):\n flash(gettext(\"That username is already in use\"),\n \"error\")\n else:\n flash(gettext(\"An error occurred saving this user\"\n \" to the database.\"\n \" Please inform your administrator.\"),\n \"error\")\n current_app.logger.error(\"Adding user \"\n \"'{}' failed: {}\".format(\n username, e))\n\n if form_valid:\n return redirect(url_for('admin.new_user_two_factor',\n uid=new_user.id))\n\n return render_template(\"admin_add_user.html\",\n password=make_password(config),\n form=form)\n\n @view.route('/2fa', methods=('GET', 'POST'))\n @admin_required\n def new_user_two_factor():\n user = Journalist.query.get(request.args['uid'])\n\n if request.method == 'POST':\n token = request.form['token']\n if user.verify_token(token):\n flash(gettext(\n \"Token in two-factor authentication \"\n \"accepted for user {user}.\").format(\n user=user.username),\n \"notification\")\n return redirect(url_for(\"admin.index\"))\n else:\n flash(gettext(\n \"Could not verify token in two-factor authentication.\"),\n \"error\")\n\n return render_template(\"admin_new_user_two_factor.html\", user=user)\n\n @view.route('/reset-2fa-totp', methods=['POST'])\n @admin_required\n def reset_two_factor_totp():\n uid = request.form['uid']\n user = Journalist.query.get(uid)\n user.is_totp = True\n user.regenerate_totp_shared_secret()\n db_session.commit()\n return redirect(url_for('admin.new_user_two_factor', uid=uid))\n\n @view.route('/reset-2fa-hotp', methods=['POST'])\n @admin_required\n def reset_two_factor_hotp():\n uid = request.form['uid']\n otp_secret = request.form.get('otp_secret', None)\n if otp_secret:\n user = Journalist.query.get(uid)\n try:\n user.set_hotp_secret(otp_secret)\n except TypeError as e:\n if \"Non-hexadecimal digit found\" in str(e):\n flash(gettext(\n \"Invalid secret format: \"\n \"please only submit letters A-F and numbers 0-9.\"),\n \"error\")\n elif \"Odd-length string\" in str(e):\n flash(gettext(\n \"Invalid secret format: \"\n \"odd-length secret. Did you mistype the secret?\"),\n \"error\")\n else:\n flash(gettext(\n \"An unexpected error occurred! 
\"\n \"Please inform your administrator.\"), \"error\")\n current_app.logger.error(\n \"set_hotp_secret '{}' (id {}) failed: {}\".format(\n otp_secret, uid, e))\n return render_template('admin_edit_hotp_secret.html', uid=uid)\n else:\n db_session.commit()\n return redirect(url_for('admin.new_user_two_factor', uid=uid))\n else:\n return render_template('admin_edit_hotp_secret.html', uid=uid)\n\n @view.route('/edit/<int:user_id>', methods=('GET', 'POST'))\n @admin_required\n def edit_user(user_id):\n user = Journalist.query.get(user_id)\n\n if request.method == 'POST':\n if request.form.get('username', None):\n new_username = request.form['username']\n\n try:\n Journalist.check_username_acceptable(new_username)\n except InvalidUsernameException as e:\n flash('Invalid username: ' + str(e), 'error')\n return redirect(url_for(\"admin.edit_user\",\n user_id=user_id))\n\n if new_username == user.username:\n pass\n elif Journalist.query.filter_by(\n username=new_username).one_or_none():\n flash(gettext(\n 'Username \"{user}\" already taken.').format(\n user=new_username),\n \"error\")\n return redirect(url_for(\"admin.edit_user\",\n user_id=user_id))\n else:\n user.username = new_username\n\n user.is_admin = bool(request.form.get('is_admin'))\n\n commit_account_changes(user)\n\n password = make_password(config)\n return render_template(\"edit_account.html\", user=user,\n password=password)\n\n @view.route('/edit/<int:user_id>/new-password', methods=('POST',))\n @admin_required\n def set_password(user_id):\n try:\n user = Journalist.query.get(user_id)\n except NoResultFound:\n abort(404)\n\n password = request.form.get('password')\n set_diceware_password(user, password)\n return redirect(url_for('admin.edit_user', user_id=user_id))\n\n @view.route('/delete/<int:user_id>', methods=('POST',))\n @admin_required\n def delete_user(user_id):\n user = Journalist.query.get(user_id)\n if user:\n db_session.delete(user)\n db_session.commit()\n flash(gettext(\"Deleted user '{user}'\").format(\n user=user.username), \"notification\")\n else:\n current_app.logger.error(\n \"Admin {} tried to delete nonexistent user with pk={}\".format(\n g.user.username, user_id))\n abort(404)\n\n return redirect(url_for('admin.index'))\n\n @view.route('/edit/<int:user_id>/new-password', methods=('POST',))\n @admin_required\n def new_password(user_id):\n try:\n user = Journalist.query.get(user_id)\n except NoResultFound:\n abort(404)\n\n password = request.form.get('password')\n set_diceware_password(user, password)\n return redirect(url_for('admin.edit_user', user_id=user_id))\n\n @view.route('/ossec-test')\n @admin_required\n def ossec_test():\n current_app.logger.error('This is a test OSSEC alert')\n flash('Test alert sent. 
Check your email.', 'notification')\n return redirect(url_for('admin.manage_config'))\n\n return view\n", "path": "securedrop/journalist_app/admin.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\n\nfrom flask import (Blueprint, render_template, request, url_for, redirect, g,\n current_app, flash, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom db import (db_session, Journalist, InvalidUsernameException,\n PasswordError)\nfrom journalist_app.decorators import admin_required\nfrom journalist_app.utils import (make_password, commit_account_changes,\n set_diceware_password)\nfrom journalist_app.forms import LogoForm, NewUserForm\n\n\ndef make_blueprint(config):\n view = Blueprint('admin', __name__)\n\n @view.route('/', methods=('GET', 'POST'))\n @admin_required\n def index():\n users = Journalist.query.all()\n return render_template(\"admin.html\", users=users)\n\n @view.route('/config', methods=('GET', 'POST'))\n @admin_required\n def manage_config():\n form = LogoForm()\n if form.validate_on_submit():\n f = form.logo.data\n static_filepath = os.path.join(config.SECUREDROP_ROOT,\n \"static/i/logo.png\")\n f.save(static_filepath)\n flash(gettext(\"Image updated.\"), \"notification\")\n return redirect(url_for(\"admin.manage_config\"))\n else:\n return render_template(\"config.html\", form=form)\n\n @view.route('/add', methods=('GET', 'POST'))\n @admin_required\n def add_user():\n form = NewUserForm()\n if form.validate_on_submit():\n form_valid = True\n username = request.form['username']\n password = request.form['password']\n is_admin = bool(request.form.get('is_admin'))\n\n try:\n otp_secret = None\n if request.form.get('is_hotp', False):\n otp_secret = request.form.get('otp_secret', '')\n new_user = Journalist(username=username,\n password=password,\n is_admin=is_admin,\n otp_secret=otp_secret)\n db_session.add(new_user)\n db_session.commit()\n except PasswordError:\n flash(gettext(\n 'There was an error with the autogenerated password. '\n 'User not created. 
Please try again.'), 'error')\n form_valid = False\n except InvalidUsernameException as e:\n form_valid = False\n flash('Invalid username: ' + str(e), \"error\")\n except IntegrityError as e:\n db_session.rollback()\n form_valid = False\n if \"UNIQUE constraint failed: journalists.username\" in str(e):\n flash(gettext(\"That username is already in use\"),\n \"error\")\n else:\n flash(gettext(\"An error occurred saving this user\"\n \" to the database.\"\n \" Please inform your administrator.\"),\n \"error\")\n current_app.logger.error(\"Adding user \"\n \"'{}' failed: {}\".format(\n username, e))\n\n if form_valid:\n return redirect(url_for('admin.new_user_two_factor',\n uid=new_user.id))\n\n return render_template(\"admin_add_user.html\",\n password=make_password(config),\n form=form)\n\n @view.route('/2fa', methods=('GET', 'POST'))\n @admin_required\n def new_user_two_factor():\n user = Journalist.query.get(request.args['uid'])\n\n if request.method == 'POST':\n token = request.form['token']\n if user.verify_token(token):\n flash(gettext(\n \"Token in two-factor authentication \"\n \"accepted for user {user}.\").format(\n user=user.username),\n \"notification\")\n return redirect(url_for(\"admin.index\"))\n else:\n flash(gettext(\n \"Could not verify token in two-factor authentication.\"),\n \"error\")\n\n return render_template(\"admin_new_user_two_factor.html\", user=user)\n\n @view.route('/reset-2fa-totp', methods=['POST'])\n @admin_required\n def reset_two_factor_totp():\n uid = request.form['uid']\n user = Journalist.query.get(uid)\n user.is_totp = True\n user.regenerate_totp_shared_secret()\n db_session.commit()\n return redirect(url_for('admin.new_user_two_factor', uid=uid))\n\n @view.route('/reset-2fa-hotp', methods=['POST'])\n @admin_required\n def reset_two_factor_hotp():\n uid = request.form['uid']\n otp_secret = request.form.get('otp_secret', None)\n if otp_secret:\n user = Journalist.query.get(uid)\n try:\n user.set_hotp_secret(otp_secret)\n except TypeError as e:\n if \"Non-hexadecimal digit found\" in str(e):\n flash(gettext(\n \"Invalid secret format: \"\n \"please only submit letters A-F and numbers 0-9.\"),\n \"error\")\n elif \"Odd-length string\" in str(e):\n flash(gettext(\n \"Invalid secret format: \"\n \"odd-length secret. Did you mistype the secret?\"),\n \"error\")\n else:\n flash(gettext(\n \"An unexpected error occurred! 
\"\n \"Please inform your administrator.\"), \"error\")\n current_app.logger.error(\n \"set_hotp_secret '{}' (id {}) failed: {}\".format(\n otp_secret, uid, e))\n return render_template('admin_edit_hotp_secret.html', uid=uid)\n else:\n db_session.commit()\n return redirect(url_for('admin.new_user_two_factor', uid=uid))\n else:\n return render_template('admin_edit_hotp_secret.html', uid=uid)\n\n @view.route('/edit/<int:user_id>', methods=('GET', 'POST'))\n @admin_required\n def edit_user(user_id):\n user = Journalist.query.get(user_id)\n\n if request.method == 'POST':\n if request.form.get('username', None):\n new_username = request.form['username']\n\n try:\n Journalist.check_username_acceptable(new_username)\n except InvalidUsernameException as e:\n flash('Invalid username: ' + str(e), 'error')\n return redirect(url_for(\"admin.edit_user\",\n user_id=user_id))\n\n if new_username == user.username:\n pass\n elif Journalist.query.filter_by(\n username=new_username).one_or_none():\n flash(gettext(\n 'Username \"{user}\" already taken.').format(\n user=new_username),\n \"error\")\n return redirect(url_for(\"admin.edit_user\",\n user_id=user_id))\n else:\n user.username = new_username\n\n user.is_admin = bool(request.form.get('is_admin'))\n\n commit_account_changes(user)\n\n password = make_password(config)\n return render_template(\"edit_account.html\", user=user,\n password=password)\n\n @view.route('/edit/<int:user_id>/new-password', methods=('POST',))\n @admin_required\n def set_password(user_id):\n try:\n user = Journalist.query.get(user_id)\n except NoResultFound:\n abort(404)\n\n password = request.form.get('password')\n set_diceware_password(user, password)\n return redirect(url_for('admin.edit_user', user_id=user_id))\n\n @view.route('/delete/<int:user_id>', methods=('POST',))\n @admin_required\n def delete_user(user_id):\n user = Journalist.query.get(user_id)\n if user_id == g.user.id:\n # Do not flash because the interface already has safe guards.\n # It can only happen by manually crafting a POST request\n current_app.logger.error(\n \"Admin {} tried to delete itself\".format(g.user.username))\n abort(403)\n elif user:\n db_session.delete(user)\n db_session.commit()\n flash(gettext(\"Deleted user '{user}'\").format(\n user=user.username), \"notification\")\n else:\n current_app.logger.error(\n \"Admin {} tried to delete nonexistent user with pk={}\".format(\n g.user.username, user_id))\n abort(404)\n\n return redirect(url_for('admin.index'))\n\n @view.route('/edit/<int:user_id>/new-password', methods=('POST',))\n @admin_required\n def new_password(user_id):\n try:\n user = Journalist.query.get(user_id)\n except NoResultFound:\n abort(404)\n\n password = request.form.get('password')\n set_diceware_password(user, password)\n return redirect(url_for('admin.edit_user', user_id=user_id))\n\n @view.route('/ossec-test')\n @admin_required\n def ossec_test():\n current_app.logger.error('This is a test OSSEC alert')\n flash('Test alert sent. Check your email.', 'notification')\n return redirect(url_for('admin.manage_config'))\n\n return view\n", "path": "securedrop/journalist_app/admin.py"}]} | 2,724 | 187 |
gh_patches_debug_7451 | rasdani/github-patches | git_diff | searxng__searxng-1934 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SearXNG Firefox user-agent doesn't match real Firefox
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
2022.10.29-fc9986de
**How did you install SearXNG?**
Script
**What happened?**
SearXNG will send a user-agent such as:
`Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:106.0.2) Gecko/20100101 Firefox/106.0.2`
However, real Firefox looks like this:
`Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:106.0) Gecko/20100101 Firefox/106.0`
Firefox does not send servers the patch version the user is running.
**How To Reproduce**
See https://github.com/searxng/searxng/blob/fc9986de0a21b01c1c8e342a9c4c8c6b96cc7ea2/searx/utils.py#L69-L74 and https://github.com/searxng/searxng/blob/fc9986de0a21b01c1c8e342a9c4c8c6b96cc7ea2/searx/data/useragents.json
To confirm Firefox's behavior, use a Firefox version with non-zero patch version and check its user-agent.
**Expected behavior**
SearXNG creates a User-Agent header that matches the format of the browser it is pretending to be.
**Additional context**
Firefox seems to have stopped sending the patch version with the release of Firefox 16: https://web.archive.org/web/20181123134532/https://www.fxsitecompat.com/en-CA/docs/2012/ua-string-no-longer-contains-patch-level-version-number/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searxng_extra/update/update_firefox_version.py`
Content:
```
1 #!/usr/bin/env python
2 # lint: pylint
3 # SPDX-License-Identifier: AGPL-3.0-or-later
4
5 """Fetch firefox useragent signatures
6
7 Output file: :origin:`searx/data/useragents.json` (:origin:`CI Update data ...
8 <.github/workflows/data-update.yml>`).
9
10 """
11
12 import json
13 import re
14 from os.path import join
15 from urllib.parse import urlparse, urljoin
16 from distutils.version import LooseVersion # pylint: disable=deprecated-module
17
18 import requests
19 from lxml import html
20 from searx import searx_dir
21
22 URL = 'https://ftp.mozilla.org/pub/firefox/releases/'
23 RELEASE_PATH = '/pub/firefox/releases/'
24
25 NORMAL_REGEX = re.compile(r'^[0-9]+\.[0-9](\.[0-9])?$')
26 # BETA_REGEX = re.compile(r'.*[0-9]b([0-9\-a-z]+)$')
27 # ESR_REGEX = re.compile(r'^[0-9]+\.[0-9](\.[0-9])?esr$')
28
29 #
30 useragents = {
31 # fmt: off
32 "versions": (),
33 "os": ('Windows NT 10.0; Win64; x64',
34 'X11; Linux x86_64'),
35 "ua": "Mozilla/5.0 ({os}; rv:{version}) Gecko/20100101 Firefox/{version}",
36 # fmt: on
37 }
38
39
40 def fetch_firefox_versions():
41 resp = requests.get(URL, timeout=2.0)
42 if resp.status_code != 200:
43 raise Exception("Error fetching firefox versions, HTTP code " + resp.status_code)
44 dom = html.fromstring(resp.text)
45 versions = []
46
47 for link in dom.xpath('//a/@href'):
48 url = urlparse(urljoin(URL, link))
49 path = url.path
50 if path.startswith(RELEASE_PATH):
51 version = path[len(RELEASE_PATH) : -1]
52 if NORMAL_REGEX.match(version):
53 versions.append(LooseVersion(version))
54
55 list.sort(versions, reverse=True)
56 return versions
57
58
59 def fetch_firefox_last_versions():
60 versions = fetch_firefox_versions()
61
62 result = []
63 major_last = versions[0].version[0]
64 major_list = (major_last, major_last - 1)
65 for version in versions:
66 major_current = version.version[0]
67 if major_current in major_list:
68 result.append(version.vstring)
69
70 return result
71
72
73 def get_useragents_filename():
74 return join(join(searx_dir, "data"), "useragents.json")
75
76
77 if __name__ == '__main__':
78 useragents["versions"] = fetch_firefox_last_versions()
79 with open(get_useragents_filename(), "w", encoding='utf-8') as f:
80 json.dump(useragents, f, indent=4, ensure_ascii=False)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searxng_extra/update/update_firefox_version.py b/searxng_extra/update/update_firefox_version.py
--- a/searxng_extra/update/update_firefox_version.py
+++ b/searxng_extra/update/update_firefox_version.py
@@ -64,8 +64,11 @@
major_list = (major_last, major_last - 1)
for version in versions:
major_current = version.version[0]
+ minor_current = version.version[1]
if major_current in major_list:
- result.append(version.vstring)
+ user_agent_version = f'{major_current}.{minor_current}'
+ if user_agent_version not in result:
+ result.append(user_agent_version)
return result
| {"golden_diff": "diff --git a/searxng_extra/update/update_firefox_version.py b/searxng_extra/update/update_firefox_version.py\n--- a/searxng_extra/update/update_firefox_version.py\n+++ b/searxng_extra/update/update_firefox_version.py\n@@ -64,8 +64,11 @@\n major_list = (major_last, major_last - 1)\n for version in versions:\n major_current = version.version[0]\n+ minor_current = version.version[1]\n if major_current in major_list:\n- result.append(version.vstring)\n+ user_agent_version = f'{major_current}.{minor_current}'\n+ if user_agent_version not in result:\n+ result.append(user_agent_version)\n \n return result\n", "issue": "SearXNG Firefox user-agent doesn't match real Firefox\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n2022.10.29-fc9986de\r\n**How did you install SearXNG?**\r\nScript\r\n**What happened?**\r\nSearXNG will send a user-agent such as: \r\n`Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:106.0.2) Gecko/20100101 Firefox/106.0.2`\r\nHowever, real Firefox looks like this: \r\n`Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:106.0) Gecko/20100101 Firefox/106.0`\r\n\r\nFirefox does not send servers the patch version the user is running.\r\n\r\n**How To Reproduce**\r\nSee https://github.com/searxng/searxng/blob/fc9986de0a21b01c1c8e342a9c4c8c6b96cc7ea2/searx/utils.py#L69-L74 and https://github.com/searxng/searxng/blob/fc9986de0a21b01c1c8e342a9c4c8c6b96cc7ea2/searx/data/useragents.json\r\n\r\nTo confirm Firefox's behavior, use a Firefox version with non-zero patch version and check its user-agent.\r\n\r\n**Expected behavior**\r\nSearXNG creates a User-Agent header that matches the format of the browser it is pretending to be.\r\n\r\n**Additional context**\r\nFirefox seems to have stopped sending the patch version with the release of Firefox 16: https://web.archive.org/web/20181123134532/https://www.fxsitecompat.com/en-CA/docs/2012/ua-string-no-longer-contains-patch-level-version-number/\n", "before_files": [{"content": "#!/usr/bin/env python\n# lint: pylint\n# SPDX-License-Identifier: AGPL-3.0-or-later\n\n\"\"\"Fetch firefox useragent signatures\n\nOutput file: :origin:`searx/data/useragents.json` (:origin:`CI Update data ...\n<.github/workflows/data-update.yml>`).\n\n\"\"\"\n\nimport json\nimport re\nfrom os.path import join\nfrom urllib.parse import urlparse, urljoin\nfrom distutils.version import LooseVersion # pylint: disable=deprecated-module\n\nimport requests\nfrom lxml import html\nfrom searx import searx_dir\n\nURL = 'https://ftp.mozilla.org/pub/firefox/releases/'\nRELEASE_PATH = '/pub/firefox/releases/'\n\nNORMAL_REGEX = re.compile(r'^[0-9]+\\.[0-9](\\.[0-9])?$')\n# BETA_REGEX = re.compile(r'.*[0-9]b([0-9\\-a-z]+)$')\n# ESR_REGEX = re.compile(r'^[0-9]+\\.[0-9](\\.[0-9])?esr$')\n\n#\nuseragents = {\n # fmt: off\n \"versions\": (),\n \"os\": ('Windows NT 10.0; Win64; x64',\n 'X11; Linux x86_64'),\n \"ua\": \"Mozilla/5.0 ({os}; rv:{version}) Gecko/20100101 Firefox/{version}\",\n # fmt: on\n}\n\n\ndef fetch_firefox_versions():\n resp = requests.get(URL, timeout=2.0)\n if resp.status_code != 200:\n raise Exception(\"Error fetching firefox versions, HTTP code \" + resp.status_code)\n dom = html.fromstring(resp.text)\n versions = []\n\n for link in dom.xpath('//a/@href'):\n url = urlparse(urljoin(URL, link))\n path = url.path\n if path.startswith(RELEASE_PATH):\n version = path[len(RELEASE_PATH) : -1]\n if NORMAL_REGEX.match(version):\n versions.append(LooseVersion(version))\n\n list.sort(versions, reverse=True)\n 
return versions\n\n\ndef fetch_firefox_last_versions():\n versions = fetch_firefox_versions()\n\n result = []\n major_last = versions[0].version[0]\n major_list = (major_last, major_last - 1)\n for version in versions:\n major_current = version.version[0]\n if major_current in major_list:\n result.append(version.vstring)\n\n return result\n\n\ndef get_useragents_filename():\n return join(join(searx_dir, \"data\"), \"useragents.json\")\n\n\nif __name__ == '__main__':\n useragents[\"versions\"] = fetch_firefox_last_versions()\n with open(get_useragents_filename(), \"w\", encoding='utf-8') as f:\n json.dump(useragents, f, indent=4, ensure_ascii=False)\n", "path": "searxng_extra/update/update_firefox_version.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# lint: pylint\n# SPDX-License-Identifier: AGPL-3.0-or-later\n\n\"\"\"Fetch firefox useragent signatures\n\nOutput file: :origin:`searx/data/useragents.json` (:origin:`CI Update data ...\n<.github/workflows/data-update.yml>`).\n\n\"\"\"\n\nimport json\nimport re\nfrom os.path import join\nfrom urllib.parse import urlparse, urljoin\nfrom distutils.version import LooseVersion # pylint: disable=deprecated-module\n\nimport requests\nfrom lxml import html\nfrom searx import searx_dir\n\nURL = 'https://ftp.mozilla.org/pub/firefox/releases/'\nRELEASE_PATH = '/pub/firefox/releases/'\n\nNORMAL_REGEX = re.compile(r'^[0-9]+\\.[0-9](\\.[0-9])?$')\n# BETA_REGEX = re.compile(r'.*[0-9]b([0-9\\-a-z]+)$')\n# ESR_REGEX = re.compile(r'^[0-9]+\\.[0-9](\\.[0-9])?esr$')\n\n#\nuseragents = {\n # fmt: off\n \"versions\": (),\n \"os\": ('Windows NT 10.0; Win64; x64',\n 'X11; Linux x86_64'),\n \"ua\": \"Mozilla/5.0 ({os}; rv:{version}) Gecko/20100101 Firefox/{version}\",\n # fmt: on\n}\n\n\ndef fetch_firefox_versions():\n resp = requests.get(URL, timeout=2.0)\n if resp.status_code != 200:\n raise Exception(\"Error fetching firefox versions, HTTP code \" + resp.status_code)\n dom = html.fromstring(resp.text)\n versions = []\n\n for link in dom.xpath('//a/@href'):\n url = urlparse(urljoin(URL, link))\n path = url.path\n if path.startswith(RELEASE_PATH):\n version = path[len(RELEASE_PATH) : -1]\n if NORMAL_REGEX.match(version):\n versions.append(LooseVersion(version))\n\n list.sort(versions, reverse=True)\n return versions\n\n\ndef fetch_firefox_last_versions():\n versions = fetch_firefox_versions()\n\n result = []\n major_last = versions[0].version[0]\n major_list = (major_last, major_last - 1)\n for version in versions:\n major_current = version.version[0]\n minor_current = version.version[1]\n if major_current in major_list:\n user_agent_version = f'{major_current}.{minor_current}'\n if user_agent_version not in result:\n result.append(user_agent_version)\n\n return result\n\n\ndef get_useragents_filename():\n return join(join(searx_dir, \"data\"), \"useragents.json\")\n\n\nif __name__ == '__main__':\n useragents[\"versions\"] = fetch_firefox_last_versions()\n with open(get_useragents_filename(), \"w\", encoding='utf-8') as f:\n json.dump(useragents, f, indent=4, ensure_ascii=False)\n", "path": "searxng_extra/update/update_firefox_version.py"}]} | 1,511 | 161 |
gh_patches_debug_3085 | rasdani/github-patches | git_diff | WordPress__openverse-api-599 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Some waveform peaks have negative values
## Description
<!-- Concisely describe the bug. -->
The frontend expects peaks to be in the range [0..1]; however, some peaks returned from the API can have negative values.
## Reproduction
<!-- Provide detailed steps to reproduce the bug. -->
1. Go to https://api.openverse.engineering/v1/audio/727b9979-98f1-4604-9097-0a8b6fa68f2e/
2. Look at the last values of the long array.
3. See error: `...-0.00036,-0.00099,-0.00162,0.00012,0.0]}`
## Expectation
<!-- Concisely describe what you expected to happen. -->
All the peaks should be in the [0..1] range.
## Additional context
<!-- Add any other context about the problem here; or delete the section entirely. -->
We could use a hack to fix this on the front end by setting negative values to 0 in the meantime, but the solution should be on the API side.
Could this possibly be some leftovers from the mock waveform data?
## Resolution
<!-- Replace the [ ] with [x] to check the box. -->
- [ ] 🙋 I would be interested in resolving this bug.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/catalog/api/utils/waveform.py`
Content:
```
1 import json
2 import logging
3 import math
4 import mimetypes
5 import os
6 import pathlib
7 import shutil
8 import subprocess
9 from typing import List
10
11 import requests
12
13
14 log = logging.getLogger(__name__)
15
16 TMP_DIR = pathlib.Path("/tmp").resolve()
17 UA_STRING = "OpenverseWaveform/0.0 (https://wordpress.org/openverse)"
18
19
20 def ext_from_url(url):
21 """
22 Get the file extension from the given URL. Looks at the last part of the URL
23 path, and returns the string after the last dot.
24
25 :param url: the URL to the file whose extension is being determined
26 :returns: the file extension or ``None``
27 """
28 file_name = url.split("/")[-1]
29 if "." in file_name:
30 ext = file_name.split(".")[-1]
31 return f".{ext}"
32 else:
33 return None
34
35
36 def download_audio(url, identifier):
37 """
38 Download the audio from the given URL to a location on the disk.
39
40 :param url: the URL to the file being downloaded
41 :param identifier: the identifier of the media object to name the file
42 :returns: the name of the file on the disk
43 """
44 log.info(f"Downloading file at {url}")
45
46 headers = {"User-Agent": UA_STRING}
47 with requests.get(url, stream=True, headers=headers) as res:
48 log.debug(f"Response code: {res.status_code}")
49 mimetype = res.headers["content-type"]
50 log.debug(f"MIME type: {mimetype}")
51 ext = ext_from_url(url) or mimetypes.guess_extension(mimetype)
52 if ext is None:
53 raise ValueError("Could not identify media extension")
54 file_name = f"audio-{identifier}{ext}"
55 log.debug(f"File name: {file_name}")
56 with open(TMP_DIR.joinpath(file_name), "wb") as file:
57 shutil.copyfileobj(res.raw, file)
58 return file_name
59
60
61 def generate_waveform(file_name, duration):
62 """
63 Generate the waveform for the file by invoking the ``audiowaveform`` binary.
64 The Python module ``subprocess`` is used to execute the binary and get the
65 results that it emits to STDOUT.
66
67 :param file_name: the name of the downloaded audio file
68 :param duration: the duration of the audio to determine pixels per second
69 """
70 log.info("Invoking audiowaveform")
71
72 pps = math.ceil(1e6 / duration) # approx 1000 points in total
73 args = [
74 "audiowaveform",
75 "--input-filename",
76 file_name,
77 "--output-format",
78 "json",
79 "--pixels-per-second",
80 str(pps),
81 ]
82 log.debug(f'Command: {" ".join(args)}')
83 proc = subprocess.run(args, cwd=TMP_DIR, check=True, capture_output=True)
84 log.debug(f"Subprocess exit code: {proc.returncode}")
85 return proc.stdout
86
87
88 def process_waveform_output(json_out):
89 """
90 Parse the waveform output generated by the ``audiowaveform`` binary. The
91 output consists of alternating positive and negative values, that are almost
92 equal in amplitude. We discard the negative values. We also scale down the
93 amplitudes by the largest value so that they lie in the range [0, 1].
94
95 :param json_out: the JSON output generated by ``audiowaveform``
96 :returns: the list of peaks
97 """
98 log.info("Transforming points")
99
100 output = json.loads(json_out)
101 data = output["data"]
102 log.debug(f"Original umber of points: {len(data)}")
103
104 transformed_data = []
105 max_val = 0
106 for idx, val in enumerate(data):
107 if idx % 2 == 0:
108 continue
109 transformed_data.append(val)
110 if val > max_val:
111 max_val = val
112 transformed_data = [round(val / max_val, 5) for val in transformed_data]
113 log.debug(f"Transformed number of points: {len(transformed_data)}")
114 return transformed_data
115
116
117 def cleanup(file_name):
118 """
119 Delete the audio file after it has been processed.
120
121 :param file_name: the name of the file to delete
122 """
123 log.info(f"Deleting {file_name}")
124
125 file_path = TMP_DIR.joinpath(file_name)
126 log.debug(f"File path: {file_path}")
127 if file_path.exists():
128 log.info(f"Deleting file {file_path}")
129 os.remove(file_path)
130 else:
131 log.info("File not found, nothing deleted")
132
133
134 def generate_peaks(audio) -> List[float]:
135 file_name = None
136 try:
137 file_name = download_audio(audio.url, audio.identifier)
138 awf_out = generate_waveform(file_name, audio.duration)
139 return process_waveform_output(awf_out)
140 finally:
141 if file_name is not None:
142 cleanup(file_name)
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/api/catalog/api/utils/waveform.py b/api/catalog/api/utils/waveform.py
--- a/api/catalog/api/utils/waveform.py
+++ b/api/catalog/api/utils/waveform.py
@@ -106,6 +106,8 @@
for idx, val in enumerate(data):
if idx % 2 == 0:
continue
+ if val < 0: # Any other odd values are negligible and can be ignored
+ val = 0
transformed_data.append(val)
if val > max_val:
max_val = val
| {"golden_diff": "diff --git a/api/catalog/api/utils/waveform.py b/api/catalog/api/utils/waveform.py\n--- a/api/catalog/api/utils/waveform.py\n+++ b/api/catalog/api/utils/waveform.py\n@@ -106,6 +106,8 @@\n for idx, val in enumerate(data):\n if idx % 2 == 0:\n continue\n+ if val < 0: # Any other odd values are negligible and can be ignored\n+ val = 0\n transformed_data.append(val)\n if val > max_val:\n max_val = val\n", "issue": "Some waveform peaks have negative values\n## Description\r\n<!-- Concisely describe the bug. -->\r\nThe frontend expects peaks to be in range of [0..1], however, some peaks returned from the API can have negative values.\r\n\r\n## Reproduction\r\n<!-- Provide detailed steps to reproduce the bug. -->\r\n1. Go to https://api.openverse.engineering/v1/audio/727b9979-98f1-4604-9097-0a8b6fa68f2e/\r\n2. Look at the last values of the long array.\r\n3. See error: `...-0.00036,-0.00099,-0.00162,0.00012,0.0]}`\r\n\r\n## Expectation\r\n<!-- Concisely describe what you expected to happen. -->\r\nAll the peaks should be in [0..1] range.\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem here; or delete the section entirely. -->\r\nWe could use a hack to fix this on the front end by setting negative values to 0 in the mean time, but the solution should be on the API side.\r\nCould this possibly be some leftovers from the mock waveform data?\r\n## Resolution\r\n<!-- Replace the [ ] with [x] to check the box. -->\r\n- [ ] \ud83d\ude4b I would be interested in resolving this bug.\r\n\n", "before_files": [{"content": "import json\nimport logging\nimport math\nimport mimetypes\nimport os\nimport pathlib\nimport shutil\nimport subprocess\nfrom typing import List\n\nimport requests\n\n\nlog = logging.getLogger(__name__)\n\nTMP_DIR = pathlib.Path(\"/tmp\").resolve()\nUA_STRING = \"OpenverseWaveform/0.0 (https://wordpress.org/openverse)\"\n\n\ndef ext_from_url(url):\n \"\"\"\n Get the file extension from the given URL. 
Looks at the last part of the URL\n path, and returns the string after the last dot.\n\n :param url: the URL to the file whose extension is being determined\n :returns: the file extension or ``None``\n \"\"\"\n file_name = url.split(\"/\")[-1]\n if \".\" in file_name:\n ext = file_name.split(\".\")[-1]\n return f\".{ext}\"\n else:\n return None\n\n\ndef download_audio(url, identifier):\n \"\"\"\n Download the audio from the given URL to a location on the disk.\n\n :param url: the URL to the file being downloaded\n :param identifier: the identifier of the media object to name the file\n :returns: the name of the file on the disk\n \"\"\"\n log.info(f\"Downloading file at {url}\")\n\n headers = {\"User-Agent\": UA_STRING}\n with requests.get(url, stream=True, headers=headers) as res:\n log.debug(f\"Response code: {res.status_code}\")\n mimetype = res.headers[\"content-type\"]\n log.debug(f\"MIME type: {mimetype}\")\n ext = ext_from_url(url) or mimetypes.guess_extension(mimetype)\n if ext is None:\n raise ValueError(\"Could not identify media extension\")\n file_name = f\"audio-{identifier}{ext}\"\n log.debug(f\"File name: {file_name}\")\n with open(TMP_DIR.joinpath(file_name), \"wb\") as file:\n shutil.copyfileobj(res.raw, file)\n return file_name\n\n\ndef generate_waveform(file_name, duration):\n \"\"\"\n Generate the waveform for the file by invoking the ``audiowaveform`` binary.\n The Python module ``subprocess`` is used to execute the binary and get the\n results that it emits to STDOUT.\n\n :param file_name: the name of the downloaded audio file\n :param duration: the duration of the audio to determine pixels per second\n \"\"\"\n log.info(\"Invoking audiowaveform\")\n\n pps = math.ceil(1e6 / duration) # approx 1000 points in total\n args = [\n \"audiowaveform\",\n \"--input-filename\",\n file_name,\n \"--output-format\",\n \"json\",\n \"--pixels-per-second\",\n str(pps),\n ]\n log.debug(f'Command: {\" \".join(args)}')\n proc = subprocess.run(args, cwd=TMP_DIR, check=True, capture_output=True)\n log.debug(f\"Subprocess exit code: {proc.returncode}\")\n return proc.stdout\n\n\ndef process_waveform_output(json_out):\n \"\"\"\n Parse the waveform output generated by the ``audiowaveform`` binary. The\n output consists of alternating positive and negative values, that are almost\n equal in amplitude. We discard the negative values. 
We also scale down the\n amplitudes by the largest value so that they lie in the range [0, 1].\n\n :param json_out: the JSON output generated by ``audiowaveform``\n :returns: the list of peaks\n \"\"\"\n log.info(\"Transforming points\")\n\n output = json.loads(json_out)\n data = output[\"data\"]\n log.debug(f\"Original umber of points: {len(data)}\")\n\n transformed_data = []\n max_val = 0\n for idx, val in enumerate(data):\n if idx % 2 == 0:\n continue\n transformed_data.append(val)\n if val > max_val:\n max_val = val\n transformed_data = [round(val / max_val, 5) for val in transformed_data]\n log.debug(f\"Transformed number of points: {len(transformed_data)}\")\n return transformed_data\n\n\ndef cleanup(file_name):\n \"\"\"\n Delete the audio file after it has been processed.\n\n :param file_name: the name of the file to delete\n \"\"\"\n log.info(f\"Deleting {file_name}\")\n\n file_path = TMP_DIR.joinpath(file_name)\n log.debug(f\"File path: {file_path}\")\n if file_path.exists():\n log.info(f\"Deleting file {file_path}\")\n os.remove(file_path)\n else:\n log.info(\"File not found, nothing deleted\")\n\n\ndef generate_peaks(audio) -> List[float]:\n file_name = None\n try:\n file_name = download_audio(audio.url, audio.identifier)\n awf_out = generate_waveform(file_name, audio.duration)\n return process_waveform_output(awf_out)\n finally:\n if file_name is not None:\n cleanup(file_name)\n", "path": "api/catalog/api/utils/waveform.py"}], "after_files": [{"content": "import json\nimport logging\nimport math\nimport mimetypes\nimport os\nimport pathlib\nimport shutil\nimport subprocess\nfrom typing import List\n\nimport requests\n\n\nlog = logging.getLogger(__name__)\n\nTMP_DIR = pathlib.Path(\"/tmp\").resolve()\nUA_STRING = \"OpenverseWaveform/0.0 (https://wordpress.org/openverse)\"\n\n\ndef ext_from_url(url):\n \"\"\"\n Get the file extension from the given URL. 
Looks at the last part of the URL\n path, and returns the string after the last dot.\n\n :param url: the URL to the file whose extension is being determined\n :returns: the file extension or ``None``\n \"\"\"\n file_name = url.split(\"/\")[-1]\n if \".\" in file_name:\n ext = file_name.split(\".\")[-1]\n return f\".{ext}\"\n else:\n return None\n\n\ndef download_audio(url, identifier):\n \"\"\"\n Download the audio from the given URL to a location on the disk.\n\n :param url: the URL to the file being downloaded\n :param identifier: the identifier of the media object to name the file\n :returns: the name of the file on the disk\n \"\"\"\n log.info(f\"Downloading file at {url}\")\n\n headers = {\"User-Agent\": UA_STRING}\n with requests.get(url, stream=True, headers=headers) as res:\n log.debug(f\"Response code: {res.status_code}\")\n mimetype = res.headers[\"content-type\"]\n log.debug(f\"MIME type: {mimetype}\")\n ext = ext_from_url(url) or mimetypes.guess_extension(mimetype)\n if ext is None:\n raise ValueError(\"Could not identify media extension\")\n file_name = f\"audio-{identifier}{ext}\"\n log.debug(f\"File name: {file_name}\")\n with open(TMP_DIR.joinpath(file_name), \"wb\") as file:\n shutil.copyfileobj(res.raw, file)\n return file_name\n\n\ndef generate_waveform(file_name, duration):\n \"\"\"\n Generate the waveform for the file by invoking the ``audiowaveform`` binary.\n The Python module ``subprocess`` is used to execute the binary and get the\n results that it emits to STDOUT.\n\n :param file_name: the name of the downloaded audio file\n :param duration: the duration of the audio to determine pixels per second\n \"\"\"\n log.info(\"Invoking audiowaveform\")\n\n pps = math.ceil(1e6 / duration) # approx 1000 points in total\n args = [\n \"audiowaveform\",\n \"--input-filename\",\n file_name,\n \"--output-format\",\n \"json\",\n \"--pixels-per-second\",\n str(pps),\n ]\n log.debug(f'Command: {\" \".join(args)}')\n proc = subprocess.run(args, cwd=TMP_DIR, check=True, capture_output=True)\n log.debug(f\"Subprocess exit code: {proc.returncode}\")\n return proc.stdout\n\n\ndef process_waveform_output(json_out):\n \"\"\"\n Parse the waveform output generated by the ``audiowaveform`` binary. The\n output consists of alternating positive and negative values, that are almost\n equal in amplitude. We discard the negative values. 
We also scale down the\n amplitudes by the largest value so that they lie in the range [0, 1].\n\n :param json_out: the JSON output generated by ``audiowaveform``\n :returns: the list of peaks\n \"\"\"\n log.info(\"Transforming points\")\n\n output = json.loads(json_out)\n data = output[\"data\"]\n log.debug(f\"Original umber of points: {len(data)}\")\n\n transformed_data = []\n max_val = 0\n for idx, val in enumerate(data):\n if idx % 2 == 0:\n continue\n if val < 0: # Any other odd values are negligible and can be ignored\n val = 0\n transformed_data.append(val)\n if val > max_val:\n max_val = val\n transformed_data = [round(val / max_val, 5) for val in transformed_data]\n log.debug(f\"Transformed number of points: {len(transformed_data)}\")\n return transformed_data\n\n\ndef cleanup(file_name):\n \"\"\"\n Delete the audio file after it has been processed.\n\n :param file_name: the name of the file to delete\n \"\"\"\n log.info(f\"Deleting {file_name}\")\n\n file_path = TMP_DIR.joinpath(file_name)\n log.debug(f\"File path: {file_path}\")\n if file_path.exists():\n log.info(f\"Deleting file {file_path}\")\n os.remove(file_path)\n else:\n log.info(\"File not found, nothing deleted\")\n\n\ndef generate_peaks(audio) -> List[float]:\n file_name = None\n try:\n file_name = download_audio(audio.url, audio.identifier)\n awf_out = generate_waveform(file_name, audio.duration)\n return process_waveform_output(awf_out)\n finally:\n if file_name is not None:\n cleanup(file_name)\n", "path": "api/catalog/api/utils/waveform.py"}]} | 1,952 | 127 |
gh_patches_debug_18023 | rasdani/github-patches | git_diff | mdn__kuma-7686 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong redirect on localized home page
Examples:
```
▶ curl -I https:/developer.mozilla.org/fr
HTTP/2 301
location: /fr/docs/fr
server: gunicorn/20.0.4
```
The exact same is happening to `ja`
But other locales seem fine, e.g.
```
▶ curl -I https:/developer.mozilla.org/de
HTTP/2 301
location: /de/
server: gunicorn/20.0.4
```
The same correct behavior applies to `sv-SE` and `en-US`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/wiki/views/legacy.py`
Content:
```
1 from django.conf import settings
2 from django.http import Http404
3 from django.shortcuts import redirect
4
5 from kuma.core.decorators import shared_cache_control
6
7 from ..constants import LEGACY_MINDTOUCH_NAMESPACES
8 from ..models import Document, Revision
9
10
11 # Legacy MindTouch redirects.
12
13
14 def mindtouch_namespace_to_kuma_url(locale, namespace, slug):
15 """
16 Convert MindTouch namespace URLs to Kuma URLs.
17
18 For special namespaces like Talk:, User:, etc., convert to the
19 approproate new URL, converting MT locales to Kuma locales.
20 If the locale cannot be correctly determined, fall back to en-US
21 """
22 new_locale = new_slug = None
23 if namespace in ("Talk", "Project", "Project_talk"):
24 # These namespaces carry the old locale in their URL, which
25 # simplifies figuring out where to send them.
26 mt_locale, _, doc_slug = slug.partition("/")
27 new_locale = settings.MT_TO_KUMA_LOCALE_MAP.get(mt_locale, "en-US")
28 new_slug = "%s:%s" % (namespace, doc_slug)
29 elif namespace == "User":
30 # For users, we look up the latest revision and get the locale
31 # from there.
32 new_slug = "%s:%s" % (namespace, slug)
33 try:
34 # TODO: Tests do not include a matching revision
35 rev = Revision.objects.filter(document__slug=new_slug).latest("created")
36 new_locale = rev.document.locale
37 except Revision.DoesNotExist:
38 # If that doesn't work, bail out to en-US.
39 new_locale = "en-US"
40 else:
41 # Templates, etc. don't actually have a locale, so we give
42 # them the default.
43 new_locale = "en-US"
44 new_slug = "%s:%s" % (namespace, slug)
45 if new_locale:
46 # TODO: new_locale is unused, no alternate branch
47 new_url = "/%s/docs/%s" % (locale, new_slug)
48 return new_url
49
50
51 def mindtouch_to_kuma_url(locale, path):
52 """
53 Convert valid MindTouch namespace URLs to Kuma URLs.
54
55 If there is an appropriate Kuma URL, then it is returned.
56 If there is no appropriate Kuma URL, then None is returned.
57 """
58 if path.startswith("%s/" % locale):
59 # Convert from Django-based LocaleMiddleware path to zamboni/amo style
60 path = path.replace("%s/" % locale, "", 1)
61
62 if path.startswith("Template:MindTouch"):
63 # MindTouch's default templates. There shouldn't be links to
64 # them anywhere in the wild, but just in case we 404 them.
65 # TODO: Tests don't exercise this branch
66 return None
67
68 if path.endswith("/"):
69 # If there's a trailing slash, snip it off.
70 path = path[:-1]
71
72 if ":" in path:
73 namespace, _, slug = path.partition(":")
74 # The namespaces (Talk:, User:, etc.) get their own
75 # special-case handling.
76 # TODO: Test invalid namespace
77 if namespace in LEGACY_MINDTOUCH_NAMESPACES:
78 return mindtouch_namespace_to_kuma_url(locale, namespace, slug)
79
80 # Last attempt: we try the request locale as the document locale,
81 # and see if that matches something.
82 try:
83 doc = Document.objects.get(slug=path, locale=locale)
84 except Document.DoesNotExist:
85 return None
86
87 location = doc.get_absolute_url()
88 return location
89
90
91 @shared_cache_control(s_maxage=60 * 60 * 24 * 30)
92 def mindtouch_to_kuma_redirect(request, path):
93 """
94 Given a request to a Mindtouch-generated URL, generate a redirect
95 to the correct corresponding kuma URL.
96
97 TODO: Retire this catch-all view and Mindtouch redirects.
98 Safest: Ensure no current content includes these URLs, no incoming links.
99 Middle: Monitor 404s and their referrer headers, fix links after removal.
100 Fastest: Remove it, ignore 404s.
101 """
102 locale = request.LANGUAGE_CODE
103 url = mindtouch_to_kuma_url(locale, path)
104 if url:
105 if "view" in request.GET:
106 url = "%s$%s" % (url, request.GET["view"])
107 return redirect(url, permanent=True)
108 else:
109 raise Http404
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/wiki/views/legacy.py b/kuma/wiki/views/legacy.py
--- a/kuma/wiki/views/legacy.py
+++ b/kuma/wiki/views/legacy.py
@@ -5,7 +5,7 @@
from kuma.core.decorators import shared_cache_control
from ..constants import LEGACY_MINDTOUCH_NAMESPACES
-from ..models import Document, Revision
+from ..models import Revision
# Legacy MindTouch redirects.
@@ -77,15 +77,7 @@
if namespace in LEGACY_MINDTOUCH_NAMESPACES:
return mindtouch_namespace_to_kuma_url(locale, namespace, slug)
- # Last attempt: we try the request locale as the document locale,
- # and see if that matches something.
- try:
- doc = Document.objects.get(slug=path, locale=locale)
- except Document.DoesNotExist:
- return None
-
- location = doc.get_absolute_url()
- return location
+ return None
@shared_cache_control(s_maxage=60 * 60 * 24 * 30)
| {"golden_diff": "diff --git a/kuma/wiki/views/legacy.py b/kuma/wiki/views/legacy.py\n--- a/kuma/wiki/views/legacy.py\n+++ b/kuma/wiki/views/legacy.py\n@@ -5,7 +5,7 @@\n from kuma.core.decorators import shared_cache_control\n \n from ..constants import LEGACY_MINDTOUCH_NAMESPACES\n-from ..models import Document, Revision\n+from ..models import Revision\n \n \n # Legacy MindTouch redirects.\n@@ -77,15 +77,7 @@\n if namespace in LEGACY_MINDTOUCH_NAMESPACES:\n return mindtouch_namespace_to_kuma_url(locale, namespace, slug)\n \n- # Last attempt: we try the request locale as the document locale,\n- # and see if that matches something.\n- try:\n- doc = Document.objects.get(slug=path, locale=locale)\n- except Document.DoesNotExist:\n- return None\n-\n- location = doc.get_absolute_url()\n- return location\n+ return None\n \n \n @shared_cache_control(s_maxage=60 * 60 * 24 * 30)\n", "issue": "Wrong redirect on localized home page\nExamples:\r\n```\r\n\u25b6 curl -I https:/developer.mozilla.org/fr\r\nHTTP/2 301\r\nlocation: /fr/docs/fr\r\nserver: gunicorn/20.0.4\r\n```\r\nThe exact same is happening to `ja`\r\n\r\nBut other locales seem fine, e.g.\r\n```\r\n\u25b6 curl -I https:/developer.mozilla.org/de\r\nHTTP/2 301\r\nlocation: /de/\r\nserver: gunicorn/20.0.4\r\n```\r\nThe exact same is happening to `sv-SE` and `en-US`. \n", "before_files": [{"content": "from django.conf import settings\nfrom django.http import Http404\nfrom django.shortcuts import redirect\n\nfrom kuma.core.decorators import shared_cache_control\n\nfrom ..constants import LEGACY_MINDTOUCH_NAMESPACES\nfrom ..models import Document, Revision\n\n\n# Legacy MindTouch redirects.\n\n\ndef mindtouch_namespace_to_kuma_url(locale, namespace, slug):\n \"\"\"\n Convert MindTouch namespace URLs to Kuma URLs.\n\n For special namespaces like Talk:, User:, etc., convert to the\n approproate new URL, converting MT locales to Kuma locales.\n If the locale cannot be correctly determined, fall back to en-US\n \"\"\"\n new_locale = new_slug = None\n if namespace in (\"Talk\", \"Project\", \"Project_talk\"):\n # These namespaces carry the old locale in their URL, which\n # simplifies figuring out where to send them.\n mt_locale, _, doc_slug = slug.partition(\"/\")\n new_locale = settings.MT_TO_KUMA_LOCALE_MAP.get(mt_locale, \"en-US\")\n new_slug = \"%s:%s\" % (namespace, doc_slug)\n elif namespace == \"User\":\n # For users, we look up the latest revision and get the locale\n # from there.\n new_slug = \"%s:%s\" % (namespace, slug)\n try:\n # TODO: Tests do not include a matching revision\n rev = Revision.objects.filter(document__slug=new_slug).latest(\"created\")\n new_locale = rev.document.locale\n except Revision.DoesNotExist:\n # If that doesn't work, bail out to en-US.\n new_locale = \"en-US\"\n else:\n # Templates, etc. 
don't actually have a locale, so we give\n # them the default.\n new_locale = \"en-US\"\n new_slug = \"%s:%s\" % (namespace, slug)\n if new_locale:\n # TODO: new_locale is unused, no alternate branch\n new_url = \"/%s/docs/%s\" % (locale, new_slug)\n return new_url\n\n\ndef mindtouch_to_kuma_url(locale, path):\n \"\"\"\n Convert valid MindTouch namespace URLs to Kuma URLs.\n\n If there is an appropriate Kuma URL, then it is returned.\n If there is no appropriate Kuma URL, then None is returned.\n \"\"\"\n if path.startswith(\"%s/\" % locale):\n # Convert from Django-based LocaleMiddleware path to zamboni/amo style\n path = path.replace(\"%s/\" % locale, \"\", 1)\n\n if path.startswith(\"Template:MindTouch\"):\n # MindTouch's default templates. There shouldn't be links to\n # them anywhere in the wild, but just in case we 404 them.\n # TODO: Tests don't exercise this branch\n return None\n\n if path.endswith(\"/\"):\n # If there's a trailing slash, snip it off.\n path = path[:-1]\n\n if \":\" in path:\n namespace, _, slug = path.partition(\":\")\n # The namespaces (Talk:, User:, etc.) get their own\n # special-case handling.\n # TODO: Test invalid namespace\n if namespace in LEGACY_MINDTOUCH_NAMESPACES:\n return mindtouch_namespace_to_kuma_url(locale, namespace, slug)\n\n # Last attempt: we try the request locale as the document locale,\n # and see if that matches something.\n try:\n doc = Document.objects.get(slug=path, locale=locale)\n except Document.DoesNotExist:\n return None\n\n location = doc.get_absolute_url()\n return location\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 30)\ndef mindtouch_to_kuma_redirect(request, path):\n \"\"\"\n Given a request to a Mindtouch-generated URL, generate a redirect\n to the correct corresponding kuma URL.\n\n TODO: Retire this catch-all view and Mindtouch redirects.\n Safest: Ensure no current content includes these URLs, no incoming links.\n Middle: Monitor 404s and their referrer headers, fix links after removal.\n Fastest: Remove it, ignore 404s.\n \"\"\"\n locale = request.LANGUAGE_CODE\n url = mindtouch_to_kuma_url(locale, path)\n if url:\n if \"view\" in request.GET:\n url = \"%s$%s\" % (url, request.GET[\"view\"])\n return redirect(url, permanent=True)\n else:\n raise Http404\n", "path": "kuma/wiki/views/legacy.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.http import Http404\nfrom django.shortcuts import redirect\n\nfrom kuma.core.decorators import shared_cache_control\n\nfrom ..constants import LEGACY_MINDTOUCH_NAMESPACES\nfrom ..models import Revision\n\n\n# Legacy MindTouch redirects.\n\n\ndef mindtouch_namespace_to_kuma_url(locale, namespace, slug):\n \"\"\"\n Convert MindTouch namespace URLs to Kuma URLs.\n\n For special namespaces like Talk:, User:, etc., convert to the\n approproate new URL, converting MT locales to Kuma locales.\n If the locale cannot be correctly determined, fall back to en-US\n \"\"\"\n new_locale = new_slug = None\n if namespace in (\"Talk\", \"Project\", \"Project_talk\"):\n # These namespaces carry the old locale in their URL, which\n # simplifies figuring out where to send them.\n mt_locale, _, doc_slug = slug.partition(\"/\")\n new_locale = settings.MT_TO_KUMA_LOCALE_MAP.get(mt_locale, \"en-US\")\n new_slug = \"%s:%s\" % (namespace, doc_slug)\n elif namespace == \"User\":\n # For users, we look up the latest revision and get the locale\n # from there.\n new_slug = \"%s:%s\" % (namespace, slug)\n try:\n # TODO: Tests do not include a matching revision\n rev = 
Revision.objects.filter(document__slug=new_slug).latest(\"created\")\n new_locale = rev.document.locale\n except Revision.DoesNotExist:\n # If that doesn't work, bail out to en-US.\n new_locale = \"en-US\"\n else:\n # Templates, etc. don't actually have a locale, so we give\n # them the default.\n new_locale = \"en-US\"\n new_slug = \"%s:%s\" % (namespace, slug)\n if new_locale:\n # TODO: new_locale is unused, no alternate branch\n new_url = \"/%s/docs/%s\" % (locale, new_slug)\n return new_url\n\n\ndef mindtouch_to_kuma_url(locale, path):\n \"\"\"\n Convert valid MindTouch namespace URLs to Kuma URLs.\n\n If there is an appropriate Kuma URL, then it is returned.\n If there is no appropriate Kuma URL, then None is returned.\n \"\"\"\n if path.startswith(\"%s/\" % locale):\n # Convert from Django-based LocaleMiddleware path to zamboni/amo style\n path = path.replace(\"%s/\" % locale, \"\", 1)\n\n if path.startswith(\"Template:MindTouch\"):\n # MindTouch's default templates. There shouldn't be links to\n # them anywhere in the wild, but just in case we 404 them.\n # TODO: Tests don't exercise this branch\n return None\n\n if path.endswith(\"/\"):\n # If there's a trailing slash, snip it off.\n path = path[:-1]\n\n if \":\" in path:\n namespace, _, slug = path.partition(\":\")\n # The namespaces (Talk:, User:, etc.) get their own\n # special-case handling.\n # TODO: Test invalid namespace\n if namespace in LEGACY_MINDTOUCH_NAMESPACES:\n return mindtouch_namespace_to_kuma_url(locale, namespace, slug)\n\n return None\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 30)\ndef mindtouch_to_kuma_redirect(request, path):\n \"\"\"\n Given a request to a Mindtouch-generated URL, generate a redirect\n to the correct corresponding kuma URL.\n\n TODO: Retire this catch-all view and Mindtouch redirects.\n Safest: Ensure no current content includes these URLs, no incoming links.\n Middle: Monitor 404s and their referrer headers, fix links after removal.\n Fastest: Remove it, ignore 404s.\n \"\"\"\n locale = request.LANGUAGE_CODE\n url = mindtouch_to_kuma_url(locale, path)\n if url:\n if \"view\" in request.GET:\n url = \"%s$%s\" % (url, request.GET[\"view\"])\n return redirect(url, permanent=True)\n else:\n raise Http404\n", "path": "kuma/wiki/views/legacy.py"}]} | 1,574 | 238 |
gh_patches_debug_6919 | rasdani/github-patches | git_diff | zulip__zulip-15887 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot react with a custom emoji to a bot message
Attempting to react to a bot message with a custom emoji appears to work, but if you refresh the page the reaction is gone.
This can be easily reproduced with the Notification Bot. Note that the `zulip` reaction works since it's not really a custom emoji, but you can reproduce the problem with any other custom emoji.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/views/reactions.py`
Content:
```
1 from typing import Optional
2
3 from django.http import HttpRequest, HttpResponse
4 from django.utils.translation import ugettext as _
5
6 from zerver.decorator import REQ, has_request_variables
7 from zerver.lib.actions import do_add_reaction, do_remove_reaction
8 from zerver.lib.emoji import check_emoji_request, emoji_name_to_emoji_code
9 from zerver.lib.message import access_message
10 from zerver.lib.request import JsonableError
11 from zerver.lib.response import json_success
12 from zerver.models import Message, Reaction, UserMessage, UserProfile
13
14
15 def create_historical_message(user_profile: UserProfile, message: Message) -> None:
16 # Users can see and react to messages sent to streams they
17 # were not a subscriber to; in order to receive events for
18 # those, we give the user a `historical` UserMessage objects
19 # for the message. This is the same trick we use for starring
20 # messages.
21 UserMessage.objects.create(user_profile=user_profile,
22 message=message,
23 flags=UserMessage.flags.historical | UserMessage.flags.read)
24
25 @has_request_variables
26 def add_reaction(request: HttpRequest, user_profile: UserProfile, message_id: int,
27 emoji_name: str=REQ(),
28 emoji_code: Optional[str]=REQ(default=None),
29 reaction_type: Optional[str]=REQ(default=None)) -> HttpResponse:
30 message, user_message = access_message(user_profile, message_id)
31
32 if emoji_code is None:
33 # The emoji_code argument is only required for rare corner
34 # cases discussed in the long block comment below. For simple
35 # API clients, we allow specifying just the name, and just
36 # look up the code using the current name->code mapping.
37 emoji_code = emoji_name_to_emoji_code(message.sender.realm,
38 emoji_name)[0]
39
40 if reaction_type is None:
41 reaction_type = emoji_name_to_emoji_code(message.sender.realm,
42 emoji_name)[1]
43
44 if Reaction.objects.filter(user_profile=user_profile,
45 message=message,
46 emoji_code=emoji_code,
47 reaction_type=reaction_type).exists():
48 raise JsonableError(_("Reaction already exists."))
49
50 query = Reaction.objects.filter(message=message,
51 emoji_code=emoji_code,
52 reaction_type=reaction_type)
53 if query.exists():
54 # If another user has already reacted to this message with
55 # same emoji code, we treat the new reaction as a vote for the
56 # existing reaction. So the emoji name used by that earlier
57 # reaction takes precedence over whatever was passed in this
58 # request. This is necessary to avoid a message having 2
59 # "different" emoji reactions with the same emoji code (and
60 # thus same image) on the same message, which looks ugly.
61 #
62 # In this "voting for an existing reaction" case, we shouldn't
63 # check whether the emoji code and emoji name match, since
64 # it's possible that the (emoji_type, emoji_name, emoji_code)
65 # triple for this existing rection xmay not pass validation
66 # now (e.g. because it is for a realm emoji that has been
67 # since deactivated). We still want to allow users to add a
68 # vote any old reaction they see in the UI even if that is a
69 # deactivated custom emoji, so we just use the emoji name from
70 # the existing reaction with no further validation.
71 emoji_name = query.first().emoji_name
72 else:
73 # Otherwise, use the name provided in this request, but verify
74 # it is valid in the user's realm (e.g. not a deactivated
75 # realm emoji).
76 check_emoji_request(message.sender.realm, emoji_name,
77 emoji_code, reaction_type)
78
79 if user_message is None:
80 create_historical_message(user_profile, message)
81
82 do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type)
83
84 return json_success()
85
86 @has_request_variables
87 def remove_reaction(request: HttpRequest, user_profile: UserProfile, message_id: int,
88 emoji_name: Optional[str]=REQ(default=None),
89 emoji_code: Optional[str]=REQ(default=None),
90 reaction_type: str=REQ(default="unicode_emoji")) -> HttpResponse:
91 message, user_message = access_message(user_profile, message_id)
92
93 if emoji_code is None:
94 if emoji_name is None:
95 raise JsonableError(_('At least one of the following arguments '
96 'must be present: emoji_name, emoji_code'))
97 # A correct full Zulip client implementation should always
98 # pass an emoji_code, because of the corner cases discussed in
99 # the long block comments elsewhere in this file. However, to
100 # make it easy for simple API clients to use the reactions API
101 # without needing the mapping between emoji names and codes,
102 # we allow instead passing the emoji_name and looking up the
103 # corresponding code using the current data.
104 emoji_code = emoji_name_to_emoji_code(message.sender.realm, emoji_name)[0]
105
106 if not Reaction.objects.filter(user_profile=user_profile,
107 message=message,
108 emoji_code=emoji_code,
109 reaction_type=reaction_type).exists():
110 raise JsonableError(_("Reaction doesn't exist."))
111
112 # Unlike adding reactions, while deleting a reaction, we don't
113 # check whether the provided (emoji_type, emoji_code) pair is
114 # valid in this realm. Since there's a row in the database, we
115 # know it was valid when the user added their reaction in the
116 # first place, so it is safe to just remove the reaction if it
117 # exists. And the (reaction_type, emoji_code) pair may no longer be
118 # valid in legitimate situations (e.g. if a realm emoji was
119 # deactivated by an administrator in the meantime).
120 do_remove_reaction(user_profile, message, emoji_code, reaction_type)
121
122 return json_success()
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/views/reactions.py b/zerver/views/reactions.py
--- a/zerver/views/reactions.py
+++ b/zerver/views/reactions.py
@@ -73,7 +73,7 @@
# Otherwise, use the name provided in this request, but verify
# it is valid in the user's realm (e.g. not a deactivated
# realm emoji).
- check_emoji_request(message.sender.realm, emoji_name,
+ check_emoji_request(user_profile.realm, emoji_name,
emoji_code, reaction_type)
if user_message is None:
| {"golden_diff": "diff --git a/zerver/views/reactions.py b/zerver/views/reactions.py\n--- a/zerver/views/reactions.py\n+++ b/zerver/views/reactions.py\n@@ -73,7 +73,7 @@\n # Otherwise, use the name provided in this request, but verify\n # it is valid in the user's realm (e.g. not a deactivated\n # realm emoji).\n- check_emoji_request(message.sender.realm, emoji_name,\n+ check_emoji_request(user_profile.realm, emoji_name,\n emoji_code, reaction_type)\n \n if user_message is None:\n", "issue": "Cannot react with a custom emoji to a bot message \nAttempting to react to a bot message with a custom emoji appears to work but if you refresh the page it's gone. Inspecting the network requests reveals that the request to add the reaction fails with a 400 Bad Request error `Reaction doesn't exist.`.\r\n\r\nThis can be easily reproduced with the Notification Bot. Note that the `zulip` reaction works since it's not really a custom emoji, but you can reproduce the problem with any other custom emoji.\n", "before_files": [{"content": "from typing import Optional\n\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.translation import ugettext as _\n\nfrom zerver.decorator import REQ, has_request_variables\nfrom zerver.lib.actions import do_add_reaction, do_remove_reaction\nfrom zerver.lib.emoji import check_emoji_request, emoji_name_to_emoji_code\nfrom zerver.lib.message import access_message\nfrom zerver.lib.request import JsonableError\nfrom zerver.lib.response import json_success\nfrom zerver.models import Message, Reaction, UserMessage, UserProfile\n\n\ndef create_historical_message(user_profile: UserProfile, message: Message) -> None:\n # Users can see and react to messages sent to streams they\n # were not a subscriber to; in order to receive events for\n # those, we give the user a `historical` UserMessage objects\n # for the message. This is the same trick we use for starring\n # messages.\n UserMessage.objects.create(user_profile=user_profile,\n message=message,\n flags=UserMessage.flags.historical | UserMessage.flags.read)\n\n@has_request_variables\ndef add_reaction(request: HttpRequest, user_profile: UserProfile, message_id: int,\n emoji_name: str=REQ(),\n emoji_code: Optional[str]=REQ(default=None),\n reaction_type: Optional[str]=REQ(default=None)) -> HttpResponse:\n message, user_message = access_message(user_profile, message_id)\n\n if emoji_code is None:\n # The emoji_code argument is only required for rare corner\n # cases discussed in the long block comment below. For simple\n # API clients, we allow specifying just the name, and just\n # look up the code using the current name->code mapping.\n emoji_code = emoji_name_to_emoji_code(message.sender.realm,\n emoji_name)[0]\n\n if reaction_type is None:\n reaction_type = emoji_name_to_emoji_code(message.sender.realm,\n emoji_name)[1]\n\n if Reaction.objects.filter(user_profile=user_profile,\n message=message,\n emoji_code=emoji_code,\n reaction_type=reaction_type).exists():\n raise JsonableError(_(\"Reaction already exists.\"))\n\n query = Reaction.objects.filter(message=message,\n emoji_code=emoji_code,\n reaction_type=reaction_type)\n if query.exists():\n # If another user has already reacted to this message with\n # same emoji code, we treat the new reaction as a vote for the\n # existing reaction. So the emoji name used by that earlier\n # reaction takes precedence over whatever was passed in this\n # request. 
This is necessary to avoid a message having 2\n # \"different\" emoji reactions with the same emoji code (and\n # thus same image) on the same message, which looks ugly.\n #\n # In this \"voting for an existing reaction\" case, we shouldn't\n # check whether the emoji code and emoji name match, since\n # it's possible that the (emoji_type, emoji_name, emoji_code)\n # triple for this existing rection xmay not pass validation\n # now (e.g. because it is for a realm emoji that has been\n # since deactivated). We still want to allow users to add a\n # vote any old reaction they see in the UI even if that is a\n # deactivated custom emoji, so we just use the emoji name from\n # the existing reaction with no further validation.\n emoji_name = query.first().emoji_name\n else:\n # Otherwise, use the name provided in this request, but verify\n # it is valid in the user's realm (e.g. not a deactivated\n # realm emoji).\n check_emoji_request(message.sender.realm, emoji_name,\n emoji_code, reaction_type)\n\n if user_message is None:\n create_historical_message(user_profile, message)\n\n do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type)\n\n return json_success()\n\n@has_request_variables\ndef remove_reaction(request: HttpRequest, user_profile: UserProfile, message_id: int,\n emoji_name: Optional[str]=REQ(default=None),\n emoji_code: Optional[str]=REQ(default=None),\n reaction_type: str=REQ(default=\"unicode_emoji\")) -> HttpResponse:\n message, user_message = access_message(user_profile, message_id)\n\n if emoji_code is None:\n if emoji_name is None:\n raise JsonableError(_('At least one of the following arguments '\n 'must be present: emoji_name, emoji_code'))\n # A correct full Zulip client implementation should always\n # pass an emoji_code, because of the corner cases discussed in\n # the long block comments elsewhere in this file. However, to\n # make it easy for simple API clients to use the reactions API\n # without needing the mapping between emoji names and codes,\n # we allow instead passing the emoji_name and looking up the\n # corresponding code using the current data.\n emoji_code = emoji_name_to_emoji_code(message.sender.realm, emoji_name)[0]\n\n if not Reaction.objects.filter(user_profile=user_profile,\n message=message,\n emoji_code=emoji_code,\n reaction_type=reaction_type).exists():\n raise JsonableError(_(\"Reaction doesn't exist.\"))\n\n # Unlike adding reactions, while deleting a reaction, we don't\n # check whether the provided (emoji_type, emoji_code) pair is\n # valid in this realm. Since there's a row in the database, we\n # know it was valid when the user added their reaction in the\n # first place, so it is safe to just remove the reaction if it\n # exists. And the (reaction_type, emoji_code) pair may no longer be\n # valid in legitimate situations (e.g. 
if a realm emoji was\n # deactivated by an administrator in the meantime).\n do_remove_reaction(user_profile, message, emoji_code, reaction_type)\n\n return json_success()\n", "path": "zerver/views/reactions.py"}], "after_files": [{"content": "from typing import Optional\n\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.translation import ugettext as _\n\nfrom zerver.decorator import REQ, has_request_variables\nfrom zerver.lib.actions import do_add_reaction, do_remove_reaction\nfrom zerver.lib.emoji import check_emoji_request, emoji_name_to_emoji_code\nfrom zerver.lib.message import access_message\nfrom zerver.lib.request import JsonableError\nfrom zerver.lib.response import json_success\nfrom zerver.models import Message, Reaction, UserMessage, UserProfile\n\n\ndef create_historical_message(user_profile: UserProfile, message: Message) -> None:\n # Users can see and react to messages sent to streams they\n # were not a subscriber to; in order to receive events for\n # those, we give the user a `historical` UserMessage objects\n # for the message. This is the same trick we use for starring\n # messages.\n UserMessage.objects.create(user_profile=user_profile,\n message=message,\n flags=UserMessage.flags.historical | UserMessage.flags.read)\n\n@has_request_variables\ndef add_reaction(request: HttpRequest, user_profile: UserProfile, message_id: int,\n emoji_name: str=REQ(),\n emoji_code: Optional[str]=REQ(default=None),\n reaction_type: Optional[str]=REQ(default=None)) -> HttpResponse:\n message, user_message = access_message(user_profile, message_id)\n\n if emoji_code is None:\n # The emoji_code argument is only required for rare corner\n # cases discussed in the long block comment below. For simple\n # API clients, we allow specifying just the name, and just\n # look up the code using the current name->code mapping.\n emoji_code = emoji_name_to_emoji_code(message.sender.realm,\n emoji_name)[0]\n\n if reaction_type is None:\n reaction_type = emoji_name_to_emoji_code(message.sender.realm,\n emoji_name)[1]\n\n if Reaction.objects.filter(user_profile=user_profile,\n message=message,\n emoji_code=emoji_code,\n reaction_type=reaction_type).exists():\n raise JsonableError(_(\"Reaction already exists.\"))\n\n query = Reaction.objects.filter(message=message,\n emoji_code=emoji_code,\n reaction_type=reaction_type)\n if query.exists():\n # If another user has already reacted to this message with\n # same emoji code, we treat the new reaction as a vote for the\n # existing reaction. So the emoji name used by that earlier\n # reaction takes precedence over whatever was passed in this\n # request. This is necessary to avoid a message having 2\n # \"different\" emoji reactions with the same emoji code (and\n # thus same image) on the same message, which looks ugly.\n #\n # In this \"voting for an existing reaction\" case, we shouldn't\n # check whether the emoji code and emoji name match, since\n # it's possible that the (emoji_type, emoji_name, emoji_code)\n # triple for this existing rection xmay not pass validation\n # now (e.g. because it is for a realm emoji that has been\n # since deactivated). We still want to allow users to add a\n # vote any old reaction they see in the UI even if that is a\n # deactivated custom emoji, so we just use the emoji name from\n # the existing reaction with no further validation.\n emoji_name = query.first().emoji_name\n else:\n # Otherwise, use the name provided in this request, but verify\n # it is valid in the user's realm (e.g. 
not a deactivated\n # realm emoji).\n check_emoji_request(user_profile.realm, emoji_name,\n emoji_code, reaction_type)\n\n if user_message is None:\n create_historical_message(user_profile, message)\n\n do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type)\n\n return json_success()\n\n@has_request_variables\ndef remove_reaction(request: HttpRequest, user_profile: UserProfile, message_id: int,\n emoji_name: Optional[str]=REQ(default=None),\n emoji_code: Optional[str]=REQ(default=None),\n reaction_type: str=REQ(default=\"unicode_emoji\")) -> HttpResponse:\n message, user_message = access_message(user_profile, message_id)\n\n if emoji_code is None:\n if emoji_name is None:\n raise JsonableError(_('At least one of the following arguments '\n 'must be present: emoji_name, emoji_code'))\n # A correct full Zulip client implementation should always\n # pass an emoji_code, because of the corner cases discussed in\n # the long block comments elsewhere in this file. However, to\n # make it easy for simple API clients to use the reactions API\n # without needing the mapping between emoji names and codes,\n # we allow instead passing the emoji_name and looking up the\n # corresponding code using the current data.\n emoji_code = emoji_name_to_emoji_code(message.sender.realm, emoji_name)[0]\n\n if not Reaction.objects.filter(user_profile=user_profile,\n message=message,\n emoji_code=emoji_code,\n reaction_type=reaction_type).exists():\n raise JsonableError(_(\"Reaction doesn't exist.\"))\n\n # Unlike adding reactions, while deleting a reaction, we don't\n # check whether the provided (emoji_type, emoji_code) pair is\n # valid in this realm. Since there's a row in the database, we\n # know it was valid when the user added their reaction in the\n # first place, so it is safe to just remove the reaction if it\n # exists. And the (reaction_type, emoji_code) pair may no longer be\n # valid in legitimate situations (e.g. if a realm emoji was\n # deactivated by an administrator in the meantime).\n do_remove_reaction(user_profile, message, emoji_code, reaction_type)\n\n return json_success()\n", "path": "zerver/views/reactions.py"}]} | 1,885 | 127 |
gh_patches_debug_42501 | rasdani/github-patches | git_diff | arviz-devs__arviz-847 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
from_tfp with multiple chains?
How should `from_tfp` be used with multiple chains? It looks like it is only compatible with one chain, and it treats the different chains as different variables.
## Example
```python
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow_probability.python.edward2 as ed
import numpy as np
import arviz as az

dtype = np.float32

def unnormalized_log_prob(x):
    return -x**2.

samples_gauss, _ = tfp.mcmc.sample_chain(
    num_results=1000,
    current_state=np.ones(20, dtype=dtype),
    kernel=tfp.mcmc.HamiltonianMonteCarlo(
        unnormalized_log_prob,
        step_size=1.0,
        num_leapfrog_steps=3),
    num_burnin_steps=500,
    parallel_iterations=4)

with tf.Session() as sess:
    [samples_gauss_, ] = sess.run([samples_gauss, ])

az.from_tfp([samples_gauss_]).posterior
# It is expected to be a list with a variable (array) at each position
```
#### Output
```
<xarray.Dataset>
Dimensions:      (chain: 1, draw: 1000, var_0_dim_0: 20)
Coordinates:
  * chain        (chain) int64 0
  * draw         (draw) int64 0 1 2 3 4 5 6 7 ... 993 994 995 996 997 998 999
  * var_0_dim_0  (var_0_dim_0) int64 0 1 2 3 4 5 6 7 ... 12 13 14 15 16 17 18 19
Data variables:
    var_0        (chain, draw, var_0_dim_0) float32 0.38988703 ... -0.04516393
Attributes:
    ...
```
### Workaround
My array has dimensions (ndraws, nchains), so simply transposing the array makes it possible to call:

```python
from_array = az.convert_to_inference_data(samples_gauss_.T)
az.plot_trace(from_array);
```
#### Output
Which, as can be seen, works properly.

I don't know if I am missing how to use the coords or dims parameter to somehow sort this out or if it is an implementation detail.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/data/io_tfp.py`
Content:
```
1 """Tfp-specific conversion code."""
2 import numpy as np
3 import xarray as xr
4
5 from .inference_data import InferenceData
6 from .base import dict_to_dataset, generate_dims_coords, make_attrs
7 from .. import utils
8
9 # pylint: disable=too-many-instance-attributes
10 class TfpConverter:
11 """Encapsulate tfp specific logic."""
12
13 def __init__(
14 self,
15 *,
16 posterior,
17 var_names=None,
18 model_fn=None,
19 feed_dict=None,
20 posterior_predictive_samples=100,
21 posterior_predictive_size=1,
22 observed=None,
23 coords=None,
24 dims=None
25 ):
26
27 self.posterior = posterior
28
29 if var_names is None:
30 self.var_names = []
31 for i in range(0, len(posterior)):
32 self.var_names.append("var_{0}".format(i))
33 else:
34 self.var_names = var_names
35
36 self.model_fn = model_fn
37 self.feed_dict = feed_dict
38 self.posterior_predictive_samples = posterior_predictive_samples
39 self.posterior_predictive_size = posterior_predictive_size
40 self.observed = observed
41 self.coords = coords
42 self.dims = dims
43
44 import tensorflow_probability as tfp
45 import tensorflow as tf
46 import tensorflow_probability.python.edward2 as ed
47
48 self.tfp = tfp
49 self.tf = tf # pylint: disable=invalid-name
50 self.ed = ed # pylint: disable=invalid-name
51
52 if int(self.tf.__version__[0]) > 1:
53 import tensorflow.compat.v1 as tf # pylint: disable=import-error
54
55 tf.disable_v2_behavior()
56 self.tf = tf # pylint: disable=invalid-name
57
58 def posterior_to_xarray(self):
59 """Convert the posterior to an xarray dataset."""
60 data = {}
61 for i, var_name in enumerate(self.var_names):
62 data[var_name] = utils.expand_dims(self.posterior[i])
63 return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=self.dims)
64
65 def observed_data_to_xarray(self):
66 """Convert observed data to xarray."""
67 if self.observed is None:
68 return None
69
70 observed_data = {}
71 if isinstance(self.observed, self.tf.Tensor):
72 with self.tf.Session() as sess:
73 vals = sess.run(self.observed, feed_dict=self.feed_dict)
74 else:
75 vals = self.observed
76
77 if self.dims is None:
78 dims = {}
79 else:
80 dims = self.dims
81
82 name = "obs"
83 val_dims = dims.get(name)
84 vals = utils.one_de(vals)
85 val_dims, coords = generate_dims_coords(vals.shape, name, dims=val_dims, coords=self.coords)
86 # coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}
87
88 observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)
89 return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.tfp))
90
91 def _value_setter(self, variables):
92 def interceptor(rv_constructor, *rv_args, **rv_kwargs):
93 """Replace prior on effects with empirical posterior mean from MCMC."""
94 name = rv_kwargs.pop("name")
95 if name in variables:
96 rv_kwargs["value"] = variables[name]
97 return rv_constructor(*rv_args, **rv_kwargs)
98
99 return interceptor
100
101 def posterior_predictive_to_xarray(self):
102 """Convert posterior_predictive samples to xarray."""
103 if self.model_fn is None:
104 return None
105
106 posterior_preds = []
107 sample_size = self.posterior[0].shape[0]
108
109 for i in np.arange(0, sample_size, int(sample_size / self.posterior_predictive_samples)):
110 variables = {}
111 for var_i, var_name in enumerate(self.var_names):
112 variables[var_name] = self.posterior[var_i][i]
113
114 with self.ed.interception(self._value_setter(variables)):
115 if self.posterior_predictive_size > 1:
116 posterior_preds.append(
117 [self.model_fn() for _ in range(self.posterior_predictive_size)]
118 )
119 else:
120 posterior_preds.append(self.model_fn())
121
122 data = {}
123 with self.tf.Session() as sess:
124 data["obs"] = utils.expand_dims(sess.run(posterior_preds, feed_dict=self.feed_dict))
125 return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=self.dims)
126
127 def sample_stats_to_xarray(self):
128 """Extract sample_stats from tfp trace."""
129 if self.model_fn is None or self.observed is None:
130 return None
131
132 log_likelihood = []
133 sample_size = self.posterior[0].shape[0]
134
135 for i in range(sample_size):
136 variables = {}
137 for var_i, var_name in enumerate(self.var_names):
138 variables[var_name] = self.posterior[var_i][i]
139
140 with self.ed.interception(self._value_setter(variables)):
141 log_likelihood.append((self.model_fn().distribution.log_prob(self.observed)))
142
143 data = {}
144 if self.dims is not None:
145 coord_name = self.dims.get("obs")
146 else:
147 coord_name = None
148 dims = {"log_likelihood": coord_name}
149
150 with self.tf.Session() as sess:
151 data["log_likelihood"] = utils.expand_dims(
152 sess.run(log_likelihood, feed_dict=self.feed_dict)
153 )
154 return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=dims)
155
156 def to_inference_data(self):
157 """Convert all available data to an InferenceData object.
158
159 Note that if groups can not be created (i.e., there is no `trace`, so
160 the `posterior` and `sample_stats` can not be extracted), then the InferenceData
161 will not have those groups.
162 """
163 return InferenceData(
164 **{
165 "posterior": self.posterior_to_xarray(),
166 "sample_stats": self.sample_stats_to_xarray(),
167 "posterior_predictive": self.posterior_predictive_to_xarray(),
168 "observed_data": self.observed_data_to_xarray(),
169 }
170 )
171
172
173 def from_tfp(
174 posterior=None,
175 *,
176 var_names=None,
177 model_fn=None,
178 feed_dict=None,
179 posterior_predictive_samples=100,
180 posterior_predictive_size=1,
181 observed=None,
182 coords=None,
183 dims=None
184 ):
185 """Convert tfp data into an InferenceData object."""
186 return TfpConverter(
187 posterior=posterior,
188 var_names=var_names,
189 model_fn=model_fn,
190 feed_dict=feed_dict,
191 posterior_predictive_samples=posterior_predictive_samples,
192 posterior_predictive_size=posterior_predictive_size,
193 observed=observed,
194 coords=coords,
195 dims=dims,
196 ).to_inference_data()
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/data/io_tfp.py b/arviz/data/io_tfp.py
--- a/arviz/data/io_tfp.py
+++ b/arviz/data/io_tfp.py
@@ -19,6 +19,7 @@
feed_dict=None,
posterior_predictive_samples=100,
posterior_predictive_size=1,
+ chain_dim=None,
observed=None,
coords=None,
dims=None
@@ -38,6 +39,7 @@
self.posterior_predictive_samples = posterior_predictive_samples
self.posterior_predictive_size = posterior_predictive_size
self.observed = observed
+ self.chain_dim = chain_dim
self.coords = coords
self.dims = dims
@@ -55,11 +57,20 @@
tf.disable_v2_behavior()
self.tf = tf # pylint: disable=invalid-name
+ def handle_chain_location(self, ary):
+ """Move the axis corresponding to the chain to first position.
+
+ If there is only one chain which has no axis, add it.
+ """
+ if self.chain_dim is None:
+ return utils.expand_dims(ary)
+ return ary.swapaxes(0, self.chain_dim)
+
def posterior_to_xarray(self):
"""Convert the posterior to an xarray dataset."""
data = {}
for i, var_name in enumerate(self.var_names):
- data[var_name] = utils.expand_dims(self.posterior[i])
+ data[var_name] = self.handle_chain_location(self.posterior[i])
return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=self.dims)
def observed_data_to_xarray(self):
@@ -121,7 +132,9 @@
data = {}
with self.tf.Session() as sess:
- data["obs"] = utils.expand_dims(sess.run(posterior_preds, feed_dict=self.feed_dict))
+ data["obs"] = self.handle_chain_location(
+ sess.run(posterior_preds, feed_dict=self.feed_dict)
+ )
return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=self.dims)
def sample_stats_to_xarray(self):
@@ -148,7 +161,7 @@
dims = {"log_likelihood": coord_name}
with self.tf.Session() as sess:
- data["log_likelihood"] = utils.expand_dims(
+ data["log_likelihood"] = self.handle_chain_location(
sess.run(log_likelihood, feed_dict=self.feed_dict)
)
return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=dims)
@@ -178,6 +191,7 @@
feed_dict=None,
posterior_predictive_samples=100,
posterior_predictive_size=1,
+ chain_dim=None,
observed=None,
coords=None,
dims=None
@@ -190,6 +204,7 @@
feed_dict=feed_dict,
posterior_predictive_samples=posterior_predictive_samples,
posterior_predictive_size=posterior_predictive_size,
+ chain_dim=chain_dim,
observed=observed,
coords=coords,
dims=dims,
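
For context, a minimal usage sketch of the new `chain_dim` argument introduced by the patch above (an illustration written for this collection, not code from the repository or its tests). It assumes the patch is applied and that the issue's reproduction snippet has already produced `samples_gauss_`, whose shape is `(1000, 20)` with draws on axis 0 and chains on axis 1.

```python
import arviz as az

# chain_dim=1 tells the converter which axis of each posterior array holds the
# chains; internally that axis is swapped to the front instead of a singleton
# chain axis being prepended, so the 20 chains are no longer flattened into a
# single chain with 20 "variables".
idata = az.from_tfp([samples_gauss_], chain_dim=1)
az.plot_trace(idata)
```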
| {"golden_diff": "diff --git a/arviz/data/io_tfp.py b/arviz/data/io_tfp.py\n--- a/arviz/data/io_tfp.py\n+++ b/arviz/data/io_tfp.py\n@@ -19,6 +19,7 @@\n feed_dict=None,\r\n posterior_predictive_samples=100,\r\n posterior_predictive_size=1,\r\n+ chain_dim=None,\r\n observed=None,\r\n coords=None,\r\n dims=None\r\n@@ -38,6 +39,7 @@\n self.posterior_predictive_samples = posterior_predictive_samples\r\n self.posterior_predictive_size = posterior_predictive_size\r\n self.observed = observed\r\n+ self.chain_dim = chain_dim\r\n self.coords = coords\r\n self.dims = dims\r\n \r\n@@ -55,11 +57,20 @@\n tf.disable_v2_behavior()\r\n self.tf = tf # pylint: disable=invalid-name\r\n \r\n+ def handle_chain_location(self, ary):\r\n+ \"\"\"Move the axis corresponding to the chain to first position.\r\n+\r\n+ If there is only one chain which has no axis, add it.\r\n+ \"\"\"\r\n+ if self.chain_dim is None:\r\n+ return utils.expand_dims(ary)\r\n+ return ary.swapaxes(0, self.chain_dim)\r\n+\r\n def posterior_to_xarray(self):\r\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\r\n data = {}\r\n for i, var_name in enumerate(self.var_names):\r\n- data[var_name] = utils.expand_dims(self.posterior[i])\r\n+ data[var_name] = self.handle_chain_location(self.posterior[i])\r\n return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=self.dims)\r\n \r\n def observed_data_to_xarray(self):\r\n@@ -121,7 +132,9 @@\n \r\n data = {}\r\n with self.tf.Session() as sess:\r\n- data[\"obs\"] = utils.expand_dims(sess.run(posterior_preds, feed_dict=self.feed_dict))\r\n+ data[\"obs\"] = self.handle_chain_location(\r\n+ sess.run(posterior_preds, feed_dict=self.feed_dict)\r\n+ )\r\n return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=self.dims)\r\n \r\n def sample_stats_to_xarray(self):\r\n@@ -148,7 +161,7 @@\n dims = {\"log_likelihood\": coord_name}\r\n \r\n with self.tf.Session() as sess:\r\n- data[\"log_likelihood\"] = utils.expand_dims(\r\n+ data[\"log_likelihood\"] = self.handle_chain_location(\r\n sess.run(log_likelihood, feed_dict=self.feed_dict)\r\n )\r\n return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=dims)\r\n@@ -178,6 +191,7 @@\n feed_dict=None,\r\n posterior_predictive_samples=100,\r\n posterior_predictive_size=1,\r\n+ chain_dim=None,\r\n observed=None,\r\n coords=None,\r\n dims=None\r\n@@ -190,6 +204,7 @@\n feed_dict=feed_dict,\r\n posterior_predictive_samples=posterior_predictive_samples,\r\n posterior_predictive_size=posterior_predictive_size,\r\n+ chain_dim=chain_dim,\r\n observed=observed,\r\n coords=coords,\r\n dims=dims,\n", "issue": "from_tfp with multiple chains?\nHow should `from_tfp` be used with multiple chains? 
It looks like it is only compatible with one chain, and it treats the different chains as different variables.\r\n\r\n## Example\r\n\r\n import tensorflow as tf\r\n import tensorflow_probability as tfp\r\n import tensorflow_probability.python.edward2 as ed\r\n import numpy as np\r\n import arviz as az\r\n dtype = np.float32\r\n\r\n def unnormalized_log_prob(x):\r\n return -x**2.\r\n\r\n samples_gauss, _ = tfp.mcmc.sample_chain(\r\n num_results=1000,\r\n current_state=np.ones(20, dtype=dtype),\r\n kernel=tfp.mcmc.HamiltonianMonteCarlo(\r\n unnormalized_log_prob,\r\n step_size=1.0,\r\n num_leapfrog_steps=3),\r\n num_burnin_steps=500,\r\n parallel_iterations=4) \r\n\r\n with tf.Session() as sess:\r\n [samples_gauss_, ] = sess.run([samples_gauss, ])\r\n\r\n az.from_tfp([samples_gauss_]).posterior \r\n # It is expected to be a list with a variable (array) at each position\r\n\r\n#### Output\r\n <xarray.Dataset>\r\n Dimensions: (chain: 1, draw: 1000, var_0_dim_0: 20)\r\n Coordinates:\r\n * chain (chain) int64 0\r\n * draw (draw) int64 0 1 2 3 4 5 6 7 ... 993 994 995 996 997 998 999\r\n * var_0_dim_0 (var_0_dim_0) int64 0 1 2 3 4 5 6 7 ... 12 13 14 15 16 17 18 19\r\n Data variables:\r\n var_0 (chain, draw, var_0_dim_0) float32 0.38988703 ... -0.04516393\r\n Attributes:\r\n ...\r\n\r\n### Workaround\r\nMy array has dimensions (ndraws, nchains), therefore simply transposing the array allows to call:\r\n\r\n from_array = az.convert_to_inference_data(samples_gauss_.T)\r\n az.plot_trace(from_array);\r\n\r\n#### Output\r\nWhich as it can be seen, works properly.\r\n\r\n\r\n\r\nI don't know if I am missing how to use the coords or dims parameter to somehow sort this out or if it is an implementation detail.\r\n\n", "before_files": [{"content": "\"\"\"Tfp-specific conversion code.\"\"\"\r\nimport numpy as np\r\nimport xarray as xr\r\n\r\nfrom .inference_data import InferenceData\r\nfrom .base import dict_to_dataset, generate_dims_coords, make_attrs\r\nfrom .. 
import utils\r\n\r\n# pylint: disable=too-many-instance-attributes\r\nclass TfpConverter:\r\n \"\"\"Encapsulate tfp specific logic.\"\"\"\r\n\r\n def __init__(\r\n self,\r\n *,\r\n posterior,\r\n var_names=None,\r\n model_fn=None,\r\n feed_dict=None,\r\n posterior_predictive_samples=100,\r\n posterior_predictive_size=1,\r\n observed=None,\r\n coords=None,\r\n dims=None\r\n ):\r\n\r\n self.posterior = posterior\r\n\r\n if var_names is None:\r\n self.var_names = []\r\n for i in range(0, len(posterior)):\r\n self.var_names.append(\"var_{0}\".format(i))\r\n else:\r\n self.var_names = var_names\r\n\r\n self.model_fn = model_fn\r\n self.feed_dict = feed_dict\r\n self.posterior_predictive_samples = posterior_predictive_samples\r\n self.posterior_predictive_size = posterior_predictive_size\r\n self.observed = observed\r\n self.coords = coords\r\n self.dims = dims\r\n\r\n import tensorflow_probability as tfp\r\n import tensorflow as tf\r\n import tensorflow_probability.python.edward2 as ed\r\n\r\n self.tfp = tfp\r\n self.tf = tf # pylint: disable=invalid-name\r\n self.ed = ed # pylint: disable=invalid-name\r\n\r\n if int(self.tf.__version__[0]) > 1:\r\n import tensorflow.compat.v1 as tf # pylint: disable=import-error\r\n\r\n tf.disable_v2_behavior()\r\n self.tf = tf # pylint: disable=invalid-name\r\n\r\n def posterior_to_xarray(self):\r\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\r\n data = {}\r\n for i, var_name in enumerate(self.var_names):\r\n data[var_name] = utils.expand_dims(self.posterior[i])\r\n return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=self.dims)\r\n\r\n def observed_data_to_xarray(self):\r\n \"\"\"Convert observed data to xarray.\"\"\"\r\n if self.observed is None:\r\n return None\r\n\r\n observed_data = {}\r\n if isinstance(self.observed, self.tf.Tensor):\r\n with self.tf.Session() as sess:\r\n vals = sess.run(self.observed, feed_dict=self.feed_dict)\r\n else:\r\n vals = self.observed\r\n\r\n if self.dims is None:\r\n dims = {}\r\n else:\r\n dims = self.dims\r\n\r\n name = \"obs\"\r\n val_dims = dims.get(name)\r\n vals = utils.one_de(vals)\r\n val_dims, coords = generate_dims_coords(vals.shape, name, dims=val_dims, coords=self.coords)\r\n # coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}\r\n\r\n observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)\r\n return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.tfp))\r\n\r\n def _value_setter(self, variables):\r\n def interceptor(rv_constructor, *rv_args, **rv_kwargs):\r\n \"\"\"Replace prior on effects with empirical posterior mean from MCMC.\"\"\"\r\n name = rv_kwargs.pop(\"name\")\r\n if name in variables:\r\n rv_kwargs[\"value\"] = variables[name]\r\n return rv_constructor(*rv_args, **rv_kwargs)\r\n\r\n return interceptor\r\n\r\n def posterior_predictive_to_xarray(self):\r\n \"\"\"Convert posterior_predictive samples to xarray.\"\"\"\r\n if self.model_fn is None:\r\n return None\r\n\r\n posterior_preds = []\r\n sample_size = self.posterior[0].shape[0]\r\n\r\n for i in np.arange(0, sample_size, int(sample_size / self.posterior_predictive_samples)):\r\n variables = {}\r\n for var_i, var_name in enumerate(self.var_names):\r\n variables[var_name] = self.posterior[var_i][i]\r\n\r\n with self.ed.interception(self._value_setter(variables)):\r\n if self.posterior_predictive_size > 1:\r\n posterior_preds.append(\r\n [self.model_fn() for _ in range(self.posterior_predictive_size)]\r\n )\r\n else:\r\n 
posterior_preds.append(self.model_fn())\r\n\r\n data = {}\r\n with self.tf.Session() as sess:\r\n data[\"obs\"] = utils.expand_dims(sess.run(posterior_preds, feed_dict=self.feed_dict))\r\n return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=self.dims)\r\n\r\n def sample_stats_to_xarray(self):\r\n \"\"\"Extract sample_stats from tfp trace.\"\"\"\r\n if self.model_fn is None or self.observed is None:\r\n return None\r\n\r\n log_likelihood = []\r\n sample_size = self.posterior[0].shape[0]\r\n\r\n for i in range(sample_size):\r\n variables = {}\r\n for var_i, var_name in enumerate(self.var_names):\r\n variables[var_name] = self.posterior[var_i][i]\r\n\r\n with self.ed.interception(self._value_setter(variables)):\r\n log_likelihood.append((self.model_fn().distribution.log_prob(self.observed)))\r\n\r\n data = {}\r\n if self.dims is not None:\r\n coord_name = self.dims.get(\"obs\")\r\n else:\r\n coord_name = None\r\n dims = {\"log_likelihood\": coord_name}\r\n\r\n with self.tf.Session() as sess:\r\n data[\"log_likelihood\"] = utils.expand_dims(\r\n sess.run(log_likelihood, feed_dict=self.feed_dict)\r\n )\r\n return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=dims)\r\n\r\n def to_inference_data(self):\r\n \"\"\"Convert all available data to an InferenceData object.\r\n\r\n Note that if groups can not be created (i.e., there is no `trace`, so\r\n the `posterior` and `sample_stats` can not be extracted), then the InferenceData\r\n will not have those groups.\r\n \"\"\"\r\n return InferenceData(\r\n **{\r\n \"posterior\": self.posterior_to_xarray(),\r\n \"sample_stats\": self.sample_stats_to_xarray(),\r\n \"posterior_predictive\": self.posterior_predictive_to_xarray(),\r\n \"observed_data\": self.observed_data_to_xarray(),\r\n }\r\n )\r\n\r\n\r\ndef from_tfp(\r\n posterior=None,\r\n *,\r\n var_names=None,\r\n model_fn=None,\r\n feed_dict=None,\r\n posterior_predictive_samples=100,\r\n posterior_predictive_size=1,\r\n observed=None,\r\n coords=None,\r\n dims=None\r\n):\r\n \"\"\"Convert tfp data into an InferenceData object.\"\"\"\r\n return TfpConverter(\r\n posterior=posterior,\r\n var_names=var_names,\r\n model_fn=model_fn,\r\n feed_dict=feed_dict,\r\n posterior_predictive_samples=posterior_predictive_samples,\r\n posterior_predictive_size=posterior_predictive_size,\r\n observed=observed,\r\n coords=coords,\r\n dims=dims,\r\n ).to_inference_data()\r\n", "path": "arviz/data/io_tfp.py"}], "after_files": [{"content": "\"\"\"Tfp-specific conversion code.\"\"\"\r\nimport numpy as np\r\nimport xarray as xr\r\n\r\nfrom .inference_data import InferenceData\r\nfrom .base import dict_to_dataset, generate_dims_coords, make_attrs\r\nfrom .. 
import utils\r\n\r\n# pylint: disable=too-many-instance-attributes\r\nclass TfpConverter:\r\n \"\"\"Encapsulate tfp specific logic.\"\"\"\r\n\r\n def __init__(\r\n self,\r\n *,\r\n posterior,\r\n var_names=None,\r\n model_fn=None,\r\n feed_dict=None,\r\n posterior_predictive_samples=100,\r\n posterior_predictive_size=1,\r\n chain_dim=None,\r\n observed=None,\r\n coords=None,\r\n dims=None\r\n ):\r\n\r\n self.posterior = posterior\r\n\r\n if var_names is None:\r\n self.var_names = []\r\n for i in range(0, len(posterior)):\r\n self.var_names.append(\"var_{0}\".format(i))\r\n else:\r\n self.var_names = var_names\r\n\r\n self.model_fn = model_fn\r\n self.feed_dict = feed_dict\r\n self.posterior_predictive_samples = posterior_predictive_samples\r\n self.posterior_predictive_size = posterior_predictive_size\r\n self.observed = observed\r\n self.chain_dim = chain_dim\r\n self.coords = coords\r\n self.dims = dims\r\n\r\n import tensorflow_probability as tfp\r\n import tensorflow as tf\r\n import tensorflow_probability.python.edward2 as ed\r\n\r\n self.tfp = tfp\r\n self.tf = tf # pylint: disable=invalid-name\r\n self.ed = ed # pylint: disable=invalid-name\r\n\r\n if int(self.tf.__version__[0]) > 1:\r\n import tensorflow.compat.v1 as tf # pylint: disable=import-error\r\n\r\n tf.disable_v2_behavior()\r\n self.tf = tf # pylint: disable=invalid-name\r\n\r\n def handle_chain_location(self, ary):\r\n \"\"\"Move the axis corresponding to the chain to first position.\r\n\r\n If there is only one chain which has no axis, add it.\r\n \"\"\"\r\n if self.chain_dim is None:\r\n return utils.expand_dims(ary)\r\n return ary.swapaxes(0, self.chain_dim)\r\n\r\n def posterior_to_xarray(self):\r\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\r\n data = {}\r\n for i, var_name in enumerate(self.var_names):\r\n data[var_name] = self.handle_chain_location(self.posterior[i])\r\n return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=self.dims)\r\n\r\n def observed_data_to_xarray(self):\r\n \"\"\"Convert observed data to xarray.\"\"\"\r\n if self.observed is None:\r\n return None\r\n\r\n observed_data = {}\r\n if isinstance(self.observed, self.tf.Tensor):\r\n with self.tf.Session() as sess:\r\n vals = sess.run(self.observed, feed_dict=self.feed_dict)\r\n else:\r\n vals = self.observed\r\n\r\n if self.dims is None:\r\n dims = {}\r\n else:\r\n dims = self.dims\r\n\r\n name = \"obs\"\r\n val_dims = dims.get(name)\r\n vals = utils.one_de(vals)\r\n val_dims, coords = generate_dims_coords(vals.shape, name, dims=val_dims, coords=self.coords)\r\n # coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}\r\n\r\n observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)\r\n return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.tfp))\r\n\r\n def _value_setter(self, variables):\r\n def interceptor(rv_constructor, *rv_args, **rv_kwargs):\r\n \"\"\"Replace prior on effects with empirical posterior mean from MCMC.\"\"\"\r\n name = rv_kwargs.pop(\"name\")\r\n if name in variables:\r\n rv_kwargs[\"value\"] = variables[name]\r\n return rv_constructor(*rv_args, **rv_kwargs)\r\n\r\n return interceptor\r\n\r\n def posterior_predictive_to_xarray(self):\r\n \"\"\"Convert posterior_predictive samples to xarray.\"\"\"\r\n if self.model_fn is None:\r\n return None\r\n\r\n posterior_preds = []\r\n sample_size = self.posterior[0].shape[0]\r\n\r\n for i in np.arange(0, sample_size, int(sample_size / self.posterior_predictive_samples)):\r\n variables = {}\r\n for var_i, 
var_name in enumerate(self.var_names):\r\n variables[var_name] = self.posterior[var_i][i]\r\n\r\n with self.ed.interception(self._value_setter(variables)):\r\n if self.posterior_predictive_size > 1:\r\n posterior_preds.append(\r\n [self.model_fn() for _ in range(self.posterior_predictive_size)]\r\n )\r\n else:\r\n posterior_preds.append(self.model_fn())\r\n\r\n data = {}\r\n with self.tf.Session() as sess:\r\n data[\"obs\"] = self.handle_chain_location(\r\n sess.run(posterior_preds, feed_dict=self.feed_dict)\r\n )\r\n return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=self.dims)\r\n\r\n def sample_stats_to_xarray(self):\r\n \"\"\"Extract sample_stats from tfp trace.\"\"\"\r\n if self.model_fn is None or self.observed is None:\r\n return None\r\n\r\n log_likelihood = []\r\n sample_size = self.posterior[0].shape[0]\r\n\r\n for i in range(sample_size):\r\n variables = {}\r\n for var_i, var_name in enumerate(self.var_names):\r\n variables[var_name] = self.posterior[var_i][i]\r\n\r\n with self.ed.interception(self._value_setter(variables)):\r\n log_likelihood.append((self.model_fn().distribution.log_prob(self.observed)))\r\n\r\n data = {}\r\n if self.dims is not None:\r\n coord_name = self.dims.get(\"obs\")\r\n else:\r\n coord_name = None\r\n dims = {\"log_likelihood\": coord_name}\r\n\r\n with self.tf.Session() as sess:\r\n data[\"log_likelihood\"] = self.handle_chain_location(\r\n sess.run(log_likelihood, feed_dict=self.feed_dict)\r\n )\r\n return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=dims)\r\n\r\n def to_inference_data(self):\r\n \"\"\"Convert all available data to an InferenceData object.\r\n\r\n Note that if groups can not be created (i.e., there is no `trace`, so\r\n the `posterior` and `sample_stats` can not be extracted), then the InferenceData\r\n will not have those groups.\r\n \"\"\"\r\n return InferenceData(\r\n **{\r\n \"posterior\": self.posterior_to_xarray(),\r\n \"sample_stats\": self.sample_stats_to_xarray(),\r\n \"posterior_predictive\": self.posterior_predictive_to_xarray(),\r\n \"observed_data\": self.observed_data_to_xarray(),\r\n }\r\n )\r\n\r\n\r\ndef from_tfp(\r\n posterior=None,\r\n *,\r\n var_names=None,\r\n model_fn=None,\r\n feed_dict=None,\r\n posterior_predictive_samples=100,\r\n posterior_predictive_size=1,\r\n chain_dim=None,\r\n observed=None,\r\n coords=None,\r\n dims=None\r\n):\r\n \"\"\"Convert tfp data into an InferenceData object.\"\"\"\r\n return TfpConverter(\r\n posterior=posterior,\r\n var_names=var_names,\r\n model_fn=model_fn,\r\n feed_dict=feed_dict,\r\n posterior_predictive_samples=posterior_predictive_samples,\r\n posterior_predictive_size=posterior_predictive_size,\r\n chain_dim=chain_dim,\r\n observed=observed,\r\n coords=coords,\r\n dims=dims,\r\n ).to_inference_data()\r\n", "path": "arviz/data/io_tfp.py"}]} | 2,883 | 700 |
gh_patches_debug_43086 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-3042 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error transforming template when using !GetAtt
### CloudFormation Lint Version
0.85.0
### What operating system are you using?
Ubuntu
### Describe the bug
When I try to validate the template I get the following error:
> E0001 Error transforming template: __init__() missing 2 required positional arguments: 'start_mark' and 'end_mark'
aws/template.yml:1:1
When AuthorizerUri is hardcoded, the problem disappears.
### Expected behavior
No error is thrown and the template is validated.
### Reproduction template
```
AWSTemplateFormatVersion: "2010-09-09"
Transform: AWS::Serverless-2016-10-31

Resources:
  Api:
    Type: "AWS::Serverless::GraphQLApi"
    Properties:
      Name: SomeApi
      SchemaUri: ./schema.graphql
      Auth:
        Type: AWS_LAMBDA
        LambdaAuthorizer:
          AuthorizerUri: !GetAtt AppSyncAuthorizerFunction.Arn

  AppSyncAuthorizerFunction:
    Type: AWS::Serverless::Function
    Properties:
      CodeUri: ./lambda/
      Handler: lambda_authorizer.handle
      Runtime: python3.8
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/decode/node.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5
6 import logging
7 from copy import deepcopy
8
9 import regex as re
10
11 LOGGER = logging.getLogger(__name__)
12
13
14 class TemplateAttributeError(AttributeError):
15 """Custom error to capture Attribute Errors in the Template"""
16
17
18 def create_str_node_class(cls):
19 """
20 Create string node class
21 """
22
23 class node_class(cls):
24 """Node class created based on the input class"""
25
26 def __init__(self, x, start_mark, end_mark):
27 try:
28 cls.__init__(self, x)
29 except TypeError:
30 cls.__init__(self)
31 self.start_mark = start_mark
32 self.end_mark = end_mark
33
34 # pylint: disable=bad-classmethod-argument, unused-argument
35 def __new__(self, x, start_mark, end_mark):
36 return cls.__new__(self, x)
37
38 def __getattr__(self, name):
39 raise TemplateAttributeError(f"{self.__class__.__name__}.{name} is invalid")
40
41 def __deepcopy__(self, memo):
42 result = str_node(self, self.start_mark, self.end_mark)
43 memo[id(self)] = result
44 return result
45
46 def __copy__(self):
47 return self
48
49 node_class.__name__ = f"{cls.__name__}_node"
50 return node_class
51
52
53 def create_dict_node_class(cls):
54 """
55 Create dynamic node class
56 """
57
58 class node_class(cls):
59 """Node class created based on the input class"""
60
61 def __init__(self, x, start_mark, end_mark):
62 try:
63 cls.__init__(self, x)
64 except TypeError:
65 cls.__init__(self)
66 self.start_mark = start_mark
67 self.end_mark = end_mark
68 self.condition_functions = ["Fn::If"]
69
70 def __deepcopy__(self, memo):
71 result = dict_node(self, self.start_mark, self.end_mark)
72 memo[id(self)] = result
73 for k, v in self.items():
74 result[deepcopy(k)] = deepcopy(v, memo)
75
76 return result
77
78 def __copy__(self):
79 return self
80
81 def is_function_returning_object(self, mappings=None):
82 """
83 Check if an object is using a function that could return an object
84 Return True when
85 Fn::Select:
86 - 0 # or any number
87 - !FindInMap [mapname, key, value] # or any mapname, key, value
88 Otherwise False
89 """
90 mappings = mappings or {}
91 if len(self) == 1:
92 for k, v in self.items():
93 if k in ["Fn::Select"]:
94 if isinstance(v, list):
95 if len(v) == 2:
96 p_v = v[1]
97 if isinstance(p_v, dict):
98 if len(p_v) == 1:
99 for l_k in p_v.keys():
100 if l_k in ["Fn::FindInMap", "Fn::GetAtt"]:
101 return True
102
103 return False
104
105 def get(self, key, default=None):
106 """Override the default get"""
107 if isinstance(default, dict):
108 default = dict_node(default, self.start_mark, self.end_mark)
109 return super().get(key, default)
110
111 def get_safe(self, key, default=None, path=None, type_t=()):
112 """
113 Get values in format
114 """
115 path = path or []
116
117 if default == {}:
118 default = dict_node({}, self.start_mark, self.end_mark)
119 value = self.get(key, default)
120 if value is None and default is None:
121 # if default is None and value is None return empty list
122 return []
123
124 # if the value is the default make sure that the default value is of type_t when specified
125 if bool(type_t) and value == default and not isinstance(default, type_t):
126 raise ValueError('"default" type should be of "type_t"')
127
128 # when not a dict see if if the value is of the right type
129 results = []
130 if not isinstance(value, (dict)):
131 if isinstance(value, type_t) or not type_t:
132 return [(value, (path[:] + [key]))]
133 else:
134 for sub_v, sub_path in value.items_safe(path + [key]):
135 if isinstance(sub_v, type_t) or not type_t:
136 results.append((sub_v, sub_path))
137
138 return results
139
140 def clean(self):
141 """Clean object to remove any Ref AWS::NoValue"""
142 result = dict_node({}, self.start_mark, self.end_mark)
143 for k, v in self.items():
144 if isinstance(v, dict) and len(v) == 1:
145 if v.get("Ref") == "AWS::NoValue":
146 continue
147 result[k] = v
148 return result
149
150 def items_safe(self, path=None, type_t=()):
151 """Get items while handling IFs"""
152 path = path or []
153 if len(self) == 1:
154 for k, v in self.items():
155 if k == "Fn::If":
156 if isinstance(v, list):
157 if len(v) == 3:
158 for i, if_v in enumerate(v[1:]):
159 if isinstance(if_v, dict):
160 # yield from if_v.items_safe(path[:] + [k, i - 1])
161 # Python 2.7 support
162 for items, p in if_v.items_safe(
163 path[:] + [k, i + 1]
164 ):
165 if isinstance(items, type_t) or not type_t:
166 yield items, p
167 elif isinstance(if_v, list):
168 if isinstance(if_v, type_t) or not type_t:
169 yield if_v, path[:] + [k, i + 1]
170 else:
171 if isinstance(if_v, type_t) or not type_t:
172 yield if_v, path[:] + [k, i + 1]
173 elif not (k == "Ref" and v == "AWS::NoValue"):
174 if isinstance(self, type_t) or not type_t:
175 yield self.clean(), path[:]
176 else:
177 if isinstance(self, type_t) or not type_t:
178 yield self.clean(), path[:]
179
180 def __getattr__(self, name):
181 raise TemplateAttributeError(f"{self.__class__.__name__}.{name} is invalid")
182
183 node_class.__name__ = f"{cls.__name__}_node"
184 return node_class
185
186
187 def create_intrinsic_node_class(cls):
188 """
189 Create dynamic sub class
190 """
191
192 class intrinsic_class(cls):
193 """Node class created based on the input class"""
194
195 def is_valid(self):
196 raise TemplateAttributeError("intrisnic class shouldn't be directly used")
197
198 intrinsic_class.__name__ = f"{cls.__name__}_intrinsic"
199 return intrinsic_class
200
201
202 def create_sub_node_class(cls):
203 """
204 Create dynamic sub class
205 """
206
207 class sub_class(cls):
208 """Node class created based on the input class"""
209
210 def __init__(self, x, start_mark, end_mark):
211 cls.__init__(self, x, start_mark, end_mark)
212 self.__cache_is_valid = False
213 self.__cache_sub_string = ""
214 self.__cache_sub_string_vars = set()
215 self.__cache_sub_vars = {}
216 self.__setup()
217
218 def __setup_list_sub_string(self, s):
219 self.__cache_sub_string = s
220 regex = re.compile(r"\${[^!].*?}")
221 string_params = regex.findall(s)
222
223 for string_param in string_params:
224 self.__cache_sub_string_vars.add(string_param[2:-1].strip())
225
226 def __setup_list(self, v):
227 if len(v) == 2:
228 if not isinstance(v[0], str):
229 return
230 self.__setup_list_sub_string(v[0])
231 if not isinstance(v[1], dict):
232 return
233 self.__cache_sub_vars = v[1]
234 self.__cache_is_valid = True
235
236 def __setup(self):
237 if len(self) == 1:
238 for k, v in self.items():
239 if k == "Fn::Sub":
240 if isinstance(v, str):
241 self.__setup_list_sub_string(v)
242 self.__cache_is_valid = True
243 elif isinstance(v, list):
244 self.__setup_list(v)
245
246 def get_defined_vars(self):
247 # Returns that are in the second part of a list Fn::Sub
248 # This function will not return implied variables from a String Ref and GetAtt
249 if self.is_valid():
250 return self.__cache_sub_vars
251
252 return {}
253
254 def get_string_vars(self):
255 # Returns all variables in the Sub String
256 if self.is_valid():
257 return self.__cache_sub_string_vars
258
259 return set()
260
261 def get_string(self):
262 # Returns the sub string as it was when it was decoded
263 if self.is_valid():
264 return self.__cache_sub_string
265
266 return ""
267
268 def is_valid(self):
269 return self.__cache_is_valid
270
271 sub_class.__name__ = f"{cls.__name__}_sub"
272 return sub_class
273
274
275 def create_dict_list_class(cls):
276 """
277 Create dynamic list class
278 """
279
280 class node_class(cls):
281 """Node class created based on the input class"""
282
283 def __init__(self, x, start_mark, end_mark):
284 try:
285 cls.__init__(self, x)
286 except TypeError:
287 cls.__init__(self)
288 self.start_mark = start_mark
289 self.end_mark = end_mark
290 self.condition_functions = ["Fn::If"]
291
292 def __deepcopy__(self, memo):
293 result = list_node([], self.start_mark, self.end_mark)
294 memo[id(self)] = result
295 for _, v in enumerate(self):
296 result.append(deepcopy(v, memo))
297
298 return result
299
300 def __copy__(self):
301 return self
302
303 def items_safe(self, path=None, type_t=()):
304 """Get items while handling IFs"""
305 path = path or []
306 for i, v in enumerate(self):
307 if isinstance(v, dict):
308 for items, p in v.items_safe(path[:] + [i]):
309 if isinstance(items, type_t) or not type_t:
310 yield items, p
311 else:
312 if isinstance(v, type_t) or not type_t:
313 yield v, path[:] + [i]
314
315 def __getattr__(self, name):
316 raise TemplateAttributeError(f"{self.__class__.__name__}.{name} is invalid")
317
318 node_class.__name__ = f"{cls.__name__}_node"
319 return node_class
320
321
322 str_node = create_str_node_class(str)
323 dict_node = create_dict_node_class(dict)
324 list_node = create_dict_list_class(list)
325 intrinsic_node = create_intrinsic_node_class(dict_node)
326 sub_node = create_sub_node_class(intrinsic_node)
327
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/decode/node.py b/src/cfnlint/decode/node.py
--- a/src/cfnlint/decode/node.py
+++ b/src/cfnlint/decode/node.py
@@ -3,8 +3,12 @@
SPDX-License-Identifier: MIT-0
"""
+from __future__ import annotations
+
import logging
+from collections import namedtuple
from copy import deepcopy
+from typing import Any, Dict, Protocol, Set
import regex as re
@@ -15,6 +19,14 @@
"""Custom error to capture Attribute Errors in the Template"""
+class Mark(Protocol):
+ line: int
+ column: int
+
+
+_mark = namedtuple("_mark", ["line", "column"])
+
+
def create_str_node_class(cls):
"""
Create string node class
@@ -23,13 +35,16 @@
class node_class(cls):
"""Node class created based on the input class"""
- def __init__(self, x, start_mark, end_mark):
+ def __init__(
+ self, x, start_mark: Mark | None = None, end_mark: Mark | None = None
+ ):
try:
cls.__init__(self, x)
except TypeError:
cls.__init__(self)
- self.start_mark = start_mark
- self.end_mark = end_mark
+
+ self.start_mark = start_mark or _mark(0, 0)
+ self.end_mark = end_mark or _mark(0, 0)
# pylint: disable=bad-classmethod-argument, unused-argument
def __new__(self, x, start_mark, end_mark):
@@ -58,13 +73,16 @@
class node_class(cls):
"""Node class created based on the input class"""
- def __init__(self, x, start_mark, end_mark):
+ def __init__(
+ self, x, start_mark: Mark | None = None, end_mark: Mark | None = None
+ ):
+ LOGGER.debug(type(start_mark))
try:
cls.__init__(self, x)
except TypeError:
cls.__init__(self)
- self.start_mark = start_mark
- self.end_mark = end_mark
+ self.start_mark = start_mark or _mark(0, 0)
+ self.end_mark = end_mark or _mark(0, 0)
self.condition_functions = ["Fn::If"]
def __deepcopy__(self, memo):
@@ -207,12 +225,14 @@
class sub_class(cls):
"""Node class created based on the input class"""
- def __init__(self, x, start_mark, end_mark):
+ def __init__(
+ self, x, start_mark: Mark | None = None, end_mark: Mark | None = None
+ ):
cls.__init__(self, x, start_mark, end_mark)
self.__cache_is_valid = False
self.__cache_sub_string = ""
- self.__cache_sub_string_vars = set()
- self.__cache_sub_vars = {}
+ self.__cache_sub_string_vars: Set[str] = set()
+ self.__cache_sub_vars: Dict[str, Any] = {}
self.__setup()
def __setup_list_sub_string(self, s):
@@ -280,13 +300,15 @@
class node_class(cls):
"""Node class created based on the input class"""
- def __init__(self, x, start_mark, end_mark):
+ def __init__(
+ self, x, start_mark: Mark | None = None, end_mark: Mark | None = None
+ ):
try:
cls.__init__(self, x)
except TypeError:
cls.__init__(self)
- self.start_mark = start_mark
- self.end_mark = end_mark
+ self.start_mark = start_mark or _mark(0, 0)
+ self.end_mark = end_mark or _mark(0, 0)
self.condition_functions = ["Fn::If"]
def __deepcopy__(self, memo):
| {"golden_diff": "diff --git a/src/cfnlint/decode/node.py b/src/cfnlint/decode/node.py\n--- a/src/cfnlint/decode/node.py\n+++ b/src/cfnlint/decode/node.py\n@@ -3,8 +3,12 @@\n SPDX-License-Identifier: MIT-0\n \"\"\"\n \n+from __future__ import annotations\n+\n import logging\n+from collections import namedtuple\n from copy import deepcopy\n+from typing import Any, Dict, Protocol, Set\n \n import regex as re\n \n@@ -15,6 +19,14 @@\n \"\"\"Custom error to capture Attribute Errors in the Template\"\"\"\n \n \n+class Mark(Protocol):\n+ line: int\n+ column: int\n+\n+\n+_mark = namedtuple(\"_mark\", [\"line\", \"column\"])\n+\n+\n def create_str_node_class(cls):\n \"\"\"\n Create string node class\n@@ -23,13 +35,16 @@\n class node_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n \n- def __init__(self, x, start_mark, end_mark):\n+ def __init__(\n+ self, x, start_mark: Mark | None = None, end_mark: Mark | None = None\n+ ):\n try:\n cls.__init__(self, x)\n except TypeError:\n cls.__init__(self)\n- self.start_mark = start_mark\n- self.end_mark = end_mark\n+\n+ self.start_mark = start_mark or _mark(0, 0)\n+ self.end_mark = end_mark or _mark(0, 0)\n \n # pylint: disable=bad-classmethod-argument, unused-argument\n def __new__(self, x, start_mark, end_mark):\n@@ -58,13 +73,16 @@\n class node_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n \n- def __init__(self, x, start_mark, end_mark):\n+ def __init__(\n+ self, x, start_mark: Mark | None = None, end_mark: Mark | None = None\n+ ):\n+ LOGGER.debug(type(start_mark))\n try:\n cls.__init__(self, x)\n except TypeError:\n cls.__init__(self)\n- self.start_mark = start_mark\n- self.end_mark = end_mark\n+ self.start_mark = start_mark or _mark(0, 0)\n+ self.end_mark = end_mark or _mark(0, 0)\n self.condition_functions = [\"Fn::If\"]\n \n def __deepcopy__(self, memo):\n@@ -207,12 +225,14 @@\n class sub_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n \n- def __init__(self, x, start_mark, end_mark):\n+ def __init__(\n+ self, x, start_mark: Mark | None = None, end_mark: Mark | None = None\n+ ):\n cls.__init__(self, x, start_mark, end_mark)\n self.__cache_is_valid = False\n self.__cache_sub_string = \"\"\n- self.__cache_sub_string_vars = set()\n- self.__cache_sub_vars = {}\n+ self.__cache_sub_string_vars: Set[str] = set()\n+ self.__cache_sub_vars: Dict[str, Any] = {}\n self.__setup()\n \n def __setup_list_sub_string(self, s):\n@@ -280,13 +300,15 @@\n class node_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n \n- def __init__(self, x, start_mark, end_mark):\n+ def __init__(\n+ self, x, start_mark: Mark | None = None, end_mark: Mark | None = None\n+ ):\n try:\n cls.__init__(self, x)\n except TypeError:\n cls.__init__(self)\n- self.start_mark = start_mark\n- self.end_mark = end_mark\n+ self.start_mark = start_mark or _mark(0, 0)\n+ self.end_mark = end_mark or _mark(0, 0)\n self.condition_functions = [\"Fn::If\"]\n \n def __deepcopy__(self, memo):\n", "issue": "Error transforming template when using !GetAtt\n### CloudFormation Lint Version\n\n0.85.0\n\n### What operating system are you using?\n\nUbuntu\n\n### Describe the bug\n\nWhen I try to validate the template i get following error:\r\n\r\n> E0001 Error transforming template: __init__() missing 2 required positional arguments: 'start_mark' and 'end_mark'\r\naws/template.yml:1:1\r\n\r\nWhen AuthorizeUri is hardcoded the problem disappears.\r\n\n\n### Expected behavior\n\nNo error is thrown and template is validated.\n\n### 
Reproduction template\n\n```AWSTemplateFormatVersion: \"2010-09-09\"\r\nTransform: AWS::Serverless-2016-10-31\r\n\r\nResources:\r\n Api:\r\n Type: \"AWS::Serverless::GraphQLApi\"\r\n Properties:\r\n Name: SomeApi\r\n SchemaUri: ./schema.graphql\r\n Auth:\r\n Type: AWS_LAMBDA\r\n LambdaAuthorizer:\r\n AuthorizerUri: !GetAtt AppSyncAuthorizerFunction.Arn\r\n\r\n AppSyncAuthorizerFunction:\r\n Type: AWS::Serverless::Function\r\n Properties:\r\n CodeUri: ./lambda/\r\n Handler: lambda_authorizer.handle\r\n Runtime: python3.8\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\nimport logging\nfrom copy import deepcopy\n\nimport regex as re\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass TemplateAttributeError(AttributeError):\n \"\"\"Custom error to capture Attribute Errors in the Template\"\"\"\n\n\ndef create_str_node_class(cls):\n \"\"\"\n Create string node class\n \"\"\"\n\n class node_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def __init__(self, x, start_mark, end_mark):\n try:\n cls.__init__(self, x)\n except TypeError:\n cls.__init__(self)\n self.start_mark = start_mark\n self.end_mark = end_mark\n\n # pylint: disable=bad-classmethod-argument, unused-argument\n def __new__(self, x, start_mark, end_mark):\n return cls.__new__(self, x)\n\n def __getattr__(self, name):\n raise TemplateAttributeError(f\"{self.__class__.__name__}.{name} is invalid\")\n\n def __deepcopy__(self, memo):\n result = str_node(self, self.start_mark, self.end_mark)\n memo[id(self)] = result\n return result\n\n def __copy__(self):\n return self\n\n node_class.__name__ = f\"{cls.__name__}_node\"\n return node_class\n\n\ndef create_dict_node_class(cls):\n \"\"\"\n Create dynamic node class\n \"\"\"\n\n class node_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def __init__(self, x, start_mark, end_mark):\n try:\n cls.__init__(self, x)\n except TypeError:\n cls.__init__(self)\n self.start_mark = start_mark\n self.end_mark = end_mark\n self.condition_functions = [\"Fn::If\"]\n\n def __deepcopy__(self, memo):\n result = dict_node(self, self.start_mark, self.end_mark)\n memo[id(self)] = result\n for k, v in self.items():\n result[deepcopy(k)] = deepcopy(v, memo)\n\n return result\n\n def __copy__(self):\n return self\n\n def is_function_returning_object(self, mappings=None):\n \"\"\"\n Check if an object is using a function that could return an object\n Return True when\n Fn::Select:\n - 0 # or any number\n - !FindInMap [mapname, key, value] # or any mapname, key, value\n Otherwise False\n \"\"\"\n mappings = mappings or {}\n if len(self) == 1:\n for k, v in self.items():\n if k in [\"Fn::Select\"]:\n if isinstance(v, list):\n if len(v) == 2:\n p_v = v[1]\n if isinstance(p_v, dict):\n if len(p_v) == 1:\n for l_k in p_v.keys():\n if l_k in [\"Fn::FindInMap\", \"Fn::GetAtt\"]:\n return True\n\n return False\n\n def get(self, key, default=None):\n \"\"\"Override the default get\"\"\"\n if isinstance(default, dict):\n default = dict_node(default, self.start_mark, self.end_mark)\n return super().get(key, default)\n\n def get_safe(self, key, default=None, path=None, type_t=()):\n \"\"\"\n Get values in format\n \"\"\"\n path = path or []\n\n if default == {}:\n default = dict_node({}, self.start_mark, self.end_mark)\n value = self.get(key, default)\n if value is None and default is None:\n # if default is None and value is None return empty list\n return []\n\n # if 
the value is the default make sure that the default value is of type_t when specified\n if bool(type_t) and value == default and not isinstance(default, type_t):\n raise ValueError('\"default\" type should be of \"type_t\"')\n\n # when not a dict see if if the value is of the right type\n results = []\n if not isinstance(value, (dict)):\n if isinstance(value, type_t) or not type_t:\n return [(value, (path[:] + [key]))]\n else:\n for sub_v, sub_path in value.items_safe(path + [key]):\n if isinstance(sub_v, type_t) or not type_t:\n results.append((sub_v, sub_path))\n\n return results\n\n def clean(self):\n \"\"\"Clean object to remove any Ref AWS::NoValue\"\"\"\n result = dict_node({}, self.start_mark, self.end_mark)\n for k, v in self.items():\n if isinstance(v, dict) and len(v) == 1:\n if v.get(\"Ref\") == \"AWS::NoValue\":\n continue\n result[k] = v\n return result\n\n def items_safe(self, path=None, type_t=()):\n \"\"\"Get items while handling IFs\"\"\"\n path = path or []\n if len(self) == 1:\n for k, v in self.items():\n if k == \"Fn::If\":\n if isinstance(v, list):\n if len(v) == 3:\n for i, if_v in enumerate(v[1:]):\n if isinstance(if_v, dict):\n # yield from if_v.items_safe(path[:] + [k, i - 1])\n # Python 2.7 support\n for items, p in if_v.items_safe(\n path[:] + [k, i + 1]\n ):\n if isinstance(items, type_t) or not type_t:\n yield items, p\n elif isinstance(if_v, list):\n if isinstance(if_v, type_t) or not type_t:\n yield if_v, path[:] + [k, i + 1]\n else:\n if isinstance(if_v, type_t) or not type_t:\n yield if_v, path[:] + [k, i + 1]\n elif not (k == \"Ref\" and v == \"AWS::NoValue\"):\n if isinstance(self, type_t) or not type_t:\n yield self.clean(), path[:]\n else:\n if isinstance(self, type_t) or not type_t:\n yield self.clean(), path[:]\n\n def __getattr__(self, name):\n raise TemplateAttributeError(f\"{self.__class__.__name__}.{name} is invalid\")\n\n node_class.__name__ = f\"{cls.__name__}_node\"\n return node_class\n\n\ndef create_intrinsic_node_class(cls):\n \"\"\"\n Create dynamic sub class\n \"\"\"\n\n class intrinsic_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def is_valid(self):\n raise TemplateAttributeError(\"intrisnic class shouldn't be directly used\")\n\n intrinsic_class.__name__ = f\"{cls.__name__}_intrinsic\"\n return intrinsic_class\n\n\ndef create_sub_node_class(cls):\n \"\"\"\n Create dynamic sub class\n \"\"\"\n\n class sub_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def __init__(self, x, start_mark, end_mark):\n cls.__init__(self, x, start_mark, end_mark)\n self.__cache_is_valid = False\n self.__cache_sub_string = \"\"\n self.__cache_sub_string_vars = set()\n self.__cache_sub_vars = {}\n self.__setup()\n\n def __setup_list_sub_string(self, s):\n self.__cache_sub_string = s\n regex = re.compile(r\"\\${[^!].*?}\")\n string_params = regex.findall(s)\n\n for string_param in string_params:\n self.__cache_sub_string_vars.add(string_param[2:-1].strip())\n\n def __setup_list(self, v):\n if len(v) == 2:\n if not isinstance(v[0], str):\n return\n self.__setup_list_sub_string(v[0])\n if not isinstance(v[1], dict):\n return\n self.__cache_sub_vars = v[1]\n self.__cache_is_valid = True\n\n def __setup(self):\n if len(self) == 1:\n for k, v in self.items():\n if k == \"Fn::Sub\":\n if isinstance(v, str):\n self.__setup_list_sub_string(v)\n self.__cache_is_valid = True\n elif isinstance(v, list):\n self.__setup_list(v)\n\n def get_defined_vars(self):\n # Returns that are in the second part of a list Fn::Sub\n # 
This function will not return implied variables from a String Ref and GetAtt\n if self.is_valid():\n return self.__cache_sub_vars\n\n return {}\n\n def get_string_vars(self):\n # Returns all variables in the Sub String\n if self.is_valid():\n return self.__cache_sub_string_vars\n\n return set()\n\n def get_string(self):\n # Returns the sub string as it was when it was decoded\n if self.is_valid():\n return self.__cache_sub_string\n\n return \"\"\n\n def is_valid(self):\n return self.__cache_is_valid\n\n sub_class.__name__ = f\"{cls.__name__}_sub\"\n return sub_class\n\n\ndef create_dict_list_class(cls):\n \"\"\"\n Create dynamic list class\n \"\"\"\n\n class node_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def __init__(self, x, start_mark, end_mark):\n try:\n cls.__init__(self, x)\n except TypeError:\n cls.__init__(self)\n self.start_mark = start_mark\n self.end_mark = end_mark\n self.condition_functions = [\"Fn::If\"]\n\n def __deepcopy__(self, memo):\n result = list_node([], self.start_mark, self.end_mark)\n memo[id(self)] = result\n for _, v in enumerate(self):\n result.append(deepcopy(v, memo))\n\n return result\n\n def __copy__(self):\n return self\n\n def items_safe(self, path=None, type_t=()):\n \"\"\"Get items while handling IFs\"\"\"\n path = path or []\n for i, v in enumerate(self):\n if isinstance(v, dict):\n for items, p in v.items_safe(path[:] + [i]):\n if isinstance(items, type_t) or not type_t:\n yield items, p\n else:\n if isinstance(v, type_t) or not type_t:\n yield v, path[:] + [i]\n\n def __getattr__(self, name):\n raise TemplateAttributeError(f\"{self.__class__.__name__}.{name} is invalid\")\n\n node_class.__name__ = f\"{cls.__name__}_node\"\n return node_class\n\n\nstr_node = create_str_node_class(str)\ndict_node = create_dict_node_class(dict)\nlist_node = create_dict_list_class(list)\nintrinsic_node = create_intrinsic_node_class(dict_node)\nsub_node = create_sub_node_class(intrinsic_node)\n", "path": "src/cfnlint/decode/node.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom collections import namedtuple\nfrom copy import deepcopy\nfrom typing import Any, Dict, Protocol, Set\n\nimport regex as re\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass TemplateAttributeError(AttributeError):\n \"\"\"Custom error to capture Attribute Errors in the Template\"\"\"\n\n\nclass Mark(Protocol):\n line: int\n column: int\n\n\n_mark = namedtuple(\"_mark\", [\"line\", \"column\"])\n\n\ndef create_str_node_class(cls):\n \"\"\"\n Create string node class\n \"\"\"\n\n class node_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def __init__(\n self, x, start_mark: Mark | None = None, end_mark: Mark | None = None\n ):\n try:\n cls.__init__(self, x)\n except TypeError:\n cls.__init__(self)\n\n self.start_mark = start_mark or _mark(0, 0)\n self.end_mark = end_mark or _mark(0, 0)\n\n # pylint: disable=bad-classmethod-argument, unused-argument\n def __new__(self, x, start_mark, end_mark):\n return cls.__new__(self, x)\n\n def __getattr__(self, name):\n raise TemplateAttributeError(f\"{self.__class__.__name__}.{name} is invalid\")\n\n def __deepcopy__(self, memo):\n result = str_node(self, self.start_mark, self.end_mark)\n memo[id(self)] = result\n return result\n\n def __copy__(self):\n return self\n\n node_class.__name__ = f\"{cls.__name__}_node\"\n return node_class\n\n\ndef create_dict_node_class(cls):\n \"\"\"\n Create dynamic node class\n \"\"\"\n\n class node_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def __init__(\n self, x, start_mark: Mark | None = None, end_mark: Mark | None = None\n ):\n LOGGER.debug(type(start_mark))\n try:\n cls.__init__(self, x)\n except TypeError:\n cls.__init__(self)\n self.start_mark = start_mark or _mark(0, 0)\n self.end_mark = end_mark or _mark(0, 0)\n self.condition_functions = [\"Fn::If\"]\n\n def __deepcopy__(self, memo):\n result = dict_node(self, self.start_mark, self.end_mark)\n memo[id(self)] = result\n for k, v in self.items():\n result[deepcopy(k)] = deepcopy(v, memo)\n\n return result\n\n def __copy__(self):\n return self\n\n def is_function_returning_object(self, mappings=None):\n \"\"\"\n Check if an object is using a function that could return an object\n Return True when\n Fn::Select:\n - 0 # or any number\n - !FindInMap [mapname, key, value] # or any mapname, key, value\n Otherwise False\n \"\"\"\n mappings = mappings or {}\n if len(self) == 1:\n for k, v in self.items():\n if k in [\"Fn::Select\"]:\n if isinstance(v, list):\n if len(v) == 2:\n p_v = v[1]\n if isinstance(p_v, dict):\n if len(p_v) == 1:\n for l_k in p_v.keys():\n if l_k in [\"Fn::FindInMap\", \"Fn::GetAtt\"]:\n return True\n\n return False\n\n def get(self, key, default=None):\n \"\"\"Override the default get\"\"\"\n if isinstance(default, dict):\n default = dict_node(default, self.start_mark, self.end_mark)\n return super().get(key, default)\n\n def get_safe(self, key, default=None, path=None, type_t=()):\n \"\"\"\n Get values in format\n \"\"\"\n path = path or []\n\n if default == {}:\n default = dict_node({}, self.start_mark, self.end_mark)\n value = self.get(key, default)\n if value is None and default is None:\n # if default is None and value is None return empty list\n return []\n\n # if the value is the default make sure that the default value is of type_t when specified\n if bool(type_t) and value == default and not isinstance(default, type_t):\n raise ValueError('\"default\" type 
should be of \"type_t\"')\n\n # when not a dict see if if the value is of the right type\n results = []\n if not isinstance(value, (dict)):\n if isinstance(value, type_t) or not type_t:\n return [(value, (path[:] + [key]))]\n else:\n for sub_v, sub_path in value.items_safe(path + [key]):\n if isinstance(sub_v, type_t) or not type_t:\n results.append((sub_v, sub_path))\n\n return results\n\n def clean(self):\n \"\"\"Clean object to remove any Ref AWS::NoValue\"\"\"\n result = dict_node({}, self.start_mark, self.end_mark)\n for k, v in self.items():\n if isinstance(v, dict) and len(v) == 1:\n if v.get(\"Ref\") == \"AWS::NoValue\":\n continue\n result[k] = v\n return result\n\n def items_safe(self, path=None, type_t=()):\n \"\"\"Get items while handling IFs\"\"\"\n path = path or []\n if len(self) == 1:\n for k, v in self.items():\n if k == \"Fn::If\":\n if isinstance(v, list):\n if len(v) == 3:\n for i, if_v in enumerate(v[1:]):\n if isinstance(if_v, dict):\n # yield from if_v.items_safe(path[:] + [k, i - 1])\n # Python 2.7 support\n for items, p in if_v.items_safe(\n path[:] + [k, i + 1]\n ):\n if isinstance(items, type_t) or not type_t:\n yield items, p\n elif isinstance(if_v, list):\n if isinstance(if_v, type_t) or not type_t:\n yield if_v, path[:] + [k, i + 1]\n else:\n if isinstance(if_v, type_t) or not type_t:\n yield if_v, path[:] + [k, i + 1]\n elif not (k == \"Ref\" and v == \"AWS::NoValue\"):\n if isinstance(self, type_t) or not type_t:\n yield self.clean(), path[:]\n else:\n if isinstance(self, type_t) or not type_t:\n yield self.clean(), path[:]\n\n def __getattr__(self, name):\n raise TemplateAttributeError(f\"{self.__class__.__name__}.{name} is invalid\")\n\n node_class.__name__ = f\"{cls.__name__}_node\"\n return node_class\n\n\ndef create_intrinsic_node_class(cls):\n \"\"\"\n Create dynamic sub class\n \"\"\"\n\n class intrinsic_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def is_valid(self):\n raise TemplateAttributeError(\"intrisnic class shouldn't be directly used\")\n\n intrinsic_class.__name__ = f\"{cls.__name__}_intrinsic\"\n return intrinsic_class\n\n\ndef create_sub_node_class(cls):\n \"\"\"\n Create dynamic sub class\n \"\"\"\n\n class sub_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def __init__(\n self, x, start_mark: Mark | None = None, end_mark: Mark | None = None\n ):\n cls.__init__(self, x, start_mark, end_mark)\n self.__cache_is_valid = False\n self.__cache_sub_string = \"\"\n self.__cache_sub_string_vars: Set[str] = set()\n self.__cache_sub_vars: Dict[str, Any] = {}\n self.__setup()\n\n def __setup_list_sub_string(self, s):\n self.__cache_sub_string = s\n regex = re.compile(r\"\\${[^!].*?}\")\n string_params = regex.findall(s)\n\n for string_param in string_params:\n self.__cache_sub_string_vars.add(string_param[2:-1].strip())\n\n def __setup_list(self, v):\n if len(v) == 2:\n if not isinstance(v[0], str):\n return\n self.__setup_list_sub_string(v[0])\n if not isinstance(v[1], dict):\n return\n self.__cache_sub_vars = v[1]\n self.__cache_is_valid = True\n\n def __setup(self):\n if len(self) == 1:\n for k, v in self.items():\n if k == \"Fn::Sub\":\n if isinstance(v, str):\n self.__setup_list_sub_string(v)\n self.__cache_is_valid = True\n elif isinstance(v, list):\n self.__setup_list(v)\n\n def get_defined_vars(self):\n # Returns that are in the second part of a list Fn::Sub\n # This function will not return implied variables from a String Ref and GetAtt\n if self.is_valid():\n return 
self.__cache_sub_vars\n\n return {}\n\n def get_string_vars(self):\n # Returns all variables in the Sub String\n if self.is_valid():\n return self.__cache_sub_string_vars\n\n return set()\n\n def get_string(self):\n # Returns the sub string as it was when it was decoded\n if self.is_valid():\n return self.__cache_sub_string\n\n return \"\"\n\n def is_valid(self):\n return self.__cache_is_valid\n\n sub_class.__name__ = f\"{cls.__name__}_sub\"\n return sub_class\n\n\ndef create_dict_list_class(cls):\n \"\"\"\n Create dynamic list class\n \"\"\"\n\n class node_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def __init__(\n self, x, start_mark: Mark | None = None, end_mark: Mark | None = None\n ):\n try:\n cls.__init__(self, x)\n except TypeError:\n cls.__init__(self)\n self.start_mark = start_mark or _mark(0, 0)\n self.end_mark = end_mark or _mark(0, 0)\n self.condition_functions = [\"Fn::If\"]\n\n def __deepcopy__(self, memo):\n result = list_node([], self.start_mark, self.end_mark)\n memo[id(self)] = result\n for _, v in enumerate(self):\n result.append(deepcopy(v, memo))\n\n return result\n\n def __copy__(self):\n return self\n\n def items_safe(self, path=None, type_t=()):\n \"\"\"Get items while handling IFs\"\"\"\n path = path or []\n for i, v in enumerate(self):\n if isinstance(v, dict):\n for items, p in v.items_safe(path[:] + [i]):\n if isinstance(items, type_t) or not type_t:\n yield items, p\n else:\n if isinstance(v, type_t) or not type_t:\n yield v, path[:] + [i]\n\n def __getattr__(self, name):\n raise TemplateAttributeError(f\"{self.__class__.__name__}.{name} is invalid\")\n\n node_class.__name__ = f\"{cls.__name__}_node\"\n return node_class\n\n\nstr_node = create_str_node_class(str)\ndict_node = create_dict_node_class(dict)\nlist_node = create_dict_list_class(list)\nintrinsic_node = create_intrinsic_node_class(dict_node)\nsub_node = create_sub_node_class(intrinsic_node)\n", "path": "src/cfnlint/decode/node.py"}]} | 3,763 | 916 |
gh_patches_debug_20112 | rasdani/github-patches | git_diff | apache__airflow-25553 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CORS access_control_allow_origin header never returned
### Apache Airflow version
2.2.2 (latest released)
### What happened
To fix a CORS problem, I added the [access_control_allow_headers](https://airflow.apache.org/docs/apache-airflow/stable/configurations-ref.html#access-control-allow-headers), [access_control_allow_methods](https://airflow.apache.org/docs/apache-airflow/stable/configurations-ref.html#access-control-allow-methods), and [access_control_allow_origins](https://airflow.apache.org/docs/apache-airflow/stable/configurations-ref.html#access-control-allow-origins) variables to the 2.2.2 docker-compose file provided in the documentation. Both the headers and methods values are returned correctly, but the origin header never is.
### What you expected to happen
The CORS preflight response should return the `Access-Control-Allow-Origin` header with the configured origin value.
### How to reproduce
Download the latest docker-compose file from the documentation and add the following lines:
`AIRFLOW__API__ACCESS_CONTROL_ALLOW_HEADERS: 'content-type, origin, authorization, accept'`
`AIRFLOW__API__ACCESS_CONTROL_ALLOW_METHODS: 'GET, POST, OPTIONS, DELETE'`
`AIRFLOW__API__ACCESS_CONTROL_ALLOW_ORIGINS: '*'`
Run it and send a CORS preflight request (a minimal check is sketched below).
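
A minimal preflight check might look like the sketch below; the host, port, request origin, and the `/api/v1/dags` endpoint are illustrative assumptions.

```python
import requests

# Simulate a browser CORS preflight against the stable REST API.
# Host, port, origin and endpoint are assumptions for illustration only.
response = requests.options(
    "http://localhost:8080/api/v1/dags",
    headers={
        "Origin": "http://example.com",
        "Access-Control-Request-Method": "GET",
        "Access-Control-Request-Headers": "authorization, content-type",
    },
)

# With the configuration above, all three headers should come back;
# on 2.2.2 only the first two do.
for name in (
    "Access-Control-Allow-Headers",
    "Access-Control-Allow-Methods",
    "Access-Control-Allow-Origin",
):
    print(name, "->", response.headers.get(name))
```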
### Operating System
Windows 11
### Versions of Apache Airflow Providers
_No response_
### Deployment
Docker-Compose
### Deployment details
_No response_
### Anything else
It's reproducible regardless of the ORIGINS value. There was a name change on this variable that is possibly not handled.
On 2.1.4 the same configuration works without problems.
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/www/extensions/init_views.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17
18 import logging
19 import warnings
20 from os import path
21
22 from connexion import App, ProblemException
23 from flask import Flask, request
24
25 from airflow.api_connexion.exceptions import common_error_handler
26 from airflow.configuration import conf
27 from airflow.security import permissions
28 from airflow.www.views import lazy_add_provider_discovered_options_to_connection_form
29
30 log = logging.getLogger(__name__)
31
32 # airflow/www/extensions/init_views.py => airflow/
33 ROOT_APP_DIR = path.abspath(path.join(path.dirname(__file__), path.pardir, path.pardir))
34
35
36 def init_flash_views(app):
37 """Init main app view - redirect to FAB"""
38 from airflow.www.blueprints import routes
39
40 app.register_blueprint(routes)
41
42
43 def init_appbuilder_views(app):
44 """Initialize Web UI views"""
45 appbuilder = app.appbuilder
46 from airflow.www import views
47
48 # Remove the session from scoped_session registry to avoid
49 # reusing a session with a disconnected connection
50 appbuilder.session.remove()
51 appbuilder.add_view_no_menu(views.AutocompleteView())
52 appbuilder.add_view_no_menu(views.Airflow())
53 appbuilder.add_view(
54 views.DagRunModelView,
55 permissions.RESOURCE_DAG_RUN,
56 category=permissions.RESOURCE_BROWSE_MENU,
57 category_icon="fa-globe",
58 )
59 appbuilder.add_view(
60 views.JobModelView, permissions.RESOURCE_JOB, category=permissions.RESOURCE_BROWSE_MENU
61 )
62 appbuilder.add_view(
63 views.LogModelView, permissions.RESOURCE_AUDIT_LOG, category=permissions.RESOURCE_BROWSE_MENU
64 )
65 appbuilder.add_view(
66 views.VariableModelView, permissions.RESOURCE_VARIABLE, category=permissions.RESOURCE_ADMIN_MENU
67 )
68 appbuilder.add_view(
69 views.TaskInstanceModelView,
70 permissions.RESOURCE_TASK_INSTANCE,
71 category=permissions.RESOURCE_BROWSE_MENU,
72 )
73 appbuilder.add_view(
74 views.TaskRescheduleModelView,
75 permissions.RESOURCE_TASK_RESCHEDULE,
76 category=permissions.RESOURCE_BROWSE_MENU,
77 )
78 appbuilder.add_view(
79 views.TriggerModelView,
80 permissions.RESOURCE_TRIGGER,
81 category=permissions.RESOURCE_BROWSE_MENU,
82 )
83 appbuilder.add_view(
84 views.ConfigurationView,
85 permissions.RESOURCE_CONFIG,
86 category=permissions.RESOURCE_ADMIN_MENU,
87 category_icon="fa-user",
88 )
89 appbuilder.add_view(
90 views.ConnectionModelView, permissions.RESOURCE_CONNECTION, category=permissions.RESOURCE_ADMIN_MENU
91 )
92 appbuilder.add_view(
93 views.SlaMissModelView, permissions.RESOURCE_SLA_MISS, category=permissions.RESOURCE_BROWSE_MENU
94 )
95 appbuilder.add_view(
96 views.PluginView, permissions.RESOURCE_PLUGIN, category=permissions.RESOURCE_ADMIN_MENU
97 )
98 appbuilder.add_view(
99 views.ProviderView, permissions.RESOURCE_PROVIDER, category=permissions.RESOURCE_ADMIN_MENU
100 )
101 appbuilder.add_view(
102 views.PoolModelView, permissions.RESOURCE_POOL, category=permissions.RESOURCE_ADMIN_MENU
103 )
104 appbuilder.add_view(
105 views.XComModelView, permissions.RESOURCE_XCOM, category=permissions.RESOURCE_ADMIN_MENU
106 )
107 appbuilder.add_view(
108 views.DagDependenciesView,
109 permissions.RESOURCE_DAG_DEPENDENCIES,
110 category=permissions.RESOURCE_BROWSE_MENU,
111 )
112 # add_view_no_menu to change item position.
113 # I added link in extensions.init_appbuilder_links.init_appbuilder_links
114 appbuilder.add_view_no_menu(views.RedocView)
115
116
117 def init_plugins(app):
118 """Integrate Flask and FAB with plugins"""
119 from airflow import plugins_manager
120
121 plugins_manager.initialize_web_ui_plugins()
122
123 appbuilder = app.appbuilder
124
125 for view in plugins_manager.flask_appbuilder_views:
126 name = view.get('name')
127 if name:
128 log.debug("Adding view %s with menu", name)
129 appbuilder.add_view(view["view"], name, category=view["category"])
130 else:
131 # if 'name' key is missing, intent is to add view without menu
132 log.debug("Adding view %s without menu", str(type(view["view"])))
133 appbuilder.add_view_no_menu(view["view"])
134
135 for menu_link in sorted(plugins_manager.flask_appbuilder_menu_links, key=lambda x: x["name"]):
136 log.debug("Adding menu link %s to %s", menu_link["name"], menu_link["href"])
137 appbuilder.add_link(**menu_link)
138
139 for blue_print in plugins_manager.flask_blueprints:
140 log.debug("Adding blueprint %s:%s", blue_print["name"], blue_print["blueprint"].import_name)
141 app.register_blueprint(blue_print["blueprint"])
142
143
144 def init_connection_form():
145 """Initializes connection form"""
146 lazy_add_provider_discovered_options_to_connection_form()
147
148
149 def init_error_handlers(app: Flask):
150 """Add custom errors handlers"""
151 from airflow.www import views
152
153 app.register_error_handler(500, views.show_traceback)
154 app.register_error_handler(404, views.not_found)
155
156
157 def set_cors_headers_on_response(response):
158 """Add response headers"""
159 allow_headers = conf.get('api', 'access_control_allow_headers')
160 allow_methods = conf.get('api', 'access_control_allow_methods')
161 allow_origins = conf.get('api', 'access_control_allow_origins')
162 if allow_headers is not None:
163 response.headers['Access-Control-Allow-Headers'] = allow_headers
164 if allow_methods is not None:
165 response.headers['Access-Control-Allow-Methods'] = allow_methods
166 if allow_origins is not None:
167 allowed_origins = allow_origins.split(' ')
168 origin = request.environ.get('HTTP_ORIGIN', allowed_origins[0])
169 if origin in allowed_origins:
170 response.headers['Access-Control-Allow-Origin'] = origin
171 return response
172
173
174 def init_api_connexion(app: Flask) -> None:
175 """Initialize Stable API"""
176 base_path = '/api/v1'
177
178 from airflow.www import views
179
180 @app.errorhandler(404)
181 @app.errorhandler(405)
182 def _handle_api_error(ex):
183 if request.path.startswith(base_path):
184 # 404 errors are never handled on the blueprint level
185 # unless raised from a view func so actual 404 errors,
186 # i.e. "no route for it" defined, need to be handled
187 # here on the application level
188 return common_error_handler(ex)
189 else:
190 return views.not_found(ex)
191
192 spec_dir = path.join(ROOT_APP_DIR, 'api_connexion', 'openapi')
193 connexion_app = App(__name__, specification_dir=spec_dir, skip_error_handlers=True)
194 connexion_app.app = app
195 api_bp = connexion_app.add_api(
196 specification='v1.yaml', base_path=base_path, validate_responses=True, strict_validation=True
197 ).blueprint
198 # Like "api_bp.after_request", but the BP is already registered, so we have
199 # to register it in the app directly.
200 app.after_request_funcs.setdefault(api_bp.name, []).append(set_cors_headers_on_response)
201 app.register_error_handler(ProblemException, common_error_handler)
202 app.extensions['csrf'].exempt(api_bp)
203
204
205 def init_api_experimental(app):
206 """Initialize Experimental API"""
207 if not conf.getboolean('api', 'enable_experimental_api', fallback=False):
208 return
209 from airflow.www.api.experimental import endpoints
210
211 warnings.warn(
212 "The experimental REST API is deprecated. Please migrate to the stable REST API. "
213 "Please note that the experimental API do not have access control. "
214 "The authenticated user has full access.",
215 DeprecationWarning,
216 )
217 app.register_blueprint(endpoints.api_experimental, url_prefix='/api/experimental')
218 app.extensions['csrf'].exempt(endpoints.api_experimental)
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/www/extensions/init_views.py b/airflow/www/extensions/init_views.py
--- a/airflow/www/extensions/init_views.py
+++ b/airflow/www/extensions/init_views.py
@@ -159,11 +159,13 @@
allow_headers = conf.get('api', 'access_control_allow_headers')
allow_methods = conf.get('api', 'access_control_allow_methods')
allow_origins = conf.get('api', 'access_control_allow_origins')
- if allow_headers is not None:
+ if allow_headers:
response.headers['Access-Control-Allow-Headers'] = allow_headers
- if allow_methods is not None:
+ if allow_methods:
response.headers['Access-Control-Allow-Methods'] = allow_methods
- if allow_origins is not None:
+ if allow_origins == '*':
+ response.headers['Access-Control-Allow-Origin'] = '*'
+ elif allow_origins:
allowed_origins = allow_origins.split(' ')
origin = request.environ.get('HTTP_ORIGIN', allowed_origins[0])
if origin in allowed_origins:
| {"golden_diff": "diff --git a/airflow/www/extensions/init_views.py b/airflow/www/extensions/init_views.py\n--- a/airflow/www/extensions/init_views.py\n+++ b/airflow/www/extensions/init_views.py\n@@ -159,11 +159,13 @@\n allow_headers = conf.get('api', 'access_control_allow_headers')\n allow_methods = conf.get('api', 'access_control_allow_methods')\n allow_origins = conf.get('api', 'access_control_allow_origins')\n- if allow_headers is not None:\n+ if allow_headers:\n response.headers['Access-Control-Allow-Headers'] = allow_headers\n- if allow_methods is not None:\n+ if allow_methods:\n response.headers['Access-Control-Allow-Methods'] = allow_methods\n- if allow_origins is not None:\n+ if allow_origins == '*':\n+ response.headers['Access-Control-Allow-Origin'] = '*'\n+ elif allow_origins:\n allowed_origins = allow_origins.split(' ')\n origin = request.environ.get('HTTP_ORIGIN', allowed_origins[0])\n if origin in allowed_origins:\n", "issue": "CORS access_control_allow_origin header never returned\n### Apache Airflow version\n\n2.2.2 (latest released)\n\n### What happened\n\nTo fix CORS problem added the [access_control_allow_headers](https://airflow.apache.org/docs/apache-airflow/stable/configurations-ref.html#access-control-allow-headers), [access_control_allow_methods](https://airflow.apache.org/docs/apache-airflow/stable/configurations-ref.html#access-control-allow-methods), [access_control_allow_origins](https://airflow.apache.org/docs/apache-airflow/stable/configurations-ref.html#access-control-allow-origins) variables to the 2.2.2 docker-compose file provided in documentation. Both header, and methods returns with the correct value, but origins never does.\n\n### What you expected to happen\n\nThe CORS response returning with provided origin header value.\n\n### How to reproduce\n\nDownload the latest docker-compose from documentation add the following lines:\r\n`AIRFLOW__API__ACCESS_CONTROL_ALLOW_HEADERS: 'content-type, origin, authorization, accept'`\r\n`AIRFLOW__API__ACCESS_CONTROL_ALLOW_METHODS: 'GET, POST, OPTIONS, DELETE'`\r\n`AIRFLOW__API__ACCESS_CONTROL_ALLOW_ORIGINS: '*'`\r\n\r\nrun and call with a CORS preflight\n\n### Operating System\n\nWindows 11\n\n### Versions of Apache Airflow Providers\n\n_No response_\n\n### Deployment\n\nDocker-Compose\n\n### Deployment details\n\n_No response_\n\n### Anything else\n\nIt's repeatable regardless of ORIGINS value. There was a name change on this variable that's possibly not handled.\r\nOn 2.1.4 the same works without problems.\n\n### Are you willing to submit PR?\n\n- [ ] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport logging\nimport warnings\nfrom os import path\n\nfrom connexion import App, ProblemException\nfrom flask import Flask, request\n\nfrom airflow.api_connexion.exceptions import common_error_handler\nfrom airflow.configuration import conf\nfrom airflow.security import permissions\nfrom airflow.www.views import lazy_add_provider_discovered_options_to_connection_form\n\nlog = logging.getLogger(__name__)\n\n# airflow/www/extensions/init_views.py => airflow/\nROOT_APP_DIR = path.abspath(path.join(path.dirname(__file__), path.pardir, path.pardir))\n\n\ndef init_flash_views(app):\n \"\"\"Init main app view - redirect to FAB\"\"\"\n from airflow.www.blueprints import routes\n\n app.register_blueprint(routes)\n\n\ndef init_appbuilder_views(app):\n \"\"\"Initialize Web UI views\"\"\"\n appbuilder = app.appbuilder\n from airflow.www import views\n\n # Remove the session from scoped_session registry to avoid\n # reusing a session with a disconnected connection\n appbuilder.session.remove()\n appbuilder.add_view_no_menu(views.AutocompleteView())\n appbuilder.add_view_no_menu(views.Airflow())\n appbuilder.add_view(\n views.DagRunModelView,\n permissions.RESOURCE_DAG_RUN,\n category=permissions.RESOURCE_BROWSE_MENU,\n category_icon=\"fa-globe\",\n )\n appbuilder.add_view(\n views.JobModelView, permissions.RESOURCE_JOB, category=permissions.RESOURCE_BROWSE_MENU\n )\n appbuilder.add_view(\n views.LogModelView, permissions.RESOURCE_AUDIT_LOG, category=permissions.RESOURCE_BROWSE_MENU\n )\n appbuilder.add_view(\n views.VariableModelView, permissions.RESOURCE_VARIABLE, category=permissions.RESOURCE_ADMIN_MENU\n )\n appbuilder.add_view(\n views.TaskInstanceModelView,\n permissions.RESOURCE_TASK_INSTANCE,\n category=permissions.RESOURCE_BROWSE_MENU,\n )\n appbuilder.add_view(\n views.TaskRescheduleModelView,\n permissions.RESOURCE_TASK_RESCHEDULE,\n category=permissions.RESOURCE_BROWSE_MENU,\n )\n appbuilder.add_view(\n views.TriggerModelView,\n permissions.RESOURCE_TRIGGER,\n category=permissions.RESOURCE_BROWSE_MENU,\n )\n appbuilder.add_view(\n views.ConfigurationView,\n permissions.RESOURCE_CONFIG,\n category=permissions.RESOURCE_ADMIN_MENU,\n category_icon=\"fa-user\",\n )\n appbuilder.add_view(\n views.ConnectionModelView, permissions.RESOURCE_CONNECTION, category=permissions.RESOURCE_ADMIN_MENU\n )\n appbuilder.add_view(\n views.SlaMissModelView, permissions.RESOURCE_SLA_MISS, category=permissions.RESOURCE_BROWSE_MENU\n )\n appbuilder.add_view(\n views.PluginView, permissions.RESOURCE_PLUGIN, category=permissions.RESOURCE_ADMIN_MENU\n )\n appbuilder.add_view(\n views.ProviderView, permissions.RESOURCE_PROVIDER, category=permissions.RESOURCE_ADMIN_MENU\n )\n appbuilder.add_view(\n views.PoolModelView, permissions.RESOURCE_POOL, category=permissions.RESOURCE_ADMIN_MENU\n )\n appbuilder.add_view(\n views.XComModelView, permissions.RESOURCE_XCOM, category=permissions.RESOURCE_ADMIN_MENU\n )\n appbuilder.add_view(\n views.DagDependenciesView,\n permissions.RESOURCE_DAG_DEPENDENCIES,\n category=permissions.RESOURCE_BROWSE_MENU,\n )\n # add_view_no_menu to change item position.\n # I added link in extensions.init_appbuilder_links.init_appbuilder_links\n appbuilder.add_view_no_menu(views.RedocView)\n\n\ndef init_plugins(app):\n \"\"\"Integrate Flask and FAB with plugins\"\"\"\n from airflow import plugins_manager\n\n plugins_manager.initialize_web_ui_plugins()\n\n appbuilder = app.appbuilder\n\n for 
view in plugins_manager.flask_appbuilder_views:\n name = view.get('name')\n if name:\n log.debug(\"Adding view %s with menu\", name)\n appbuilder.add_view(view[\"view\"], name, category=view[\"category\"])\n else:\n # if 'name' key is missing, intent is to add view without menu\n log.debug(\"Adding view %s without menu\", str(type(view[\"view\"])))\n appbuilder.add_view_no_menu(view[\"view\"])\n\n for menu_link in sorted(plugins_manager.flask_appbuilder_menu_links, key=lambda x: x[\"name\"]):\n log.debug(\"Adding menu link %s to %s\", menu_link[\"name\"], menu_link[\"href\"])\n appbuilder.add_link(**menu_link)\n\n for blue_print in plugins_manager.flask_blueprints:\n log.debug(\"Adding blueprint %s:%s\", blue_print[\"name\"], blue_print[\"blueprint\"].import_name)\n app.register_blueprint(blue_print[\"blueprint\"])\n\n\ndef init_connection_form():\n \"\"\"Initializes connection form\"\"\"\n lazy_add_provider_discovered_options_to_connection_form()\n\n\ndef init_error_handlers(app: Flask):\n \"\"\"Add custom errors handlers\"\"\"\n from airflow.www import views\n\n app.register_error_handler(500, views.show_traceback)\n app.register_error_handler(404, views.not_found)\n\n\ndef set_cors_headers_on_response(response):\n \"\"\"Add response headers\"\"\"\n allow_headers = conf.get('api', 'access_control_allow_headers')\n allow_methods = conf.get('api', 'access_control_allow_methods')\n allow_origins = conf.get('api', 'access_control_allow_origins')\n if allow_headers is not None:\n response.headers['Access-Control-Allow-Headers'] = allow_headers\n if allow_methods is not None:\n response.headers['Access-Control-Allow-Methods'] = allow_methods\n if allow_origins is not None:\n allowed_origins = allow_origins.split(' ')\n origin = request.environ.get('HTTP_ORIGIN', allowed_origins[0])\n if origin in allowed_origins:\n response.headers['Access-Control-Allow-Origin'] = origin\n return response\n\n\ndef init_api_connexion(app: Flask) -> None:\n \"\"\"Initialize Stable API\"\"\"\n base_path = '/api/v1'\n\n from airflow.www import views\n\n @app.errorhandler(404)\n @app.errorhandler(405)\n def _handle_api_error(ex):\n if request.path.startswith(base_path):\n # 404 errors are never handled on the blueprint level\n # unless raised from a view func so actual 404 errors,\n # i.e. \"no route for it\" defined, need to be handled\n # here on the application level\n return common_error_handler(ex)\n else:\n return views.not_found(ex)\n\n spec_dir = path.join(ROOT_APP_DIR, 'api_connexion', 'openapi')\n connexion_app = App(__name__, specification_dir=spec_dir, skip_error_handlers=True)\n connexion_app.app = app\n api_bp = connexion_app.add_api(\n specification='v1.yaml', base_path=base_path, validate_responses=True, strict_validation=True\n ).blueprint\n # Like \"api_bp.after_request\", but the BP is already registered, so we have\n # to register it in the app directly.\n app.after_request_funcs.setdefault(api_bp.name, []).append(set_cors_headers_on_response)\n app.register_error_handler(ProblemException, common_error_handler)\n app.extensions['csrf'].exempt(api_bp)\n\n\ndef init_api_experimental(app):\n \"\"\"Initialize Experimental API\"\"\"\n if not conf.getboolean('api', 'enable_experimental_api', fallback=False):\n return\n from airflow.www.api.experimental import endpoints\n\n warnings.warn(\n \"The experimental REST API is deprecated. Please migrate to the stable REST API. \"\n \"Please note that the experimental API do not have access control. 
\"\n \"The authenticated user has full access.\",\n DeprecationWarning,\n )\n app.register_blueprint(endpoints.api_experimental, url_prefix='/api/experimental')\n app.extensions['csrf'].exempt(endpoints.api_experimental)\n", "path": "airflow/www/extensions/init_views.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport logging\nimport warnings\nfrom os import path\n\nfrom connexion import App, ProblemException\nfrom flask import Flask, request\n\nfrom airflow.api_connexion.exceptions import common_error_handler\nfrom airflow.configuration import conf\nfrom airflow.security import permissions\nfrom airflow.www.views import lazy_add_provider_discovered_options_to_connection_form\n\nlog = logging.getLogger(__name__)\n\n# airflow/www/extensions/init_views.py => airflow/\nROOT_APP_DIR = path.abspath(path.join(path.dirname(__file__), path.pardir, path.pardir))\n\n\ndef init_flash_views(app):\n \"\"\"Init main app view - redirect to FAB\"\"\"\n from airflow.www.blueprints import routes\n\n app.register_blueprint(routes)\n\n\ndef init_appbuilder_views(app):\n \"\"\"Initialize Web UI views\"\"\"\n appbuilder = app.appbuilder\n from airflow.www import views\n\n # Remove the session from scoped_session registry to avoid\n # reusing a session with a disconnected connection\n appbuilder.session.remove()\n appbuilder.add_view_no_menu(views.AutocompleteView())\n appbuilder.add_view_no_menu(views.Airflow())\n appbuilder.add_view(\n views.DagRunModelView,\n permissions.RESOURCE_DAG_RUN,\n category=permissions.RESOURCE_BROWSE_MENU,\n category_icon=\"fa-globe\",\n )\n appbuilder.add_view(\n views.JobModelView, permissions.RESOURCE_JOB, category=permissions.RESOURCE_BROWSE_MENU\n )\n appbuilder.add_view(\n views.LogModelView, permissions.RESOURCE_AUDIT_LOG, category=permissions.RESOURCE_BROWSE_MENU\n )\n appbuilder.add_view(\n views.VariableModelView, permissions.RESOURCE_VARIABLE, category=permissions.RESOURCE_ADMIN_MENU\n )\n appbuilder.add_view(\n views.TaskInstanceModelView,\n permissions.RESOURCE_TASK_INSTANCE,\n category=permissions.RESOURCE_BROWSE_MENU,\n )\n appbuilder.add_view(\n views.TaskRescheduleModelView,\n permissions.RESOURCE_TASK_RESCHEDULE,\n category=permissions.RESOURCE_BROWSE_MENU,\n )\n appbuilder.add_view(\n views.TriggerModelView,\n permissions.RESOURCE_TRIGGER,\n category=permissions.RESOURCE_BROWSE_MENU,\n )\n appbuilder.add_view(\n views.ConfigurationView,\n permissions.RESOURCE_CONFIG,\n category=permissions.RESOURCE_ADMIN_MENU,\n category_icon=\"fa-user\",\n )\n appbuilder.add_view(\n views.ConnectionModelView, permissions.RESOURCE_CONNECTION, category=permissions.RESOURCE_ADMIN_MENU\n )\n appbuilder.add_view(\n views.SlaMissModelView, permissions.RESOURCE_SLA_MISS, 
category=permissions.RESOURCE_BROWSE_MENU\n )\n appbuilder.add_view(\n views.PluginView, permissions.RESOURCE_PLUGIN, category=permissions.RESOURCE_ADMIN_MENU\n )\n appbuilder.add_view(\n views.ProviderView, permissions.RESOURCE_PROVIDER, category=permissions.RESOURCE_ADMIN_MENU\n )\n appbuilder.add_view(\n views.PoolModelView, permissions.RESOURCE_POOL, category=permissions.RESOURCE_ADMIN_MENU\n )\n appbuilder.add_view(\n views.XComModelView, permissions.RESOURCE_XCOM, category=permissions.RESOURCE_ADMIN_MENU\n )\n appbuilder.add_view(\n views.DagDependenciesView,\n permissions.RESOURCE_DAG_DEPENDENCIES,\n category=permissions.RESOURCE_BROWSE_MENU,\n )\n # add_view_no_menu to change item position.\n # I added link in extensions.init_appbuilder_links.init_appbuilder_links\n appbuilder.add_view_no_menu(views.RedocView)\n\n\ndef init_plugins(app):\n \"\"\"Integrate Flask and FAB with plugins\"\"\"\n from airflow import plugins_manager\n\n plugins_manager.initialize_web_ui_plugins()\n\n appbuilder = app.appbuilder\n\n for view in plugins_manager.flask_appbuilder_views:\n name = view.get('name')\n if name:\n log.debug(\"Adding view %s with menu\", name)\n appbuilder.add_view(view[\"view\"], name, category=view[\"category\"])\n else:\n # if 'name' key is missing, intent is to add view without menu\n log.debug(\"Adding view %s without menu\", str(type(view[\"view\"])))\n appbuilder.add_view_no_menu(view[\"view\"])\n\n for menu_link in sorted(plugins_manager.flask_appbuilder_menu_links, key=lambda x: x[\"name\"]):\n log.debug(\"Adding menu link %s to %s\", menu_link[\"name\"], menu_link[\"href\"])\n appbuilder.add_link(**menu_link)\n\n for blue_print in plugins_manager.flask_blueprints:\n log.debug(\"Adding blueprint %s:%s\", blue_print[\"name\"], blue_print[\"blueprint\"].import_name)\n app.register_blueprint(blue_print[\"blueprint\"])\n\n\ndef init_connection_form():\n \"\"\"Initializes connection form\"\"\"\n lazy_add_provider_discovered_options_to_connection_form()\n\n\ndef init_error_handlers(app: Flask):\n \"\"\"Add custom errors handlers\"\"\"\n from airflow.www import views\n\n app.register_error_handler(500, views.show_traceback)\n app.register_error_handler(404, views.not_found)\n\n\ndef set_cors_headers_on_response(response):\n \"\"\"Add response headers\"\"\"\n allow_headers = conf.get('api', 'access_control_allow_headers')\n allow_methods = conf.get('api', 'access_control_allow_methods')\n allow_origins = conf.get('api', 'access_control_allow_origins')\n if allow_headers:\n response.headers['Access-Control-Allow-Headers'] = allow_headers\n if allow_methods:\n response.headers['Access-Control-Allow-Methods'] = allow_methods\n if allow_origins == '*':\n response.headers['Access-Control-Allow-Origin'] = '*'\n elif allow_origins:\n allowed_origins = allow_origins.split(' ')\n origin = request.environ.get('HTTP_ORIGIN', allowed_origins[0])\n if origin in allowed_origins:\n response.headers['Access-Control-Allow-Origin'] = origin\n return response\n\n\ndef init_api_connexion(app: Flask) -> None:\n \"\"\"Initialize Stable API\"\"\"\n base_path = '/api/v1'\n\n from airflow.www import views\n\n @app.errorhandler(404)\n @app.errorhandler(405)\n def _handle_api_error(ex):\n if request.path.startswith(base_path):\n # 404 errors are never handled on the blueprint level\n # unless raised from a view func so actual 404 errors,\n # i.e. 
\"no route for it\" defined, need to be handled\n # here on the application level\n return common_error_handler(ex)\n else:\n return views.not_found(ex)\n\n spec_dir = path.join(ROOT_APP_DIR, 'api_connexion', 'openapi')\n connexion_app = App(__name__, specification_dir=spec_dir, skip_error_handlers=True)\n connexion_app.app = app\n api_bp = connexion_app.add_api(\n specification='v1.yaml', base_path=base_path, validate_responses=True, strict_validation=True\n ).blueprint\n # Like \"api_bp.after_request\", but the BP is already registered, so we have\n # to register it in the app directly.\n app.after_request_funcs.setdefault(api_bp.name, []).append(set_cors_headers_on_response)\n app.register_error_handler(ProblemException, common_error_handler)\n app.extensions['csrf'].exempt(api_bp)\n\n\ndef init_api_experimental(app):\n \"\"\"Initialize Experimental API\"\"\"\n if not conf.getboolean('api', 'enable_experimental_api', fallback=False):\n return\n from airflow.www.api.experimental import endpoints\n\n warnings.warn(\n \"The experimental REST API is deprecated. Please migrate to the stable REST API. \"\n \"Please note that the experimental API do not have access control. \"\n \"The authenticated user has full access.\",\n DeprecationWarning,\n )\n app.register_blueprint(endpoints.api_experimental, url_prefix='/api/experimental')\n app.extensions['csrf'].exempt(endpoints.api_experimental)\n", "path": "airflow/www/extensions/init_views.py"}]} | 3,054 | 237 |
gh_patches_debug_41876 | rasdani/github-patches | git_diff | carpentries__amy-2231 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do not allow concurrent same community roles
People should not hold the same community role concurrently. For example, if a person has an Instructor role from 2018-02-01 through 2020-06-01, any other Instructor role must end before 2018-02-01 or start after 2020-06-01.
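
Put differently, two date ranges for the same role type must never overlap. A minimal sketch of the overlap check in plain Python (treating a missing end date as an open-ended, still-active role) could look like this:

```python
from datetime import date
from typing import Optional


def ranges_overlap(
    start_a: date,
    end_a: Optional[date],
    start_b: date,
    end_b: Optional[date],
) -> bool:
    """Return True if two [start, end] ranges share at least one day.

    A missing end date means the role is still active (unbounded).
    """
    a_ends_before_b_starts = end_a is not None and end_a < start_b
    b_ends_before_a_starts = end_b is not None and end_b < start_a
    return not (a_ends_before_b_starts or b_ends_before_a_starts)


# Example from the issue: an Instructor role 2018-02-01..2020-06-01
# conflicts with any other Instructor role that starts on or before
# 2020-06-01 and ends on or after 2018-02-01.
print(ranges_overlap(date(2018, 2, 1), date(2020, 6, 1),
                     date(2019, 1, 1), None))  # True -> not allowed
```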
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `amy/communityroles/forms.py`
Content:
```
1 from collections import defaultdict
2 from datetime import date
3 from typing import Any, Optional, Union
4
5 from django import forms
6 from django.core.exceptions import ObjectDoesNotExist, ValidationError
7
8 from workshops.fields import HeavySelect2Widget, ModelSelect2Widget
9 from workshops.forms import SELECT2_SIDEBAR, BootstrapHelper, WidgetOverrideMixin
10 from workshops.models import Award, Person
11
12 from .fields import CustomKeysJSONField
13 from .models import CommunityRole, CommunityRoleConfig, CommunityRoleInactivation
14
15
16 class CommunityRoleForm(WidgetOverrideMixin, forms.ModelForm):
17 class Meta:
18 model = CommunityRole
19 fields = (
20 "config",
21 "person",
22 "award",
23 "start",
24 "end",
25 "inactivation",
26 "membership",
27 "url",
28 "generic_relation_content_type",
29 "generic_relation_pk",
30 )
31 widgets = {
32 "config": HeavySelect2Widget(
33 data_view="api:communityroleconfig-list", attrs=SELECT2_SIDEBAR
34 ),
35 "person": ModelSelect2Widget(
36 data_view="person-lookup", attrs=SELECT2_SIDEBAR
37 ),
38 "award": ModelSelect2Widget(
39 data_view="award-lookup", attrs=SELECT2_SIDEBAR
40 ),
41 "membership": ModelSelect2Widget(
42 data_view="membership-lookup", attrs=SELECT2_SIDEBAR
43 ),
44 "generic_relation_content_type": forms.Select(
45 # "disabled" means the browsers will not send the field during POST.
46 # See how it's handled in `clean()` method below.
47 attrs={"disabled": ""},
48 ),
49 "generic_relation_pk": HeavySelect2Widget(
50 data_view="generic-object-lookup", attrs=SELECT2_SIDEBAR
51 ),
52 }
53 labels = {
54 "generic_relation_content_type": "Generic relation object type",
55 "generic_relation_pk": "Generic relation object",
56 }
57
58 class Media:
59 js = ("communityrole_form.js",)
60
61 def __init__(self, *args, **kwargs):
62 form_tag = kwargs.pop("form_tag", True)
63 super().__init__(*args, **kwargs)
64 bootstrap_kwargs = {
65 "add_cancel_button": False,
66 "form_tag": form_tag,
67 }
68 self.helper = BootstrapHelper(**bootstrap_kwargs)
69
70 def clean(self) -> Union[dict[str, Any], None]:
71 """Validate form according to rules set up in related Community Role
72 configuration."""
73 cleaned_data = super().clean()
74 if not cleaned_data:
75 return cleaned_data
76
77 errors: defaultdict[str, list[ValidationError]] = defaultdict(list)
78 config: Optional[CommunityRoleConfig] = cleaned_data.get("config")
79 award: Optional[Award] = cleaned_data.get("award")
80 person: Optional[Person] = cleaned_data.get("person")
81 inactivation: Optional[CommunityRoleInactivation] = cleaned_data.get(
82 "inactivation"
83 )
84 end_date: Optional[date] = cleaned_data.get("end")
85
86 # Config is required, but field validation for 'config' should raise
87 # validation error first.
88 if not config:
89 return cleaned_data
90
91 # Award required?
92 if config.link_to_award and not award:
93 errors["award"].append(
94 ValidationError(f"Award is required with community role {config}")
95 )
96
97 # Award should point at the same person the community role links to
98 if award and award.person != person:
99 errors["award"].append(ValidationError(f"Award should belong to {person}"))
100
101 # Specific award badge required?
102 if (badge := config.award_badge_limit) and award:
103 if award.badge != badge:
104 errors["award"].append(
105 ValidationError(
106 f"Award badge must be {badge} for community role {config}"
107 )
108 )
109
110 # Membership required?
111 if config.link_to_membership and not cleaned_data.get("membership"):
112 errors["membership"].append(
113 ValidationError(f"Membership is required with community role {config}")
114 )
115
116 # Additional URL supported?
117 if not config.additional_url and cleaned_data.get("url"):
118 errors["url"].append(
119 ValidationError(f"URL is not supported for community role {config}")
120 )
121
122 # Widget for `generic_relation_content_type` is disabled in HTML, which
123 # makes browsers not send it. The code below sets the default value to
124 # the same value as in related config.
125 generic_relation_content_type = config.generic_relation_content_type
126
127 # Generic relation object must exist
128 if config.generic_relation_content_type and generic_relation_content_type:
129 model_class = generic_relation_content_type.model_class()
130 try:
131 model_class._base_manager.get(
132 pk=cleaned_data.get("generic_relation_pk")
133 )
134 except ObjectDoesNotExist:
135 errors["generic_relation_pk"].append(
136 ValidationError(
137 f"Generic relation object of model {model_class.__name__} "
138 "doesn't exist"
139 )
140 )
141
142 # End date is required when any inactivation was selected.
143 if inactivation is not None and end_date is None:
144 errors["end"].append(
145 ValidationError("Required when Reason for inactivation selected.")
146 )
147
148 if errors:
149 raise ValidationError(errors)
150
151 return cleaned_data
152
153 def clean_end(self):
154 """Validate that end >= start"""
155 start = self.cleaned_data.get("start")
156 end = self.cleaned_data.get("end")
157 if start and end and end < start:
158 raise ValidationError("Must not be earlier than start date.")
159 return end
160
161
162 class CommunityRoleUpdateForm(CommunityRoleForm):
163 config = forms.ModelChoiceField(
164 queryset=CommunityRoleConfig.objects.all(),
165 disabled=True,
166 )
167
168 custom_keys = CustomKeysJSONField(required=False)
169
170 class Meta(CommunityRoleForm.Meta):
171 fields = CommunityRoleForm.Meta.fields + ("custom_keys",)
172
173 def __init__(self, *args, community_role_config: CommunityRoleConfig, **kwargs):
174 self.config = community_role_config
175 super().__init__(*args, **kwargs)
176 self.fields["custom_keys"].apply_labels(self.config.custom_key_labels)
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/amy/communityroles/forms.py b/amy/communityroles/forms.py
--- a/amy/communityroles/forms.py
+++ b/amy/communityroles/forms.py
@@ -4,6 +4,7 @@
from django import forms
from django.core.exceptions import ObjectDoesNotExist, ValidationError
+from django.db.models import Q
from workshops.fields import HeavySelect2Widget, ModelSelect2Widget
from workshops.forms import SELECT2_SIDEBAR, BootstrapHelper, WidgetOverrideMixin
@@ -81,11 +82,12 @@
inactivation: Optional[CommunityRoleInactivation] = cleaned_data.get(
"inactivation"
)
+ start_date: Optional[date] = cleaned_data.get("start")
end_date: Optional[date] = cleaned_data.get("end")
# Config is required, but field validation for 'config' should raise
# validation error first.
- if not config:
+ if not config or not person:
return cleaned_data
# Award required?
@@ -145,8 +147,13 @@
ValidationError("Required when Reason for inactivation selected.")
)
+ try:
+ self.check_concurrent_role(config, person, start_date, end_date)
+ except ValidationError as exc:
+ errors["person"].append(exc)
+
if errors:
- raise ValidationError(errors)
+ raise ValidationError(errors) # type: ignore
return cleaned_data
@@ -158,6 +165,32 @@
raise ValidationError("Must not be earlier than start date.")
return end
+ def check_concurrent_role(
+ self,
+ config: Optional[CommunityRoleConfig] = None,
+ person: Optional[Person] = None,
+ start_date: Optional[date] = None,
+ end_date: Optional[date] = None,
+ ) -> Optional[Person]:
+ """Person should not have any concurrent Community Roles of the same type in the
+ same time."""
+ # These are required fields in the form, so they should be present.
+ if config and person and start_date:
+ same_time = Q(end__gt=start_date) | Q(end__isnull=True)
+ if end_date:
+ # if `end_date` is present, introduce additional condition
+ same_time &= Q(start__lt=end_date) | Q(start__isnull=True)
+ if roles := CommunityRole.objects.filter(
+ person=person, config=config
+ ).filter(same_time):
+ raise (
+ ValidationError(
+ f"Person {person} has concurrent community roles: "
+ f"{list(roles)}."
+ )
+ )
+ return person
+
class CommunityRoleUpdateForm(CommunityRoleForm):
config = forms.ModelChoiceField(
@@ -173,4 +206,16 @@
def __init__(self, *args, community_role_config: CommunityRoleConfig, **kwargs):
self.config = community_role_config
super().__init__(*args, **kwargs)
- self.fields["custom_keys"].apply_labels(self.config.custom_key_labels)
+ self.fields["custom_keys"].apply_labels( # type: ignore
+ self.config.custom_key_labels
+ )
+
+ def check_concurrent_role(
+ self,
+ config: Optional[CommunityRoleConfig] = None,
+ person: Optional[Person] = None,
+ start_date: Optional[date] = None,
+ end_date: Optional[date] = None,
+ ) -> Optional[Person]:
+ """When updating a CommunityRole, we shouldn't check for concurrent roles."""
+ return person
| {"golden_diff": "diff --git a/amy/communityroles/forms.py b/amy/communityroles/forms.py\n--- a/amy/communityroles/forms.py\n+++ b/amy/communityroles/forms.py\n@@ -4,6 +4,7 @@\n \n from django import forms\n from django.core.exceptions import ObjectDoesNotExist, ValidationError\n+from django.db.models import Q\n \n from workshops.fields import HeavySelect2Widget, ModelSelect2Widget\n from workshops.forms import SELECT2_SIDEBAR, BootstrapHelper, WidgetOverrideMixin\n@@ -81,11 +82,12 @@\n inactivation: Optional[CommunityRoleInactivation] = cleaned_data.get(\n \"inactivation\"\n )\n+ start_date: Optional[date] = cleaned_data.get(\"start\")\n end_date: Optional[date] = cleaned_data.get(\"end\")\n \n # Config is required, but field validation for 'config' should raise\n # validation error first.\n- if not config:\n+ if not config or not person:\n return cleaned_data\n \n # Award required?\n@@ -145,8 +147,13 @@\n ValidationError(\"Required when Reason for inactivation selected.\")\n )\n \n+ try:\n+ self.check_concurrent_role(config, person, start_date, end_date)\n+ except ValidationError as exc:\n+ errors[\"person\"].append(exc)\n+\n if errors:\n- raise ValidationError(errors)\n+ raise ValidationError(errors) # type: ignore\n \n return cleaned_data\n \n@@ -158,6 +165,32 @@\n raise ValidationError(\"Must not be earlier than start date.\")\n return end\n \n+ def check_concurrent_role(\n+ self,\n+ config: Optional[CommunityRoleConfig] = None,\n+ person: Optional[Person] = None,\n+ start_date: Optional[date] = None,\n+ end_date: Optional[date] = None,\n+ ) -> Optional[Person]:\n+ \"\"\"Person should not have any concurrent Community Roles of the same type in the\n+ same time.\"\"\"\n+ # These are required fields in the form, so they should be present.\n+ if config and person and start_date:\n+ same_time = Q(end__gt=start_date) | Q(end__isnull=True)\n+ if end_date:\n+ # if `end_date` is present, introduce additional condition\n+ same_time &= Q(start__lt=end_date) | Q(start__isnull=True)\n+ if roles := CommunityRole.objects.filter(\n+ person=person, config=config\n+ ).filter(same_time):\n+ raise (\n+ ValidationError(\n+ f\"Person {person} has concurrent community roles: \"\n+ f\"{list(roles)}.\"\n+ )\n+ )\n+ return person\n+\n \n class CommunityRoleUpdateForm(CommunityRoleForm):\n config = forms.ModelChoiceField(\n@@ -173,4 +206,16 @@\n def __init__(self, *args, community_role_config: CommunityRoleConfig, **kwargs):\n self.config = community_role_config\n super().__init__(*args, **kwargs)\n- self.fields[\"custom_keys\"].apply_labels(self.config.custom_key_labels)\n+ self.fields[\"custom_keys\"].apply_labels( # type: ignore\n+ self.config.custom_key_labels\n+ )\n+\n+ def check_concurrent_role(\n+ self,\n+ config: Optional[CommunityRoleConfig] = None,\n+ person: Optional[Person] = None,\n+ start_date: Optional[date] = None,\n+ end_date: Optional[date] = None,\n+ ) -> Optional[Person]:\n+ \"\"\"When updating a CommunityRole, we shouldn't check for concurrent roles.\"\"\"\n+ return person\n", "issue": "Do not allow concurrent same community roles\nPeople should not have the same concurrent community roles. 
For example if a person has an Instructor role from 2018-02-01 through 2020-06-01, any other role must end before 2018-02-01 and start after 2020-06-01.\n", "before_files": [{"content": "from collections import defaultdict\nfrom datetime import date\nfrom typing import Any, Optional, Union\n\nfrom django import forms\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\n\nfrom workshops.fields import HeavySelect2Widget, ModelSelect2Widget\nfrom workshops.forms import SELECT2_SIDEBAR, BootstrapHelper, WidgetOverrideMixin\nfrom workshops.models import Award, Person\n\nfrom .fields import CustomKeysJSONField\nfrom .models import CommunityRole, CommunityRoleConfig, CommunityRoleInactivation\n\n\nclass CommunityRoleForm(WidgetOverrideMixin, forms.ModelForm):\n class Meta:\n model = CommunityRole\n fields = (\n \"config\",\n \"person\",\n \"award\",\n \"start\",\n \"end\",\n \"inactivation\",\n \"membership\",\n \"url\",\n \"generic_relation_content_type\",\n \"generic_relation_pk\",\n )\n widgets = {\n \"config\": HeavySelect2Widget(\n data_view=\"api:communityroleconfig-list\", attrs=SELECT2_SIDEBAR\n ),\n \"person\": ModelSelect2Widget(\n data_view=\"person-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"award\": ModelSelect2Widget(\n data_view=\"award-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"membership\": ModelSelect2Widget(\n data_view=\"membership-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"generic_relation_content_type\": forms.Select(\n # \"disabled\" means the browsers will not send the field during POST.\n # See how it's handled in `clean()` method below.\n attrs={\"disabled\": \"\"},\n ),\n \"generic_relation_pk\": HeavySelect2Widget(\n data_view=\"generic-object-lookup\", attrs=SELECT2_SIDEBAR\n ),\n }\n labels = {\n \"generic_relation_content_type\": \"Generic relation object type\",\n \"generic_relation_pk\": \"Generic relation object\",\n }\n\n class Media:\n js = (\"communityrole_form.js\",)\n\n def __init__(self, *args, **kwargs):\n form_tag = kwargs.pop(\"form_tag\", True)\n super().__init__(*args, **kwargs)\n bootstrap_kwargs = {\n \"add_cancel_button\": False,\n \"form_tag\": form_tag,\n }\n self.helper = BootstrapHelper(**bootstrap_kwargs)\n\n def clean(self) -> Union[dict[str, Any], None]:\n \"\"\"Validate form according to rules set up in related Community Role\n configuration.\"\"\"\n cleaned_data = super().clean()\n if not cleaned_data:\n return cleaned_data\n\n errors: defaultdict[str, list[ValidationError]] = defaultdict(list)\n config: Optional[CommunityRoleConfig] = cleaned_data.get(\"config\")\n award: Optional[Award] = cleaned_data.get(\"award\")\n person: Optional[Person] = cleaned_data.get(\"person\")\n inactivation: Optional[CommunityRoleInactivation] = cleaned_data.get(\n \"inactivation\"\n )\n end_date: Optional[date] = cleaned_data.get(\"end\")\n\n # Config is required, but field validation for 'config' should raise\n # validation error first.\n if not config:\n return cleaned_data\n\n # Award required?\n if config.link_to_award and not award:\n errors[\"award\"].append(\n ValidationError(f\"Award is required with community role {config}\")\n )\n\n # Award should point at the same person the community role links to\n if award and award.person != person:\n errors[\"award\"].append(ValidationError(f\"Award should belong to {person}\"))\n\n # Specific award badge required?\n if (badge := config.award_badge_limit) and award:\n if award.badge != badge:\n errors[\"award\"].append(\n ValidationError(\n f\"Award badge must be {badge} for community role 
{config}\"\n )\n )\n\n # Membership required?\n if config.link_to_membership and not cleaned_data.get(\"membership\"):\n errors[\"membership\"].append(\n ValidationError(f\"Membership is required with community role {config}\")\n )\n\n # Additional URL supported?\n if not config.additional_url and cleaned_data.get(\"url\"):\n errors[\"url\"].append(\n ValidationError(f\"URL is not supported for community role {config}\")\n )\n\n # Widget for `generic_relation_content_type` is disabled in HTML, which\n # makes browsers not send it. The code below sets the default value to\n # the same value as in related config.\n generic_relation_content_type = config.generic_relation_content_type\n\n # Generic relation object must exist\n if config.generic_relation_content_type and generic_relation_content_type:\n model_class = generic_relation_content_type.model_class()\n try:\n model_class._base_manager.get(\n pk=cleaned_data.get(\"generic_relation_pk\")\n )\n except ObjectDoesNotExist:\n errors[\"generic_relation_pk\"].append(\n ValidationError(\n f\"Generic relation object of model {model_class.__name__} \"\n \"doesn't exist\"\n )\n )\n\n # End date is required when any inactivation was selected.\n if inactivation is not None and end_date is None:\n errors[\"end\"].append(\n ValidationError(\"Required when Reason for inactivation selected.\")\n )\n\n if errors:\n raise ValidationError(errors)\n\n return cleaned_data\n\n def clean_end(self):\n \"\"\"Validate that end >= start\"\"\"\n start = self.cleaned_data.get(\"start\")\n end = self.cleaned_data.get(\"end\")\n if start and end and end < start:\n raise ValidationError(\"Must not be earlier than start date.\")\n return end\n\n\nclass CommunityRoleUpdateForm(CommunityRoleForm):\n config = forms.ModelChoiceField(\n queryset=CommunityRoleConfig.objects.all(),\n disabled=True,\n )\n\n custom_keys = CustomKeysJSONField(required=False)\n\n class Meta(CommunityRoleForm.Meta):\n fields = CommunityRoleForm.Meta.fields + (\"custom_keys\",)\n\n def __init__(self, *args, community_role_config: CommunityRoleConfig, **kwargs):\n self.config = community_role_config\n super().__init__(*args, **kwargs)\n self.fields[\"custom_keys\"].apply_labels(self.config.custom_key_labels)\n", "path": "amy/communityroles/forms.py"}], "after_files": [{"content": "from collections import defaultdict\nfrom datetime import date\nfrom typing import Any, Optional, Union\n\nfrom django import forms\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.db.models import Q\n\nfrom workshops.fields import HeavySelect2Widget, ModelSelect2Widget\nfrom workshops.forms import SELECT2_SIDEBAR, BootstrapHelper, WidgetOverrideMixin\nfrom workshops.models import Award, Person\n\nfrom .fields import CustomKeysJSONField\nfrom .models import CommunityRole, CommunityRoleConfig, CommunityRoleInactivation\n\n\nclass CommunityRoleForm(WidgetOverrideMixin, forms.ModelForm):\n class Meta:\n model = CommunityRole\n fields = (\n \"config\",\n \"person\",\n \"award\",\n \"start\",\n \"end\",\n \"inactivation\",\n \"membership\",\n \"url\",\n \"generic_relation_content_type\",\n \"generic_relation_pk\",\n )\n widgets = {\n \"config\": HeavySelect2Widget(\n data_view=\"api:communityroleconfig-list\", attrs=SELECT2_SIDEBAR\n ),\n \"person\": ModelSelect2Widget(\n data_view=\"person-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"award\": ModelSelect2Widget(\n data_view=\"award-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"membership\": ModelSelect2Widget(\n data_view=\"membership-lookup\", 
attrs=SELECT2_SIDEBAR\n ),\n \"generic_relation_content_type\": forms.Select(\n # \"disabled\" means the browsers will not send the field during POST.\n # See how it's handled in `clean()` method below.\n attrs={\"disabled\": \"\"},\n ),\n \"generic_relation_pk\": HeavySelect2Widget(\n data_view=\"generic-object-lookup\", attrs=SELECT2_SIDEBAR\n ),\n }\n labels = {\n \"generic_relation_content_type\": \"Generic relation object type\",\n \"generic_relation_pk\": \"Generic relation object\",\n }\n\n class Media:\n js = (\"communityrole_form.js\",)\n\n def __init__(self, *args, **kwargs):\n form_tag = kwargs.pop(\"form_tag\", True)\n super().__init__(*args, **kwargs)\n bootstrap_kwargs = {\n \"add_cancel_button\": False,\n \"form_tag\": form_tag,\n }\n self.helper = BootstrapHelper(**bootstrap_kwargs)\n\n def clean(self) -> Union[dict[str, Any], None]:\n \"\"\"Validate form according to rules set up in related Community Role\n configuration.\"\"\"\n cleaned_data = super().clean()\n if not cleaned_data:\n return cleaned_data\n\n errors: defaultdict[str, list[ValidationError]] = defaultdict(list)\n config: Optional[CommunityRoleConfig] = cleaned_data.get(\"config\")\n award: Optional[Award] = cleaned_data.get(\"award\")\n person: Optional[Person] = cleaned_data.get(\"person\")\n inactivation: Optional[CommunityRoleInactivation] = cleaned_data.get(\n \"inactivation\"\n )\n start_date: Optional[date] = cleaned_data.get(\"start\")\n end_date: Optional[date] = cleaned_data.get(\"end\")\n\n # Config is required, but field validation for 'config' should raise\n # validation error first.\n if not config or not person:\n return cleaned_data\n\n # Award required?\n if config.link_to_award and not award:\n errors[\"award\"].append(\n ValidationError(f\"Award is required with community role {config}\")\n )\n\n # Award should point at the same person the community role links to\n if award and award.person != person:\n errors[\"award\"].append(ValidationError(f\"Award should belong to {person}\"))\n\n # Specific award badge required?\n if (badge := config.award_badge_limit) and award:\n if award.badge != badge:\n errors[\"award\"].append(\n ValidationError(\n f\"Award badge must be {badge} for community role {config}\"\n )\n )\n\n # Membership required?\n if config.link_to_membership and not cleaned_data.get(\"membership\"):\n errors[\"membership\"].append(\n ValidationError(f\"Membership is required with community role {config}\")\n )\n\n # Additional URL supported?\n if not config.additional_url and cleaned_data.get(\"url\"):\n errors[\"url\"].append(\n ValidationError(f\"URL is not supported for community role {config}\")\n )\n\n # Widget for `generic_relation_content_type` is disabled in HTML, which\n # makes browsers not send it. 
The code below sets the default value to\n # the same value as in related config.\n generic_relation_content_type = config.generic_relation_content_type\n\n # Generic relation object must exist\n if config.generic_relation_content_type and generic_relation_content_type:\n model_class = generic_relation_content_type.model_class()\n try:\n model_class._base_manager.get(\n pk=cleaned_data.get(\"generic_relation_pk\")\n )\n except ObjectDoesNotExist:\n errors[\"generic_relation_pk\"].append(\n ValidationError(\n f\"Generic relation object of model {model_class.__name__} \"\n \"doesn't exist\"\n )\n )\n\n # End date is required when any inactivation was selected.\n if inactivation is not None and end_date is None:\n errors[\"end\"].append(\n ValidationError(\"Required when Reason for inactivation selected.\")\n )\n\n try:\n self.check_concurrent_role(config, person, start_date, end_date)\n except ValidationError as exc:\n errors[\"person\"].append(exc)\n\n if errors:\n raise ValidationError(errors) # type: ignore\n\n return cleaned_data\n\n def clean_end(self):\n \"\"\"Validate that end >= start\"\"\"\n start = self.cleaned_data.get(\"start\")\n end = self.cleaned_data.get(\"end\")\n if start and end and end < start:\n raise ValidationError(\"Must not be earlier than start date.\")\n return end\n\n def check_concurrent_role(\n self,\n config: Optional[CommunityRoleConfig] = None,\n person: Optional[Person] = None,\n start_date: Optional[date] = None,\n end_date: Optional[date] = None,\n ) -> Optional[Person]:\n \"\"\"Person should not have any concurrent Community Roles of the same type in the\n same time.\"\"\"\n # These are required fields in the form, so they should be present.\n if config and person and start_date:\n same_time = Q(end__gt=start_date) | Q(end__isnull=True)\n if end_date:\n # if `end_date` is present, introduce additional condition\n same_time &= Q(start__lt=end_date) | Q(start__isnull=True)\n if roles := CommunityRole.objects.filter(\n person=person, config=config\n ).filter(same_time):\n raise (\n ValidationError(\n f\"Person {person} has concurrent community roles: \"\n f\"{list(roles)}.\"\n )\n )\n return person\n\n\nclass CommunityRoleUpdateForm(CommunityRoleForm):\n config = forms.ModelChoiceField(\n queryset=CommunityRoleConfig.objects.all(),\n disabled=True,\n )\n\n custom_keys = CustomKeysJSONField(required=False)\n\n class Meta(CommunityRoleForm.Meta):\n fields = CommunityRoleForm.Meta.fields + (\"custom_keys\",)\n\n def __init__(self, *args, community_role_config: CommunityRoleConfig, **kwargs):\n self.config = community_role_config\n super().__init__(*args, **kwargs)\n self.fields[\"custom_keys\"].apply_labels( # type: ignore\n self.config.custom_key_labels\n )\n\n def check_concurrent_role(\n self,\n config: Optional[CommunityRoleConfig] = None,\n person: Optional[Person] = None,\n start_date: Optional[date] = None,\n end_date: Optional[date] = None,\n ) -> Optional[Person]:\n \"\"\"When updating a CommunityRole, we shouldn't check for concurrent roles.\"\"\"\n return person\n", "path": "amy/communityroles/forms.py"}]} | 2,072 | 794 |
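
The concurrency check added in the patch above reduces to a date-interval overlap test with optional end dates. Below is a minimal standalone sketch of that rule; the helper name and the choice to treat a missing end date as an open-ended, still-active role are assumptions for illustration, not code from the repository.

```python
from datetime import date
from typing import Optional


def ranges_overlap(
    start_a: date,
    end_a: Optional[date],
    start_b: date,
    end_b: Optional[date],
) -> bool:
    """Return True when [start_a, end_a) and [start_b, end_b) share any day.

    A missing end date means the role is still active, so the range is
    treated as extending indefinitely into the future.
    """
    # The ranges overlap unless one ends on or before the other starts.
    a_ends_before_b = end_a is not None and end_a <= start_b
    b_ends_before_a = end_b is not None and end_b <= start_a
    return not (a_ends_before_b or b_ends_before_a)


# Example: an Instructor role 2018-02-01..2020-06-01 conflicts with a new
# open-ended role starting 2019-01-01, but not with one starting 2020-06-01.
assert ranges_overlap(date(2018, 2, 1), date(2020, 6, 1), date(2019, 1, 1), None)
assert not ranges_overlap(date(2018, 2, 1), date(2020, 6, 1), date(2020, 6, 1), None)
```
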
gh_patches_debug_8903 | rasdani/github-patches | git_diff | getredash__redash-445 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Query execution fails with Google Apps accounts having non-ASCII characters in name
If the account used for Google Apps authentication has non-ASCII characters (ä in this case), execution of queries fails against an Amazon Redshift data source.
[2015-06-05 11:36:13,520][PID:7502][INFO][root] [Manager] Metadata: [{'Username': u'Raimo J\xe4rvenp\xe4\xe4', 'Query ID': 25}]
[2015-06-05 11:36:13,520][PID:7502][INFO][root] [Manager][a2a8974a74cfcb958afc2f843f9f896b] Found existing job: ed40d887-8dae-4e2d-a3a4-439bfb4f01a6
[2015-06-05 11:36:13,521][PID:7502][INFO][root] [a2a8974a74cfcb958afc2f843f9f896b] job found is ready (FAILURE), removing lock
--- END ISSUE ---
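
A minimal sketch of the failure mode described above, assuming Python 2 string semantics; the reproduction is illustrative and not taken from the report. Formatting a non-ASCII unicode value into a byte-string template forces an implicit ASCII encode, which is expected to raise UnicodeEncodeError during query annotation.

```python
# -*- coding: utf-8 -*-
# Python 2 sketch; the metadata values mirror the log line above.
metadata = {'Username': u'Raimo J\xe4rvenp\xe4\xe4', 'Query ID': 25}

try:
    # Byte-string templates: u'\xe4' cannot be encoded with the default
    # ASCII codec, so building the query annotation blows up.
    annotation = ", ".join(["{}: {}".format(k, v) for k, v in metadata.items()])
except UnicodeEncodeError as exc:
    annotation = None
    print("annotation failed: %s" % exc)
```
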
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/tasks.py`
Content:
```
1 import time
2 import datetime
3 import logging
4 import redis
5 from celery import Task
6 from celery.result import AsyncResult
7 from celery.utils.log import get_task_logger
8 from redash import redis_connection, models, statsd_client, settings
9 from redash.utils import gen_query_hash
10 from redash.worker import celery
11 from redash.query_runner import get_query_runner
12
13 logger = get_task_logger(__name__)
14
15
16 class BaseTask(Task):
17 abstract = True
18
19 def after_return(self, *args, **kwargs):
20 models.db.close_db(None)
21
22 def __call__(self, *args, **kwargs):
23 models.db.connect_db()
24 return super(BaseTask, self).__call__(*args, **kwargs)
25
26
27 class QueryTask(object):
28 MAX_RETRIES = 5
29
30 # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
31 STATUSES = {
32 'PENDING': 1,
33 'STARTED': 2,
34 'SUCCESS': 3,
35 'FAILURE': 4,
36 'REVOKED': 4
37 }
38
39 def __init__(self, job_id=None, async_result=None):
40 if async_result:
41 self._async_result = async_result
42 else:
43 self._async_result = AsyncResult(job_id, app=celery)
44
45 @property
46 def id(self):
47 return self._async_result.id
48
49 @classmethod
50 def add_task(cls, query, data_source, scheduled=False, metadata={}):
51 query_hash = gen_query_hash(query)
52 logging.info("[Manager][%s] Inserting job", query_hash)
53 logging.info("[Manager] Metadata: [%s]", metadata)
54 try_count = 0
55 job = None
56
57 while try_count < cls.MAX_RETRIES:
58 try_count += 1
59
60 pipe = redis_connection.pipeline()
61 try:
62 pipe.watch(cls._job_lock_id(query_hash, data_source.id))
63 job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))
64 if job_id:
65 logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)
66
67 job = cls(job_id=job_id)
68 if job.ready():
69 logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
70 redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
71 job = None
72
73 if not job:
74 pipe.multi()
75
76 if scheduled:
77 queue_name = data_source.scheduled_queue_name
78 else:
79 queue_name = data_source.queue_name
80
81 result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)
82 job = cls(async_result=result)
83
84 logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
85 pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
86 pipe.execute()
87 break
88
89 except redis.WatchError:
90 continue
91
92 if not job:
93 logging.error("[Manager][%s] Failed adding job for query.", query_hash)
94
95 return job
96
97 def to_dict(self):
98 if self._async_result.status == 'STARTED':
99 updated_at = self._async_result.result.get('start_time', 0)
100 else:
101 updated_at = 0
102
103 if self._async_result.failed() and isinstance(self._async_result.result, Exception):
104 error = self._async_result.result.message
105 elif self._async_result.status == 'REVOKED':
106 error = 'Query execution cancelled.'
107 else:
108 error = ''
109
110 if self._async_result.successful():
111 query_result_id = self._async_result.result
112 else:
113 query_result_id = None
114
115 return {
116 'id': self._async_result.id,
117 'updated_at': updated_at,
118 'status': self.STATUSES[self._async_result.status],
119 'error': error,
120 'query_result_id': query_result_id,
121 }
122
123 @property
124 def is_cancelled(self):
125 return self._async_result.status == 'REVOKED'
126
127 @property
128 def celery_status(self):
129 return self._async_result.status
130
131 def ready(self):
132 return self._async_result.ready()
133
134 def cancel(self):
135 return self._async_result.revoke(terminate=True)
136
137 @staticmethod
138 def _job_lock_id(query_hash, data_source_id):
139 return "query_hash_job:%s:%s" % (data_source_id, query_hash)
140
141
142 @celery.task(base=BaseTask)
143 def refresh_queries():
144 # self.status['last_refresh_at'] = time.time()
145 # self._save_status()
146
147 logger.info("Refreshing queries...")
148
149 outdated_queries_count = 0
150 for query in models.Query.outdated_queries():
151 QueryTask.add_task(query.query, query.data_source, scheduled=True,
152 metadata={'Query ID': query.id, 'Username': 'Scheduled'})
153 outdated_queries_count += 1
154
155 statsd_client.gauge('manager.outdated_queries', outdated_queries_count)
156
157 logger.info("Done refreshing queries. Found %d outdated queries." % outdated_queries_count)
158
159 status = redis_connection.hgetall('redash:status')
160 now = time.time()
161
162 redis_connection.hmset('redash:status', {
163 'outdated_queries_count': outdated_queries_count,
164 'last_refresh_at': now
165 })
166
167 statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
168
169
170 @celery.task(base=BaseTask)
171 def cleanup_tasks():
172 # in case of cold restart of the workers, there might be jobs that still have their "lock" object, but aren't really
173 # going to run. this job removes them.
174 lock_keys = redis_connection.keys("query_hash_job:*") # TODO: use set instead of keys command
175 if not lock_keys:
176 return
177
178 query_tasks = [QueryTask(job_id=j) for j in redis_connection.mget(lock_keys)]
179
180 logger.info("Found %d locks", len(query_tasks))
181
182 inspect = celery.control.inspect()
183 active_tasks = inspect.active()
184 if active_tasks is None:
185 active_tasks = []
186 else:
187 active_tasks = active_tasks.values()
188
189 all_tasks = set()
190 for task_list in active_tasks:
191 for task in task_list:
192 all_tasks.add(task['id'])
193
194 logger.info("Active jobs count: %d", len(all_tasks))
195
196 for i, t in enumerate(query_tasks):
197 if t.ready():
198 # if locked task is ready already (failed, finished, revoked), we don't need the lock anymore
199 logger.warning("%s is ready (%s), removing lock.", lock_keys[i], t.celery_status)
200 redis_connection.delete(lock_keys[i])
201
202 # if t.celery_status == 'STARTED' and t.id not in all_tasks:
203 # logger.warning("Couldn't find active job for: %s, removing lock.", lock_keys[i])
204 # redis_connection.delete(lock_keys[i])
205
206
207 @celery.task(base=BaseTask)
208 def cleanup_query_results():
209 """
210 Job to cleanup unused query results -- such that no query links to them anymore, and older than a week (so it's less
211 likely to be open in someone's browser and be used).
212
213 Each time the job deletes only 100 query results so it won't choke the database in case of many such results.
214 """
215
216 unused_query_results = models.QueryResult.unused().limit(100)
217 total_unused_query_results = models.QueryResult.unused().count()
218 deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()
219
220 logger.info("Deleted %d unused query results out of total of %d." % (deleted_count, total_unused_query_results))
221
222
223 @celery.task(base=BaseTask)
224 def refresh_schemas():
225 """
226 Refershs the datasources schema.
227 """
228
229 for ds in models.DataSource.all():
230 logger.info("Refreshing schema for: {}".format(ds.name))
231 ds.get_schema(refresh=True)
232
233
234 @celery.task(bind=True, base=BaseTask, track_started=True)
235 def execute_query(self, query, data_source_id, metadata):
236 start_time = time.time()
237
238 logger.info("Loading data source (%d)...", data_source_id)
239
240 # TODO: we should probably cache data sources in Redis
241 data_source = models.DataSource.get_by_id(data_source_id)
242
243 self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})
244
245 logger.info("Executing query:\n%s", query)
246
247 query_hash = gen_query_hash(query)
248 query_runner = get_query_runner(data_source.type, data_source.options)
249
250 if query_runner.annotate_query():
251 metadata['Task ID'] = self.request.id
252 metadata['Query Hash'] = query_hash
253 metadata['Queue'] = self.request.delivery_info['routing_key']
254
255 annotation = ", ".join(["{}: {}".format(k, v) for k, v in metadata.iteritems()])
256
257 logging.debug("Annotation: %s", annotation)
258
259 annotated_query = u"/* {} */ {}".format(annotation, query)
260 else:
261 annotated_query = query
262
263 with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):
264 data, error = query_runner.run_query(annotated_query)
265
266 run_time = time.time() - start_time
267 logger.info("Query finished... data length=%s, error=%s", data and len(data), error)
268
269 self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})
270
271 # Delete query_hash
272 redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
273
274 if not error:
275 query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow())
276 else:
277 raise Exception(error)
278
279 return query_result.id
280
281
282 @celery.task(base=BaseTask)
283 def record_event(event):
284 models.Event.record(event)
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/tasks.py b/redash/tasks.py
--- a/redash/tasks.py
+++ b/redash/tasks.py
@@ -252,9 +252,9 @@
metadata['Query Hash'] = query_hash
metadata['Queue'] = self.request.delivery_info['routing_key']
- annotation = ", ".join(["{}: {}".format(k, v) for k, v in metadata.iteritems()])
+ annotation = u", ".join([u"{}: {}".format(k, v) for k, v in metadata.iteritems()])
- logging.debug("Annotation: %s", annotation)
+ logging.debug(u"Annotation: %s", annotation)
annotated_query = u"/* {} */ {}".format(annotation, query)
else:
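
With the unicode templates from the patch above, the same metadata renders cleanly. Again a Python 2 sketch; the `metadata` and `query` values are stand-ins for the task's locals, not values from the repository.

```python
# -*- coding: utf-8 -*-
metadata = {'Username': u'Raimo J\xe4rvenp\xe4\xe4', 'Query ID': 25}
query = u"SELECT 1"

# Unicode templates keep the whole pipeline in unicode, so no implicit
# ASCII encode is attempted anywhere.
annotation = u", ".join([u"{}: {}".format(k, v) for k, v in metadata.items()])
annotated_query = u"/* {} */ {}".format(annotation, query)
print(annotated_query.encode('utf-8'))
```
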
| {"golden_diff": "diff --git a/redash/tasks.py b/redash/tasks.py\n--- a/redash/tasks.py\n+++ b/redash/tasks.py\n@@ -252,9 +252,9 @@\n metadata['Query Hash'] = query_hash\n metadata['Queue'] = self.request.delivery_info['routing_key']\n \n- annotation = \", \".join([\"{}: {}\".format(k, v) for k, v in metadata.iteritems()])\n+ annotation = u\", \".join([u\"{}: {}\".format(k, v) for k, v in metadata.iteritems()])\n \n- logging.debug(\"Annotation: %s\", annotation)\n+ logging.debug(u\"Annotation: %s\", annotation)\n \n annotated_query = u\"/* {} */ {}\".format(annotation, query)\n else:\n", "issue": "Query execution fails with Google Apps accounts having non-ASCII characters in name\nIf the account used for Google Apps authentication has non-ASCII characters (\u00e4 in this case), execution of queries fails against Amazon Redshift datasource.\n\n[2015-06-05 11:36:13,520][PID:7502][INFO][root] [Manager] Metadata: [{'Username': u'Raimo J\\xe4rvenp\\xe4\\xe4', 'Query ID': 25}]\n[2015-06-05 11:36:13,520][PID:7502][INFO][root] [Manager][a2a8974a74cfcb958afc2f843f9f896b] Found existing job: ed40d887-8dae-4e2d-a3a4-439bfb4f01a6\n[2015-06-05 11:36:13,521][PID:7502][INFO][root] [a2a8974a74cfcb958afc2f843f9f896b] job found is ready (FAILURE), removing lock\n\n", "before_files": [{"content": "import time\nimport datetime\nimport logging\nimport redis\nfrom celery import Task\nfrom celery.result import AsyncResult\nfrom celery.utils.log import get_task_logger\nfrom redash import redis_connection, models, statsd_client, settings\nfrom redash.utils import gen_query_hash\nfrom redash.worker import celery\nfrom redash.query_runner import get_query_runner\n\nlogger = get_task_logger(__name__)\n\n\nclass BaseTask(Task):\n abstract = True\n\n def after_return(self, *args, **kwargs):\n models.db.close_db(None)\n\n def __call__(self, *args, **kwargs):\n models.db.connect_db()\n return super(BaseTask, self).__call__(*args, **kwargs)\n\n\nclass QueryTask(object):\n MAX_RETRIES = 5\n\n # TODO: this is mapping to the old Job class statuses. 
Need to update the client side and remove this\n STATUSES = {\n 'PENDING': 1,\n 'STARTED': 2,\n 'SUCCESS': 3,\n 'FAILURE': 4,\n 'REVOKED': 4\n }\n\n def __init__(self, job_id=None, async_result=None):\n if async_result:\n self._async_result = async_result\n else:\n self._async_result = AsyncResult(job_id, app=celery)\n\n @property\n def id(self):\n return self._async_result.id\n\n @classmethod\n def add_task(cls, query, data_source, scheduled=False, metadata={}):\n query_hash = gen_query_hash(query)\n logging.info(\"[Manager][%s] Inserting job\", query_hash)\n logging.info(\"[Manager] Metadata: [%s]\", metadata)\n try_count = 0\n job = None\n \n while try_count < cls.MAX_RETRIES:\n try_count += 1\n\n pipe = redis_connection.pipeline()\n try:\n pipe.watch(cls._job_lock_id(query_hash, data_source.id))\n job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))\n if job_id:\n logging.info(\"[Manager][%s] Found existing job: %s\", query_hash, job_id)\n\n job = cls(job_id=job_id)\n if job.ready():\n logging.info(\"[%s] job found is ready (%s), removing lock\", query_hash, job.celery_status)\n redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))\n job = None\n\n if not job:\n pipe.multi()\n\n if scheduled:\n queue_name = data_source.scheduled_queue_name\n else:\n queue_name = data_source.queue_name\n\n result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)\n job = cls(async_result=result)\n \n logging.info(\"[Manager][%s] Created new job: %s\", query_hash, job.id)\n pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)\n pipe.execute()\n break\n\n except redis.WatchError:\n continue\n\n if not job:\n logging.error(\"[Manager][%s] Failed adding job for query.\", query_hash)\n\n return job\n\n def to_dict(self):\n if self._async_result.status == 'STARTED':\n updated_at = self._async_result.result.get('start_time', 0)\n else:\n updated_at = 0\n\n if self._async_result.failed() and isinstance(self._async_result.result, Exception):\n error = self._async_result.result.message\n elif self._async_result.status == 'REVOKED':\n error = 'Query execution cancelled.'\n else:\n error = ''\n\n if self._async_result.successful():\n query_result_id = self._async_result.result\n else:\n query_result_id = None\n\n return {\n 'id': self._async_result.id,\n 'updated_at': updated_at,\n 'status': self.STATUSES[self._async_result.status],\n 'error': error,\n 'query_result_id': query_result_id,\n }\n\n @property\n def is_cancelled(self):\n return self._async_result.status == 'REVOKED'\n\n @property\n def celery_status(self):\n return self._async_result.status\n\n def ready(self):\n return self._async_result.ready()\n\n def cancel(self):\n return self._async_result.revoke(terminate=True)\n\n @staticmethod\n def _job_lock_id(query_hash, data_source_id):\n return \"query_hash_job:%s:%s\" % (data_source_id, query_hash)\n\n\[email protected](base=BaseTask)\ndef refresh_queries():\n # self.status['last_refresh_at'] = time.time()\n # self._save_status()\n\n logger.info(\"Refreshing queries...\")\n\n outdated_queries_count = 0\n for query in models.Query.outdated_queries():\n QueryTask.add_task(query.query, query.data_source, scheduled=True,\n metadata={'Query ID': query.id, 'Username': 'Scheduled'})\n outdated_queries_count += 1\n\n statsd_client.gauge('manager.outdated_queries', outdated_queries_count)\n\n logger.info(\"Done refreshing queries. 
Found %d outdated queries.\" % outdated_queries_count)\n\n status = redis_connection.hgetall('redash:status')\n now = time.time()\n\n redis_connection.hmset('redash:status', {\n 'outdated_queries_count': outdated_queries_count,\n 'last_refresh_at': now\n })\n\n statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))\n\n\[email protected](base=BaseTask)\ndef cleanup_tasks():\n # in case of cold restart of the workers, there might be jobs that still have their \"lock\" object, but aren't really\n # going to run. this job removes them.\n lock_keys = redis_connection.keys(\"query_hash_job:*\") # TODO: use set instead of keys command\n if not lock_keys:\n return\n \n query_tasks = [QueryTask(job_id=j) for j in redis_connection.mget(lock_keys)]\n\n logger.info(\"Found %d locks\", len(query_tasks))\n\n inspect = celery.control.inspect()\n active_tasks = inspect.active()\n if active_tasks is None:\n active_tasks = []\n else:\n active_tasks = active_tasks.values()\n\n all_tasks = set()\n for task_list in active_tasks:\n for task in task_list:\n all_tasks.add(task['id'])\n\n logger.info(\"Active jobs count: %d\", len(all_tasks))\n\n for i, t in enumerate(query_tasks):\n if t.ready():\n # if locked task is ready already (failed, finished, revoked), we don't need the lock anymore\n logger.warning(\"%s is ready (%s), removing lock.\", lock_keys[i], t.celery_status)\n redis_connection.delete(lock_keys[i])\n\n # if t.celery_status == 'STARTED' and t.id not in all_tasks:\n # logger.warning(\"Couldn't find active job for: %s, removing lock.\", lock_keys[i])\n # redis_connection.delete(lock_keys[i])\n\n\[email protected](base=BaseTask)\ndef cleanup_query_results():\n \"\"\"\n Job to cleanup unused query results -- such that no query links to them anymore, and older than a week (so it's less\n likely to be open in someone's browser and be used).\n\n Each time the job deletes only 100 query results so it won't choke the database in case of many such results.\n \"\"\"\n\n unused_query_results = models.QueryResult.unused().limit(100)\n total_unused_query_results = models.QueryResult.unused().count()\n deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()\n\n logger.info(\"Deleted %d unused query results out of total of %d.\" % (deleted_count, total_unused_query_results))\n\n\[email protected](base=BaseTask)\ndef refresh_schemas():\n \"\"\"\n Refershs the datasources schema.\n \"\"\"\n\n for ds in models.DataSource.all():\n logger.info(\"Refreshing schema for: {}\".format(ds.name))\n ds.get_schema(refresh=True)\n\n\[email protected](bind=True, base=BaseTask, track_started=True)\ndef execute_query(self, query, data_source_id, metadata):\n start_time = time.time()\n\n logger.info(\"Loading data source (%d)...\", data_source_id)\n\n # TODO: we should probably cache data sources in Redis\n data_source = models.DataSource.get_by_id(data_source_id)\n\n self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})\n\n logger.info(\"Executing query:\\n%s\", query)\n\n query_hash = gen_query_hash(query)\n query_runner = get_query_runner(data_source.type, data_source.options)\n\n if query_runner.annotate_query():\n metadata['Task ID'] = self.request.id\n metadata['Query Hash'] = query_hash\n metadata['Queue'] = self.request.delivery_info['routing_key']\n\n annotation = \", \".join([\"{}: {}\".format(k, v) for k, v in metadata.iteritems()])\n\n logging.debug(\"Annotation: %s\", annotation)\n\n 
annotated_query = u\"/* {} */ {}\".format(annotation, query)\n else:\n annotated_query = query\n\n with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):\n data, error = query_runner.run_query(annotated_query)\n\n run_time = time.time() - start_time\n logger.info(\"Query finished... data length=%s, error=%s\", data and len(data), error)\n\n self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})\n\n # Delete query_hash\n redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))\n\n if not error:\n query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow())\n else:\n raise Exception(error)\n\n return query_result.id\n\n\[email protected](base=BaseTask)\ndef record_event(event):\n models.Event.record(event)\n", "path": "redash/tasks.py"}], "after_files": [{"content": "import time\nimport datetime\nimport logging\nimport redis\nfrom celery import Task\nfrom celery.result import AsyncResult\nfrom celery.utils.log import get_task_logger\nfrom redash import redis_connection, models, statsd_client, settings\nfrom redash.utils import gen_query_hash\nfrom redash.worker import celery\nfrom redash.query_runner import get_query_runner\n\nlogger = get_task_logger(__name__)\n\n\nclass BaseTask(Task):\n abstract = True\n\n def after_return(self, *args, **kwargs):\n models.db.close_db(None)\n\n def __call__(self, *args, **kwargs):\n models.db.connect_db()\n return super(BaseTask, self).__call__(*args, **kwargs)\n\n\nclass QueryTask(object):\n MAX_RETRIES = 5\n\n # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this\n STATUSES = {\n 'PENDING': 1,\n 'STARTED': 2,\n 'SUCCESS': 3,\n 'FAILURE': 4,\n 'REVOKED': 4\n }\n\n def __init__(self, job_id=None, async_result=None):\n if async_result:\n self._async_result = async_result\n else:\n self._async_result = AsyncResult(job_id, app=celery)\n\n @property\n def id(self):\n return self._async_result.id\n\n @classmethod\n def add_task(cls, query, data_source, scheduled=False, metadata={}):\n query_hash = gen_query_hash(query)\n logging.info(\"[Manager][%s] Inserting job\", query_hash)\n logging.info(\"[Manager] Metadata: [%s]\", metadata)\n try_count = 0\n job = None\n \n while try_count < cls.MAX_RETRIES:\n try_count += 1\n\n pipe = redis_connection.pipeline()\n try:\n pipe.watch(cls._job_lock_id(query_hash, data_source.id))\n job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))\n if job_id:\n logging.info(\"[Manager][%s] Found existing job: %s\", query_hash, job_id)\n\n job = cls(job_id=job_id)\n if job.ready():\n logging.info(\"[%s] job found is ready (%s), removing lock\", query_hash, job.celery_status)\n redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))\n job = None\n\n if not job:\n pipe.multi()\n\n if scheduled:\n queue_name = data_source.scheduled_queue_name\n else:\n queue_name = data_source.queue_name\n\n result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)\n job = cls(async_result=result)\n \n logging.info(\"[Manager][%s] Created new job: %s\", query_hash, job.id)\n pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)\n pipe.execute()\n break\n\n except redis.WatchError:\n continue\n\n if not job:\n logging.error(\"[Manager][%s] Failed adding job for query.\", query_hash)\n\n return job\n\n def to_dict(self):\n if 
self._async_result.status == 'STARTED':\n updated_at = self._async_result.result.get('start_time', 0)\n else:\n updated_at = 0\n\n if self._async_result.failed() and isinstance(self._async_result.result, Exception):\n error = self._async_result.result.message\n elif self._async_result.status == 'REVOKED':\n error = 'Query execution cancelled.'\n else:\n error = ''\n\n if self._async_result.successful():\n query_result_id = self._async_result.result\n else:\n query_result_id = None\n\n return {\n 'id': self._async_result.id,\n 'updated_at': updated_at,\n 'status': self.STATUSES[self._async_result.status],\n 'error': error,\n 'query_result_id': query_result_id,\n }\n\n @property\n def is_cancelled(self):\n return self._async_result.status == 'REVOKED'\n\n @property\n def celery_status(self):\n return self._async_result.status\n\n def ready(self):\n return self._async_result.ready()\n\n def cancel(self):\n return self._async_result.revoke(terminate=True)\n\n @staticmethod\n def _job_lock_id(query_hash, data_source_id):\n return \"query_hash_job:%s:%s\" % (data_source_id, query_hash)\n\n\[email protected](base=BaseTask)\ndef refresh_queries():\n # self.status['last_refresh_at'] = time.time()\n # self._save_status()\n\n logger.info(\"Refreshing queries...\")\n\n outdated_queries_count = 0\n for query in models.Query.outdated_queries():\n QueryTask.add_task(query.query, query.data_source, scheduled=True,\n metadata={'Query ID': query.id, 'Username': 'Scheduled'})\n outdated_queries_count += 1\n\n statsd_client.gauge('manager.outdated_queries', outdated_queries_count)\n\n logger.info(\"Done refreshing queries. Found %d outdated queries.\" % outdated_queries_count)\n\n status = redis_connection.hgetall('redash:status')\n now = time.time()\n\n redis_connection.hmset('redash:status', {\n 'outdated_queries_count': outdated_queries_count,\n 'last_refresh_at': now\n })\n\n statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))\n\n\[email protected](base=BaseTask)\ndef cleanup_tasks():\n # in case of cold restart of the workers, there might be jobs that still have their \"lock\" object, but aren't really\n # going to run. 
this job removes them.\n lock_keys = redis_connection.keys(\"query_hash_job:*\") # TODO: use set instead of keys command\n if not lock_keys:\n return\n \n query_tasks = [QueryTask(job_id=j) for j in redis_connection.mget(lock_keys)]\n\n logger.info(\"Found %d locks\", len(query_tasks))\n\n inspect = celery.control.inspect()\n active_tasks = inspect.active()\n if active_tasks is None:\n active_tasks = []\n else:\n active_tasks = active_tasks.values()\n\n all_tasks = set()\n for task_list in active_tasks:\n for task in task_list:\n all_tasks.add(task['id'])\n\n logger.info(\"Active jobs count: %d\", len(all_tasks))\n\n for i, t in enumerate(query_tasks):\n if t.ready():\n # if locked task is ready already (failed, finished, revoked), we don't need the lock anymore\n logger.warning(\"%s is ready (%s), removing lock.\", lock_keys[i], t.celery_status)\n redis_connection.delete(lock_keys[i])\n\n # if t.celery_status == 'STARTED' and t.id not in all_tasks:\n # logger.warning(\"Couldn't find active job for: %s, removing lock.\", lock_keys[i])\n # redis_connection.delete(lock_keys[i])\n\n\[email protected](base=BaseTask)\ndef cleanup_query_results():\n \"\"\"\n Job to cleanup unused query results -- such that no query links to them anymore, and older than a week (so it's less\n likely to be open in someone's browser and be used).\n\n Each time the job deletes only 100 query results so it won't choke the database in case of many such results.\n \"\"\"\n\n unused_query_results = models.QueryResult.unused().limit(100)\n total_unused_query_results = models.QueryResult.unused().count()\n deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()\n\n logger.info(\"Deleted %d unused query results out of total of %d.\" % (deleted_count, total_unused_query_results))\n\n\[email protected](base=BaseTask)\ndef refresh_schemas():\n \"\"\"\n Refershs the datasources schema.\n \"\"\"\n\n for ds in models.DataSource.all():\n logger.info(\"Refreshing schema for: {}\".format(ds.name))\n ds.get_schema(refresh=True)\n\n\[email protected](bind=True, base=BaseTask, track_started=True)\ndef execute_query(self, query, data_source_id, metadata):\n start_time = time.time()\n\n logger.info(\"Loading data source (%d)...\", data_source_id)\n\n # TODO: we should probably cache data sources in Redis\n data_source = models.DataSource.get_by_id(data_source_id)\n\n self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})\n\n logger.info(\"Executing query:\\n%s\", query)\n\n query_hash = gen_query_hash(query)\n query_runner = get_query_runner(data_source.type, data_source.options)\n\n if query_runner.annotate_query():\n metadata['Task ID'] = self.request.id\n metadata['Query Hash'] = query_hash\n metadata['Queue'] = self.request.delivery_info['routing_key']\n\n annotation = u\", \".join([u\"{}: {}\".format(k, v) for k, v in metadata.iteritems()])\n\n logging.debug(u\"Annotation: %s\", annotation)\n\n annotated_query = u\"/* {} */ {}\".format(annotation, query)\n else:\n annotated_query = query\n\n with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):\n data, error = query_runner.run_query(annotated_query)\n\n run_time = time.time() - start_time\n logger.info(\"Query finished... 
data length=%s, error=%s\", data and len(data), error)\n\n self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})\n\n # Delete query_hash\n redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))\n\n if not error:\n query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow())\n else:\n raise Exception(error)\n\n return query_result.id\n\n\[email protected](base=BaseTask)\ndef record_event(event):\n models.Event.record(event)\n", "path": "redash/tasks.py"}]} | 3,511 | 163 |
gh_patches_debug_621 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-10610 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change profile edit form success page
Currently, when a user saves the profile edit form, the success page is not the profile form page; the user gets redirected to the profile public view page. This is quite confusing UX, but it might be baked into Allauth. I would expect this to end up on the profile edit form page instead.
--- END ISSUE ---
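
For background on where that redirect comes from: in a Django class-based edit view, a valid form submission ends with a redirect to whatever `get_success_url()` returns. A simplified sketch of that flow follows; it describes generic Django `ModelFormMixin` behaviour and is not code from this repository.

```python
# Simplified sketch of django.views.generic.edit.ModelFormMixin behaviour:
# once the form saves, the response is a redirect to get_success_url(),
# so that single method decides which page the user lands on.
from django.http import HttpResponseRedirect


class ModelFormMixinSketch(object):
    def form_valid(self, form):
        self.object = form.save()
        return HttpResponseRedirect(self.get_success_url())
```
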
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/profiles/views.py`
Content:
```
1 """Views for creating, editing and viewing site-specific user profiles."""
2
3 from allauth.account.views import LoginView as AllAuthLoginView
4 from allauth.account.views import LogoutView as AllAuthLogoutView
5 from django.conf import settings
6 from django.contrib import messages
7 from django.contrib.auth import logout
8 from django.contrib.auth.models import User
9 from django.contrib.messages.views import SuccessMessageMixin
10 from django.http import Http404, HttpResponseRedirect
11 from django.urls import reverse
12 from django.utils import timezone
13 from django.utils.translation import gettext_lazy as _
14 from rest_framework.authtoken.models import Token
15 from vanilla import CreateView, DeleteView, DetailView, FormView, ListView, UpdateView
16
17 from readthedocs.audit.filters import UserSecurityLogFilter
18 from readthedocs.audit.models import AuditLog
19 from readthedocs.core.forms import UserAdvertisingForm, UserDeleteForm, UserProfileForm
20 from readthedocs.core.history import set_change_reason
21 from readthedocs.core.mixins import PrivateViewMixin
22 from readthedocs.core.models import UserProfile
23 from readthedocs.core.permissions import AdminPermission
24 from readthedocs.core.utils.extend import SettingsOverrideObject
25 from readthedocs.organizations.models import Organization
26 from readthedocs.projects.models import Project
27 from readthedocs.projects.utils import get_csv_file
28
29
30 class LoginViewBase(AllAuthLoginView):
31
32 pass
33
34
35 class LoginView(SettingsOverrideObject):
36
37 _default_class = LoginViewBase
38
39
40 class LogoutViewBase(AllAuthLogoutView):
41
42 pass
43
44
45 class LogoutView(SettingsOverrideObject):
46
47 _default_class = LogoutViewBase
48
49
50 class ProfileEdit(PrivateViewMixin, UpdateView):
51
52 """Edit the current user's profile."""
53
54 model = UserProfile
55 form_class = UserProfileForm
56 template_name = 'profiles/private/edit_profile.html'
57 context_object_name = 'profile'
58
59 def get_object(self):
60 return self.request.user.profile
61
62 def get_success_url(self):
63 return reverse(
64 'profiles_profile_detail',
65 kwargs={'username': self.request.user.username},
66 )
67
68
69 class AccountDelete(PrivateViewMixin, SuccessMessageMixin, FormView):
70
71 form_class = UserDeleteForm
72 template_name = 'profiles/private/delete_account.html'
73 success_message = _('You have successfully deleted your account')
74
75 def get_object(self):
76 return User.objects.get(pk=self.request.user.pk)
77
78 def form_valid(self, form):
79 user = self.get_object()
80 logout(self.request)
81 set_change_reason(user, self.get_change_reason())
82 user.delete()
83 return super().form_valid(form)
84
85 def get_form(self, data=None, files=None, **kwargs):
86 kwargs['instance'] = self.get_object()
87 kwargs['initial'] = {'username': ''}
88 return super().get_form(data, files, **kwargs)
89
90 def get_context_data(self, **kwargs):
91 context = super().get_context_data(**kwargs)
92 user = self.request.user
93 context["projects_to_be_deleted"] = Project.objects.single_owner(user)
94 context["organizations_to_be_deleted"] = Organization.objects.single_owner(user)
95 return context
96
97 def get_success_url(self):
98 return reverse('homepage')
99
100 def get_change_reason(self):
101 klass = self.__class__.__name__
102 return f'origin=form class={klass}'
103
104
105 class ProfileDetail(DetailView):
106
107 model = User
108 template_name = 'profiles/public/profile_detail.html'
109 lookup_field = 'username'
110
111 def get_object(self):
112 """
113 Get the user object.
114
115 If organizations are enabled, show the profile to users in the same organization only.
116 Otherwise, all users can see the profile of others.
117 """
118 user = super().get_object()
119 if not settings.RTD_ALLOW_ORGANIZATIONS:
120 return user
121
122 request_user = self.request.user
123 if not request_user.is_authenticated:
124 raise Http404()
125
126 # Always allow users to see their own profile.
127 if request_user == user:
128 return user
129
130 for org in Organization.objects.for_user(request_user):
131 if AdminPermission.is_member(user=user, obj=org):
132 return user
133 raise Http404()
134
135 def get_context_data(self, **kwargs):
136 context = super().get_context_data(**kwargs)
137 context['profile'] = self.get_object().profile
138 return context
139
140
141 class AccountAdvertisingEdit(PrivateViewMixin, SuccessMessageMixin, UpdateView):
142
143 model = UserProfile
144 form_class = UserAdvertisingForm
145 context_object_name = 'profile'
146 template_name = 'profiles/private/advertising_profile.html'
147 success_message = _('Updated your advertising preferences')
148
149 def get_object(self):
150 return self.request.user.profile
151
152 def get_success_url(self):
153 return reverse('account_advertising')
154
155
156 class TokenMixin(PrivateViewMixin):
157
158 """User token to access APIv3."""
159
160 model = Token
161 lookup_url_kwarg = 'token_pk'
162 template_name = 'profiles/private/token_list.html'
163
164 def get_queryset(self):
165 # NOTE: we are currently showing just one token since the DRF model has
166 # a OneToOneField relation with User. Although, we plan to have multiple
167 # scope-based tokens.
168 return Token.objects.filter(user__in=[self.request.user])
169
170 def get_success_url(self):
171 return reverse('profiles_tokens')
172
173
174 class TokenListView(TokenMixin, ListView):
175 pass
176
177
178 class TokenCreateView(TokenMixin, CreateView):
179
180 """Simple view to generate a Token object for the logged in User."""
181
182 http_method_names = ['post']
183
184 def post(self, request, *args, **kwargs):
185 _, created = Token.objects.get_or_create(user=self.request.user)
186 if created:
187 messages.info(request, 'API Token created successfully')
188 return HttpResponseRedirect(self.get_success_url())
189
190
191 class TokenDeleteView(TokenMixin, DeleteView):
192
193 """View to delete/revoke the current Token of the logged in User."""
194
195 http_method_names = ['post']
196
197 def get_object(self, queryset=None): # noqa
198 return self.request.user.auth_token
199
200
201 class UserSecurityLogView(PrivateViewMixin, ListView):
202 model = AuditLog
203 template_name = 'profiles/private/security_log.html'
204 days_limit = settings.RTD_AUDITLOGS_DEFAULT_RETENTION_DAYS
205
206 def get(self, request, *args, **kwargs):
207 download_data = request.GET.get('download', False)
208 if download_data:
209 return self._get_csv_data()
210 return super().get(request, *args, **kwargs)
211
212 def _get_start_date(self):
213 """Get the date to show logs from."""
214 creation_date = self.request.user.date_joined.date()
215 start_date = timezone.now().date() - timezone.timedelta(days=self.days_limit)
216 # The max we can go back is to the creation of the user.
217 return max(start_date, creation_date)
218
219 def _get_csv_data(self):
220 current_timezone = settings.TIME_ZONE
221 values = [
222 (f"Date ({current_timezone})", "created"),
223 ("User", "log_user_username"),
224 ("Project", "log_project_slug"),
225 ("Organization", "log_organization_slug"),
226 ("Action", "action"),
227 ("IP", "ip"),
228 ("Browser", "browser"),
229 ("Extra data", "data"),
230 ]
231 data = self.get_queryset().values_list(*[value for _, value in values])
232
233 start_date = self._get_start_date()
234 end_date = timezone.now().date()
235 date_filter = self.filter.form.cleaned_data.get('date')
236 if date_filter:
237 start_date = date_filter.start or start_date
238 end_date = date_filter.stop or end_date
239
240 filename = 'readthedocs_user_security_logs_{username}_{start}_{end}.csv'.format(
241 username=self.request.user.username,
242 start=timezone.datetime.strftime(start_date, '%Y-%m-%d'),
243 end=timezone.datetime.strftime(end_date, '%Y-%m-%d'),
244 )
245 csv_data = [
246 [timezone.datetime.strftime(date, '%Y-%m-%d %H:%M:%S'), *rest]
247 for date, *rest in data
248 ]
249 csv_data.insert(0, [header for header, _ in values])
250 return get_csv_file(filename=filename, csv_data=csv_data)
251
252 def get_context_data(self, **kwargs):
253 context = super().get_context_data(**kwargs)
254 context['days_limit'] = self.days_limit
255 context['filter'] = self.filter
256 context['AuditLog'] = AuditLog
257 return context
258
259 def _get_queryset(self):
260 """Return the queryset without filters."""
261 user = self.request.user
262 start_date = self._get_start_date()
263 queryset = AuditLog.objects.filter(
264 user=user,
265 action__in=[action for action, _ in UserSecurityLogFilter.allowed_actions],
266 created__gte=start_date,
267 )
268 return queryset
269
270 def get_queryset(self):
271 """
272 Return the queryset with filters.
273
274 If you want the original queryset without filters,
275 use `_get_queryset`.
276 """
277 queryset = self._get_queryset()
278 # Set filter on self, so we can use it in the context.
279 # Without executing it twice.
280 self.filter = UserSecurityLogFilter(
281 self.request.GET,
282 queryset=queryset,
283 )
284 return self.filter.qs
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/profiles/views.py b/readthedocs/profiles/views.py
--- a/readthedocs/profiles/views.py
+++ b/readthedocs/profiles/views.py
@@ -61,8 +61,7 @@
def get_success_url(self):
return reverse(
- 'profiles_profile_detail',
- kwargs={'username': self.request.user.username},
+ "profiles_profile_edit",
)
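
One detail worth noting about the patch above: the profile-detail URL needs a `username` kwarg, while the edit view always operates on `request.user`, so the new success URL can be reversed without arguments. A small sketch of the updated method, with the surrounding view class elided and the class name chosen only for illustration:

```python
from django.urls import reverse


class ProfileEditSketch(object):
    """Stand-in for the real ProfileEdit view; only the redirect target changes."""

    def get_success_url(self):
        # No kwargs needed: the edit page is always the current user's own.
        return reverse("profiles_profile_edit")
```
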
| {"golden_diff": "diff --git a/readthedocs/profiles/views.py b/readthedocs/profiles/views.py\n--- a/readthedocs/profiles/views.py\n+++ b/readthedocs/profiles/views.py\n@@ -61,8 +61,7 @@\n \n def get_success_url(self):\n return reverse(\n- 'profiles_profile_detail',\n- kwargs={'username': self.request.user.username},\n+ \"profiles_profile_edit\",\n )\n", "issue": "Change profile edit form success page\nCurrently, when a user saves the profile edit form, the success page is not the profile form page, the user gets redirected to the profile public view page. This is quite confusing UX but might be baked into Allauth. I would expect this end up on the profile edit form page instead.\n", "before_files": [{"content": "\"\"\"Views for creating, editing and viewing site-specific user profiles.\"\"\"\n\nfrom allauth.account.views import LoginView as AllAuthLoginView\nfrom allauth.account.views import LogoutView as AllAuthLogoutView\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.models import User\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework.authtoken.models import Token\nfrom vanilla import CreateView, DeleteView, DetailView, FormView, ListView, UpdateView\n\nfrom readthedocs.audit.filters import UserSecurityLogFilter\nfrom readthedocs.audit.models import AuditLog\nfrom readthedocs.core.forms import UserAdvertisingForm, UserDeleteForm, UserProfileForm\nfrom readthedocs.core.history import set_change_reason\nfrom readthedocs.core.mixins import PrivateViewMixin\nfrom readthedocs.core.models import UserProfile\nfrom readthedocs.core.permissions import AdminPermission\nfrom readthedocs.core.utils.extend import SettingsOverrideObject\nfrom readthedocs.organizations.models import Organization\nfrom readthedocs.projects.models import Project\nfrom readthedocs.projects.utils import get_csv_file\n\n\nclass LoginViewBase(AllAuthLoginView):\n\n pass\n\n\nclass LoginView(SettingsOverrideObject):\n\n _default_class = LoginViewBase\n\n\nclass LogoutViewBase(AllAuthLogoutView):\n\n pass\n\n\nclass LogoutView(SettingsOverrideObject):\n\n _default_class = LogoutViewBase\n\n\nclass ProfileEdit(PrivateViewMixin, UpdateView):\n\n \"\"\"Edit the current user's profile.\"\"\"\n\n model = UserProfile\n form_class = UserProfileForm\n template_name = 'profiles/private/edit_profile.html'\n context_object_name = 'profile'\n\n def get_object(self):\n return self.request.user.profile\n\n def get_success_url(self):\n return reverse(\n 'profiles_profile_detail',\n kwargs={'username': self.request.user.username},\n )\n\n\nclass AccountDelete(PrivateViewMixin, SuccessMessageMixin, FormView):\n\n form_class = UserDeleteForm\n template_name = 'profiles/private/delete_account.html'\n success_message = _('You have successfully deleted your account')\n\n def get_object(self):\n return User.objects.get(pk=self.request.user.pk)\n\n def form_valid(self, form):\n user = self.get_object()\n logout(self.request)\n set_change_reason(user, self.get_change_reason())\n user.delete()\n return super().form_valid(form)\n\n def get_form(self, data=None, files=None, **kwargs):\n kwargs['instance'] = self.get_object()\n kwargs['initial'] = {'username': ''}\n return super().get_form(data, files, **kwargs)\n\n def get_context_data(self, **kwargs):\n 
…", "path": "readthedocs/profiles/views.py"}], "after_files": [{"content": "…", "path": "readthedocs/profiles/views.py"}]} | 3,078 | 91 |
gh_patches_debug_2040 | rasdani/github-patches | git_diff | projectmesa__mesa-2125 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix error in failing flocking benchmark
Our benchmarks are failing: https://github.com/projectmesa/mesa/actions/workflows/benchmarks.yml
```bash
08:41:17 starting benchmarks.
08:41:35 Schelling (small) timings: Init 0.00771 s; Run 0.0472 s
08:41:55 Schelling (large) timings: Init 0.05062 s; Run 0.4629 s
08:42:01 WolfSheep (small) timings: Init 0.00333 s; Run 0.0124 s
08:42:15 WolfSheep (large) timings: Init 0.05334 s; Run 0.2206 s
File "/home/runner/work/mesa/mesa/benchmarks/global_benchmark.py", line 62, in <module>
results = run_experiments(model, config)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/runner/work/mesa/mesa/benchmarks/global_benchmark.py", line 47, in run_experiments
init_time, run_time = run_model(model_class, seed, config["parameters"])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/runner/work/mesa/mesa/benchmarks/global_benchmark.py", line [21](https://github.com/projectmesa/mesa/actions/runs/8813652146/job/24191834761#step:7:22), in run_model
model = model_class(simulator=simulator, seed=seed, **parameters)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/runner/work/mesa/mesa/benchmarks/Flocking/flocking.py", line 139, in __init__
boid = Boid(
^^^^^
TypeError: Boid.__init__() got an unexpected keyword argument 'pos'
Error: Process completed with exit code 1.
```
Most likely something was changed in [benchmarks/Flocking/flocking.py](https://github.com/projectmesa/mesa/blob/main/benchmarks/Flocking/flocking.py) that needs to be fixed/reverted.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benchmarks/Flocking/flocking.py`
Content:
```
1 """
2 Flockers
3 =============================================================
4 A Mesa implementation of Craig Reynolds's Boids flocker model.
5 Uses numpy arrays to represent vectors.
6 """
7
8 import numpy as np
9
10 import mesa
11
12
13 class Boid(mesa.Agent):
14 """
15 A Boid-style flocker agent.
16
17 The agent follows three behaviors to flock:
18 - Cohesion: steering towards neighboring agents.
19 - Separation: avoiding getting too close to any other agent.
20 - Alignment: try to fly in the same direction as the neighbors.
21
22 Boids have a vision that defines the radius in which they look for their
23 neighbors to flock with. Their speed (a scalar) and direction (a vector)
24 define their movement. Separation is their desired minimum distance from
25 any other Boid.
26 """
27
28 def __init__(
29 self,
30 unique_id,
31 model,
32 speed,
33 direction,
34 vision,
35 separation,
36 cohere=0.03,
37 separate=0.015,
38 match=0.05,
39 ):
40 """
41 Create a new Boid flocker agent.
42
43 Args:
44 unique_id: Unique agent identifier.
45 speed: Distance to move per step.
46 direction: numpy vector for the Boid's direction of movement.
47 vision: Radius to look around for nearby Boids.
48 separation: Minimum distance to maintain from other Boids.
49 cohere: the relative importance of matching neighbors' positions
50 separate: the relative importance of avoiding close neighbors
51 match: the relative importance of matching neighbors' directions
52
53 """
54 super().__init__(unique_id, model)
55 self.speed = speed
56 self.direction = direction
57 self.vision = vision
58 self.separation = separation
59 self.cohere_factor = cohere
60 self.separate_factor = separate
61 self.match_factor = match
62
63 def step(self):
64 """
65 Get the Boid's neighbors, compute the new vector, and move accordingly.
66 """
67
68 neighbors = self.model.space.get_neighbors(self.pos, self.vision, False)
69 n = 0
70 match_vector, separation_vector, cohere = np.zeros((3, 2))
71 for neighbor in neighbors:
72 n += 1
73 heading = self.model.space.get_heading(self.pos, neighbor.pos)
74 cohere += heading
75 if self.model.space.get_distance(self.pos, neighbor.pos) < self.separation:
76 separation_vector -= heading
77 match_vector += neighbor.direction
78 n = max(n, 1)
79 cohere = cohere * self.cohere_factor
80 separation_vector = separation_vector * self.separate_factor
81 match_vector = match_vector * self.match_factor
82 self.direction += (cohere + separation_vector + match_vector) / n
83 self.direction /= np.linalg.norm(self.direction)
84 new_pos = self.pos + self.direction * self.speed
85 self.model.space.move_agent(self, new_pos)
86
87
88 class BoidFlockers(mesa.Model):
89 """
90 Flocker model class. Handles agent creation, placement and scheduling.
91 """
92
93 def __init__(
94 self,
95 seed=None,
96 population=100,
97 width=100,
98 height=100,
99 vision=10,
100 speed=1,
101 separation=1,
102 cohere=0.03,
103 separate=0.015,
104 match=0.05,
105 simulator=None,
106 ):
107 """
108 Create a new Flockers model.
109
110 Args:
111 population: Number of Boids
112 width, height: Size of the space.
113 speed: How fast should the Boids move.
114 vision: How far around should each Boid look for its neighbors
115 separation: What's the minimum distance each Boid will attempt to
116 keep from any other
117 cohere, separate, match: factors for the relative importance of
118 the three drives.
119 """
120 super().__init__(seed=seed)
121 self.population = population
122 self.width = width
123 self.height = height
124 self.simulator = simulator
125
126 self.schedule = mesa.time.RandomActivation(self)
127 self.space = mesa.space.ContinuousSpace(self.width, self.height, True)
128 self.factors = {
129 "cohere": cohere,
130 "separate": separate,
131 "match": match,
132 }
133
134 for i in range(self.population):
135 x = self.random.random() * self.space.x_max
136 y = self.random.random() * self.space.y_max
137 pos = np.array((x, y))
138 direction = np.random.random(2) * 2 - 1
139 boid = Boid(
140 unique_id=i,
141 model=self,
142 pos=pos,
143 speed=speed,
144 direction=direction,
145 vision=vision,
146 separation=separation,
147 **self.factors,
148 )
149 self.space.place_agent(boid, pos)
150 self.schedule.add(boid)
151
152 def step(self):
153 self.schedule.step()
154
155
156 if __name__ == "__main__":
157 import time
158
159 # model = BoidFlockers(seed=15, population=200, width=100, height=100, vision=5)
160 model = BoidFlockers(seed=15, population=400, width=100, height=100, vision=15)
161
162 start_time = time.perf_counter()
163 for _ in range(100):
164 model.step()
165
166 print(time.perf_counter() - start_time)
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benchmarks/Flocking/flocking.py b/benchmarks/Flocking/flocking.py
--- a/benchmarks/Flocking/flocking.py
+++ b/benchmarks/Flocking/flocking.py
@@ -139,7 +139,6 @@
boid = Boid(
unique_id=i,
model=self,
- pos=pos,
speed=speed,
direction=direction,
vision=vision,
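
For context on why removing that single keyword is enough: `Boid.__init__` in the file above accepts `unique_id`, `model`, `speed`, `direction`, `vision`, `separation` and the three weighting factors, but no `pos` parameter, while the model constructor passed `pos=pos`. The position is attached afterwards by `self.space.place_agent(boid, pos)`, so the agent never needs it at construction time. Below is a minimal, dependency-free sketch of the mismatch; the `Boid` stand-in only mirrors the constructor signature and is not the real agent class.

```python
class Boid:
    """Stand-in mirroring only the constructor signature used in flocking.py."""

    def __init__(self, unique_id, model, speed, direction, vision, separation,
                 cohere=0.03, separate=0.015, match=0.05):
        self.unique_id = unique_id
        self.model = model
        self.speed = speed
        self.direction = direction


# Before the patch, the model built agents with an extra 'pos' keyword
# (flocking.py line 139 onwards), which Python rejects:
try:
    Boid(unique_id=0, model=None, pos=(1.0, 2.0), speed=1,
         direction=(1.0, 0.0), vision=10, separation=1)
except TypeError as err:
    print(err)  # ... got an unexpected keyword argument 'pos'

# After the patch the call simply omits 'pos'; the position is assigned later
# by space.place_agent(boid, pos), matching how the agent actually uses self.pos.
boid = Boid(unique_id=0, model=None, speed=1, direction=(1.0, 0.0),
            vision=10, separation=1)
```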
| {"golden_diff": "diff --git a/benchmarks/Flocking/flocking.py b/benchmarks/Flocking/flocking.py\n--- a/benchmarks/Flocking/flocking.py\n+++ b/benchmarks/Flocking/flocking.py\n@@ -139,7 +139,6 @@\n boid = Boid(\n unique_id=i,\n model=self,\n- pos=pos,\n speed=speed,\n direction=direction,\n vision=vision,\n", "issue": "Fix error in failing flocking benchmark\nOur benchmarks are failing: https://github.com/projectmesa/mesa/actions/workflows/benchmarks.yml\r\n\r\n```bash\r\n08:41:17 starting benchmarks.\r\n08:41:35 Schelling (small) timings: Init 0.00771 s; Run 0.0472 s\r\n08:41:55 Schelling (large) timings: Init 0.05062 s; Run 0.4629 s\r\n08:42:01 WolfSheep (small) timings: Init 0.00333 s; Run 0.0124 s\r\n08:42:[15](https://github.com/projectmesa/mesa/actions/runs/8813652146/job/24191834761#step:7:16) WolfSheep (large) timings: Init 0.05334 s; Run 0.2206 s\r\n File \"/home/runner/work/mesa/mesa/benchmarks/global_benchmark.py\", line 62, in <module>\r\n results = run_experiments(model, config)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/runner/work/mesa/mesa/benchmarks/global_benchmark.py\", line 47, in run_experiments\r\n init_time, run_time = run_model(model_class, seed, config[\"parameters\"])\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/runner/work/mesa/mesa/benchmarks/global_benchmark.py\", line [21](https://github.com/projectmesa/mesa/actions/runs/8813652146/job/24191834761#step:7:22), in run_model\r\n model = model_class(simulator=simulator, seed=seed, **parameters)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/runner/work/mesa/mesa/benchmarks/Flocking/flocking.py\", line 139, in __init__\r\n boid = Boid(\r\n ^^^^^\r\nTypeError: Boid.__init__() got an unexpected keyword argument 'pos'\r\nError: Process completed with exit code 1.\r\n```\r\nMost likely something was changed in [benchmarks/Flocking/flocking.py](https://github.com/projectmesa/mesa/blob/main/benchmarks/Flocking/flocking.py) that needs to be fixed/reverted.\n", "before_files": [{"content": "\"\"\"\nFlockers\n=============================================================\nA Mesa implementation of Craig Reynolds's Boids flocker model.\nUses numpy arrays to represent vectors.\n\"\"\"\n\nimport numpy as np\n\nimport mesa\n\n\nclass Boid(mesa.Agent):\n \"\"\"\n A Boid-style flocker agent.\n\n The agent follows three behaviors to flock:\n - Cohesion: steering towards neighboring agents.\n - Separation: avoiding getting too close to any other agent.\n - Alignment: try to fly in the same direction as the neighbors.\n\n Boids have a vision that defines the radius in which they look for their\n neighbors to flock with. Their speed (a scalar) and direction (a vector)\n define their movement. 
Separation is their desired minimum distance from\n any other Boid.\n \"\"\"\n\n def __init__(\n self,\n unique_id,\n model,\n speed,\n direction,\n vision,\n separation,\n cohere=0.03,\n separate=0.015,\n match=0.05,\n ):\n \"\"\"\n Create a new Boid flocker agent.\n\n Args:\n unique_id: Unique agent identifier.\n speed: Distance to move per step.\n direction: numpy vector for the Boid's direction of movement.\n vision: Radius to look around for nearby Boids.\n separation: Minimum distance to maintain from other Boids.\n cohere: the relative importance of matching neighbors' positions\n separate: the relative importance of avoiding close neighbors\n match: the relative importance of matching neighbors' directions\n\n \"\"\"\n super().__init__(unique_id, model)\n self.speed = speed\n self.direction = direction\n self.vision = vision\n self.separation = separation\n self.cohere_factor = cohere\n self.separate_factor = separate\n self.match_factor = match\n\n def step(self):\n \"\"\"\n Get the Boid's neighbors, compute the new vector, and move accordingly.\n \"\"\"\n\n neighbors = self.model.space.get_neighbors(self.pos, self.vision, False)\n n = 0\n match_vector, separation_vector, cohere = np.zeros((3, 2))\n for neighbor in neighbors:\n n += 1\n heading = self.model.space.get_heading(self.pos, neighbor.pos)\n cohere += heading\n if self.model.space.get_distance(self.pos, neighbor.pos) < self.separation:\n separation_vector -= heading\n match_vector += neighbor.direction\n n = max(n, 1)\n cohere = cohere * self.cohere_factor\n separation_vector = separation_vector * self.separate_factor\n match_vector = match_vector * self.match_factor\n self.direction += (cohere + separation_vector + match_vector) / n\n self.direction /= np.linalg.norm(self.direction)\n new_pos = self.pos + self.direction * self.speed\n self.model.space.move_agent(self, new_pos)\n\n\nclass BoidFlockers(mesa.Model):\n \"\"\"\n Flocker model class. 
Handles agent creation, placement and scheduling.\n \"\"\"\n\n def __init__(\n self,\n seed=None,\n population=100,\n width=100,\n height=100,\n vision=10,\n speed=1,\n separation=1,\n cohere=0.03,\n separate=0.015,\n match=0.05,\n simulator=None,\n ):\n \"\"\"\n Create a new Flockers model.\n\n Args:\n population: Number of Boids\n width, height: Size of the space.\n speed: How fast should the Boids move.\n vision: How far around should each Boid look for its neighbors\n separation: What's the minimum distance each Boid will attempt to\n keep from any other\n cohere, separate, match: factors for the relative importance of\n the three drives.\n \"\"\"\n super().__init__(seed=seed)\n self.population = population\n self.width = width\n self.height = height\n self.simulator = simulator\n\n self.schedule = mesa.time.RandomActivation(self)\n self.space = mesa.space.ContinuousSpace(self.width, self.height, True)\n self.factors = {\n \"cohere\": cohere,\n \"separate\": separate,\n \"match\": match,\n }\n\n for i in range(self.population):\n x = self.random.random() * self.space.x_max\n y = self.random.random() * self.space.y_max\n pos = np.array((x, y))\n direction = np.random.random(2) * 2 - 1\n boid = Boid(\n unique_id=i,\n model=self,\n pos=pos,\n speed=speed,\n direction=direction,\n vision=vision,\n separation=separation,\n **self.factors,\n )\n self.space.place_agent(boid, pos)\n self.schedule.add(boid)\n\n def step(self):\n self.schedule.step()\n\n\nif __name__ == \"__main__\":\n import time\n\n # model = BoidFlockers(seed=15, population=200, width=100, height=100, vision=5)\n model = BoidFlockers(seed=15, population=400, width=100, height=100, vision=15)\n\n start_time = time.perf_counter()\n for _ in range(100):\n model.step()\n\n print(time.perf_counter() - start_time)\n", "path": "benchmarks/Flocking/flocking.py"}], "after_files": [{"content": "\"\"\"\nFlockers\n=============================================================\nA Mesa implementation of Craig Reynolds's Boids flocker model.\nUses numpy arrays to represent vectors.\n\"\"\"\n\nimport numpy as np\n\nimport mesa\n\n\nclass Boid(mesa.Agent):\n \"\"\"\n A Boid-style flocker agent.\n\n The agent follows three behaviors to flock:\n - Cohesion: steering towards neighboring agents.\n - Separation: avoiding getting too close to any other agent.\n - Alignment: try to fly in the same direction as the neighbors.\n\n Boids have a vision that defines the radius in which they look for their\n neighbors to flock with. Their speed (a scalar) and direction (a vector)\n define their movement. 
Separation is their desired minimum distance from\n any other Boid.\n \"\"\"\n\n def __init__(\n self,\n unique_id,\n model,\n speed,\n direction,\n vision,\n separation,\n cohere=0.03,\n separate=0.015,\n match=0.05,\n ):\n \"\"\"\n Create a new Boid flocker agent.\n\n Args:\n unique_id: Unique agent identifier.\n speed: Distance to move per step.\n direction: numpy vector for the Boid's direction of movement.\n vision: Radius to look around for nearby Boids.\n separation: Minimum distance to maintain from other Boids.\n cohere: the relative importance of matching neighbors' positions\n separate: the relative importance of avoiding close neighbors\n match: the relative importance of matching neighbors' directions\n\n \"\"\"\n super().__init__(unique_id, model)\n self.speed = speed\n self.direction = direction\n self.vision = vision\n self.separation = separation\n self.cohere_factor = cohere\n self.separate_factor = separate\n self.match_factor = match\n\n def step(self):\n \"\"\"\n Get the Boid's neighbors, compute the new vector, and move accordingly.\n \"\"\"\n\n neighbors = self.model.space.get_neighbors(self.pos, self.vision, False)\n n = 0\n match_vector, separation_vector, cohere = np.zeros((3, 2))\n for neighbor in neighbors:\n n += 1\n heading = self.model.space.get_heading(self.pos, neighbor.pos)\n cohere += heading\n if self.model.space.get_distance(self.pos, neighbor.pos) < self.separation:\n separation_vector -= heading\n match_vector += neighbor.direction\n n = max(n, 1)\n cohere = cohere * self.cohere_factor\n separation_vector = separation_vector * self.separate_factor\n match_vector = match_vector * self.match_factor\n self.direction += (cohere + separation_vector + match_vector) / n\n self.direction /= np.linalg.norm(self.direction)\n new_pos = self.pos + self.direction * self.speed\n self.model.space.move_agent(self, new_pos)\n\n\nclass BoidFlockers(mesa.Model):\n \"\"\"\n Flocker model class. 
Handles agent creation, placement and scheduling.\n \"\"\"\n\n def __init__(\n self,\n seed=None,\n population=100,\n width=100,\n height=100,\n vision=10,\n speed=1,\n separation=1,\n cohere=0.03,\n separate=0.015,\n match=0.05,\n simulator=None,\n ):\n \"\"\"\n Create a new Flockers model.\n\n Args:\n population: Number of Boids\n width, height: Size of the space.\n speed: How fast should the Boids move.\n vision: How far around should each Boid look for its neighbors\n separation: What's the minimum distance each Boid will attempt to\n keep from any other\n cohere, separate, match: factors for the relative importance of\n the three drives.\n \"\"\"\n super().__init__(seed=seed)\n self.population = population\n self.width = width\n self.height = height\n self.simulator = simulator\n\n self.schedule = mesa.time.RandomActivation(self)\n self.space = mesa.space.ContinuousSpace(self.width, self.height, True)\n self.factors = {\n \"cohere\": cohere,\n \"separate\": separate,\n \"match\": match,\n }\n\n for i in range(self.population):\n x = self.random.random() * self.space.x_max\n y = self.random.random() * self.space.y_max\n pos = np.array((x, y))\n direction = np.random.random(2) * 2 - 1\n boid = Boid(\n unique_id=i,\n model=self,\n speed=speed,\n direction=direction,\n vision=vision,\n separation=separation,\n **self.factors,\n )\n self.space.place_agent(boid, pos)\n self.schedule.add(boid)\n\n def step(self):\n self.schedule.step()\n\n\nif __name__ == \"__main__\":\n import time\n\n # model = BoidFlockers(seed=15, population=200, width=100, height=100, vision=5)\n model = BoidFlockers(seed=15, population=400, width=100, height=100, vision=15)\n\n start_time = time.perf_counter()\n for _ in range(100):\n model.step()\n\n print(time.perf_counter() - start_time)\n", "path": "benchmarks/Flocking/flocking.py"}]} | 2,409 | 94 |
gh_patches_debug_23432 | rasdani/github-patches | git_diff | avocado-framework__avocado-5236 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support for code coverage when running Python tests
Avocado doesn't have a documented and/or supported way of generating code coverage information when running Python-based tests.
The first objective is to drop the use of the custom `selftests/run_coverage` script and the additional job, in favor of reusing an already existing CI job with Avocado:
* running code coverage tools
* keeping the information
* pushing the information
Even though Python-based tests and code coverage are the primary goal of this issue, it's probable that the solution will be about providing a flexible way to add a "wrapper" of sorts before the actual execution of tests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `avocado/core/runners/avocado_instrumented.py`
Content:
```
1 import multiprocessing
2 import tempfile
3 import time
4 import traceback
5
6 from avocado.core import nrunner
7 from avocado.core.runners.utils import messages
8 from avocado.core.test import TestID
9 from avocado.core.tree import TreeNodeEnvOnly
10 from avocado.core.utils import loader
11 from avocado.core.varianter import is_empty_variant
12
13
14 class AvocadoInstrumentedTestRunner(nrunner.BaseRunner):
15 """
16 Runner for Avocado INSTRUMENTED tests
17
18 Runnable attributes usage:
19
20 * uri: path to a test file, combined with an Avocado.Test
21 inherited class name and method. The test file path and
22 class and method names should be separated by a ":". One
23 example of a valid uri is "mytest.py:Class.test_method".
24
25 * args: not used
26 """
27 DEFAULT_TIMEOUT = 86400
28
29 @staticmethod
30 def _create_params(runnable):
31 """Create params for the test"""
32 if runnable.variant is None:
33 return None
34
35 # rebuild the variant tree
36 variant_tree_nodes = [TreeNodeEnvOnly(path, env) for path, env
37 in runnable.variant['variant']]
38
39 if not is_empty_variant(variant_tree_nodes):
40 tree_nodes = variant_tree_nodes
41 paths = runnable.variant['paths']
42 return tree_nodes, paths
43
44 @staticmethod
45 def _run_avocado(runnable, queue):
46 try:
47 # This assumes that a proper resolution (see resolver module)
48 # was performed, and that a URI contains:
49 # 1) path to python module
50 # 2) class
51 # 3) method
52 #
53 # To be defined: if the resolution uri should be composed like
54 # this, or broken down and stored into other data fields
55 module_path, klass_method = runnable.uri.split(':', 1)
56
57 klass, method = klass_method.split('.', 1)
58
59 params = AvocadoInstrumentedTestRunner._create_params(runnable)
60 result_dir = (runnable.output_dir or
61 tempfile.mkdtemp(prefix=".avocado-task"))
62 test_factory = [klass,
63 {'name': TestID(1, runnable.uri),
64 'methodName': method,
65 'config': runnable.config,
66 'modulePath': module_path,
67 'params': params,
68 'tags': runnable.tags,
69 'run.results_dir': result_dir,
70 }]
71
72 messages.start_logging(runnable.config, queue)
73 instance = loader.load_test(test_factory)
74 early_state = instance.get_state()
75 early_state['type'] = "early_state"
76 queue.put(early_state)
77 instance.run_avocado()
78 state = instance.get_state()
79 fail_reason = state.get('fail_reason')
80 queue.put(messages.WhiteboardMessage.get(state['whiteboard']))
81 queue.put(messages.FinishedMessage.get(state['status'].lower(),
82 fail_reason=fail_reason))
83 except Exception as e:
84 queue.put(messages.StderrMessage.get(traceback.format_exc()))
85 queue.put(messages.FinishedMessage.get('error', fail_reason=str(e)))
86
87 def run(self):
88 yield messages.StartedMessage.get()
89 try:
90 queue = multiprocessing.SimpleQueue()
91 process = multiprocessing.Process(target=self._run_avocado,
92 args=(self.runnable, queue))
93
94 process.start()
95
96 time_started = time.monotonic()
97
98 timeout = float(self.DEFAULT_TIMEOUT)
99 most_current_execution_state_time = None
100 while True:
101 time.sleep(nrunner.RUNNER_RUN_CHECK_INTERVAL)
102 now = time.monotonic()
103 if queue.empty():
104 if most_current_execution_state_time is not None:
105 next_execution_state_mark = (most_current_execution_state_time +
106 nrunner.RUNNER_RUN_STATUS_INTERVAL)
107 if (most_current_execution_state_time is None or
108 now > next_execution_state_mark):
109 most_current_execution_state_time = now
110 yield messages.RunningMessage.get()
111 if (now - time_started) > timeout:
112 process.terminate()
113 yield messages.FinishedMessage.get('interrupted',
114 'timeout')
115 break
116 else:
117 message = queue.get()
118 if message.get('type') == 'early_state':
119 timeout = float(message.get('timeout') or
120 self.DEFAULT_TIMEOUT)
121 else:
122 yield message
123 if message.get('status') == 'finished':
124 break
125 except Exception as e:
126 yield messages.StderrMessage.get(traceback.format_exc())
127 yield messages.FinishedMessage.get('error', fail_reason=str(e))
128
129
130 class RunnerApp(nrunner.BaseRunnerApp):
131 PROG_NAME = 'avocado-runner-avocado-instrumented'
132 PROG_DESCRIPTION = 'nrunner application for avocado-instrumented tests'
133 RUNNABLE_KINDS_CAPABLE = {
134 'avocado-instrumented': AvocadoInstrumentedTestRunner
135 }
136
137
138 def main():
139 nrunner.main(RunnerApp)
140
141
142 if __name__ == '__main__':
143 main()
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/avocado/core/runners/avocado_instrumented.py b/avocado/core/runners/avocado_instrumented.py
--- a/avocado/core/runners/avocado_instrumented.py
+++ b/avocado/core/runners/avocado_instrumented.py
@@ -1,4 +1,5 @@
import multiprocessing
+import os
import tempfile
import time
import traceback
@@ -70,11 +71,22 @@
}]
messages.start_logging(runnable.config, queue)
+
+ if 'COVERAGE_RUN' in os.environ:
+ from coverage import Coverage
+ coverage = Coverage()
+ coverage.start()
+
instance = loader.load_test(test_factory)
early_state = instance.get_state()
early_state['type'] = "early_state"
queue.put(early_state)
instance.run_avocado()
+
+ if 'COVERAGE_RUN' in os.environ:
+ coverage.stop()
+ coverage.save()
+
state = instance.get_state()
fail_reason = state.get('fail_reason')
queue.put(messages.WhiteboardMessage.get(state['whiteboard']))
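
One way to read the patch: each avocado-instrumented test runs in its own child process, so a parent `coverage run` never sees the lines executed there; the child therefore starts its own `coverage.Coverage()` measurement around the test whenever `COVERAGE_RUN` is present in the environment (recent coverage.py releases export that variable for the program being measured). The sketch below shows the same pattern in isolation; `run_test` is a hypothetical stand-in for `instance.run_avocado()`, and it assumes a `.coveragerc` with `parallel = True` under `[run]` so the per-process data files can later be merged with `coverage combine` in the existing CI job.

```python
import os


def run_with_optional_coverage(run_test):
    """Measure this process only when the parent is itself running under coverage."""
    cov = None
    if 'COVERAGE_RUN' in os.environ:
        from coverage import Coverage
        cov = Coverage()   # picks up .coveragerc; parallel=True keeps data files separate
        cov.start()
    try:
        return run_test()
    finally:
        if cov is not None:
            cov.stop()
            cov.save()     # one data file per process, merged later with `coverage combine`
```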
| {"golden_diff": "diff --git a/avocado/core/runners/avocado_instrumented.py b/avocado/core/runners/avocado_instrumented.py\n--- a/avocado/core/runners/avocado_instrumented.py\n+++ b/avocado/core/runners/avocado_instrumented.py\n@@ -1,4 +1,5 @@\n import multiprocessing\n+import os\n import tempfile\n import time\n import traceback\n@@ -70,11 +71,22 @@\n }]\n \n messages.start_logging(runnable.config, queue)\n+\n+ if 'COVERAGE_RUN' in os.environ:\n+ from coverage import Coverage\n+ coverage = Coverage()\n+ coverage.start()\n+\n instance = loader.load_test(test_factory)\n early_state = instance.get_state()\n early_state['type'] = \"early_state\"\n queue.put(early_state)\n instance.run_avocado()\n+\n+ if 'COVERAGE_RUN' in os.environ:\n+ coverage.stop()\n+ coverage.save()\n+\n state = instance.get_state()\n fail_reason = state.get('fail_reason')\n queue.put(messages.WhiteboardMessage.get(state['whiteboard']))\n", "issue": "Support for codecoverage when running Python tests\nAvocado doesn't have a documented and/or supported way for generating code coverage information when running Python based tests.\r\n\r\nThe first objective is to drop the use of the custom `selftests/run_coverage` script and the additional job, in favor of reusing one already existing CI job with Avocado:\r\n\r\n * running code coverage tools\r\n * keeping the information\r\n * pushing the information\r\n\r\nEven though Python based tests and code coverage is the primary goal of this issue, it's probable that the solution will be about providing a flexible way to add a \"wrapper\" of sorts before the actual execution of tests.\r\n \n", "before_files": [{"content": "import multiprocessing\nimport tempfile\nimport time\nimport traceback\n\nfrom avocado.core import nrunner\nfrom avocado.core.runners.utils import messages\nfrom avocado.core.test import TestID\nfrom avocado.core.tree import TreeNodeEnvOnly\nfrom avocado.core.utils import loader\nfrom avocado.core.varianter import is_empty_variant\n\n\nclass AvocadoInstrumentedTestRunner(nrunner.BaseRunner):\n \"\"\"\n Runner for Avocado INSTRUMENTED tests\n\n Runnable attributes usage:\n\n * uri: path to a test file, combined with an Avocado.Test\n inherited class name and method. The test file path and\n class and method names should be separated by a \":\". 
One\n example of a valid uri is \"mytest.py:Class.test_method\".\n\n * args: not used\n \"\"\"\n DEFAULT_TIMEOUT = 86400\n\n @staticmethod\n def _create_params(runnable):\n \"\"\"Create params for the test\"\"\"\n if runnable.variant is None:\n return None\n\n # rebuild the variant tree\n variant_tree_nodes = [TreeNodeEnvOnly(path, env) for path, env\n in runnable.variant['variant']]\n\n if not is_empty_variant(variant_tree_nodes):\n tree_nodes = variant_tree_nodes\n paths = runnable.variant['paths']\n return tree_nodes, paths\n\n @staticmethod\n def _run_avocado(runnable, queue):\n try:\n # This assumes that a proper resolution (see resolver module)\n # was performed, and that a URI contains:\n # 1) path to python module\n # 2) class\n # 3) method\n #\n # To be defined: if the resolution uri should be composed like\n # this, or broken down and stored into other data fields\n module_path, klass_method = runnable.uri.split(':', 1)\n\n klass, method = klass_method.split('.', 1)\n\n params = AvocadoInstrumentedTestRunner._create_params(runnable)\n result_dir = (runnable.output_dir or\n tempfile.mkdtemp(prefix=\".avocado-task\"))\n test_factory = [klass,\n {'name': TestID(1, runnable.uri),\n 'methodName': method,\n 'config': runnable.config,\n 'modulePath': module_path,\n 'params': params,\n 'tags': runnable.tags,\n 'run.results_dir': result_dir,\n }]\n\n messages.start_logging(runnable.config, queue)\n instance = loader.load_test(test_factory)\n early_state = instance.get_state()\n early_state['type'] = \"early_state\"\n queue.put(early_state)\n instance.run_avocado()\n state = instance.get_state()\n fail_reason = state.get('fail_reason')\n queue.put(messages.WhiteboardMessage.get(state['whiteboard']))\n queue.put(messages.FinishedMessage.get(state['status'].lower(),\n fail_reason=fail_reason))\n except Exception as e:\n queue.put(messages.StderrMessage.get(traceback.format_exc()))\n queue.put(messages.FinishedMessage.get('error', fail_reason=str(e)))\n\n def run(self):\n yield messages.StartedMessage.get()\n try:\n queue = multiprocessing.SimpleQueue()\n process = multiprocessing.Process(target=self._run_avocado,\n args=(self.runnable, queue))\n\n process.start()\n\n time_started = time.monotonic()\n\n timeout = float(self.DEFAULT_TIMEOUT)\n most_current_execution_state_time = None\n while True:\n time.sleep(nrunner.RUNNER_RUN_CHECK_INTERVAL)\n now = time.monotonic()\n if queue.empty():\n if most_current_execution_state_time is not None:\n next_execution_state_mark = (most_current_execution_state_time +\n nrunner.RUNNER_RUN_STATUS_INTERVAL)\n if (most_current_execution_state_time is None or\n now > next_execution_state_mark):\n most_current_execution_state_time = now\n yield messages.RunningMessage.get()\n if (now - time_started) > timeout:\n process.terminate()\n yield messages.FinishedMessage.get('interrupted',\n 'timeout')\n break\n else:\n message = queue.get()\n if message.get('type') == 'early_state':\n timeout = float(message.get('timeout') or\n self.DEFAULT_TIMEOUT)\n else:\n yield message\n if message.get('status') == 'finished':\n break\n except Exception as e:\n yield messages.StderrMessage.get(traceback.format_exc())\n yield messages.FinishedMessage.get('error', fail_reason=str(e))\n\n\nclass RunnerApp(nrunner.BaseRunnerApp):\n PROG_NAME = 'avocado-runner-avocado-instrumented'\n PROG_DESCRIPTION = 'nrunner application for avocado-instrumented tests'\n RUNNABLE_KINDS_CAPABLE = {\n 'avocado-instrumented': AvocadoInstrumentedTestRunner\n }\n\n\ndef main():\n 
nrunner.main(RunnerApp)\n\n\nif __name__ == '__main__':\n main()\n", "path": "avocado/core/runners/avocado_instrumented.py"}], "after_files": [{"content": "import multiprocessing\nimport os\nimport tempfile\nimport time\nimport traceback\n\nfrom avocado.core import nrunner\nfrom avocado.core.runners.utils import messages\nfrom avocado.core.test import TestID\nfrom avocado.core.tree import TreeNodeEnvOnly\nfrom avocado.core.utils import loader\nfrom avocado.core.varianter import is_empty_variant\n\n\nclass AvocadoInstrumentedTestRunner(nrunner.BaseRunner):\n \"\"\"\n Runner for Avocado INSTRUMENTED tests\n\n Runnable attributes usage:\n\n * uri: path to a test file, combined with an Avocado.Test\n inherited class name and method. The test file path and\n class and method names should be separated by a \":\". One\n example of a valid uri is \"mytest.py:Class.test_method\".\n\n * args: not used\n \"\"\"\n DEFAULT_TIMEOUT = 86400\n\n @staticmethod\n def _create_params(runnable):\n \"\"\"Create params for the test\"\"\"\n if runnable.variant is None:\n return None\n\n # rebuild the variant tree\n variant_tree_nodes = [TreeNodeEnvOnly(path, env) for path, env\n in runnable.variant['variant']]\n\n if not is_empty_variant(variant_tree_nodes):\n tree_nodes = variant_tree_nodes\n paths = runnable.variant['paths']\n return tree_nodes, paths\n\n @staticmethod\n def _run_avocado(runnable, queue):\n try:\n # This assumes that a proper resolution (see resolver module)\n # was performed, and that a URI contains:\n # 1) path to python module\n # 2) class\n # 3) method\n #\n # To be defined: if the resolution uri should be composed like\n # this, or broken down and stored into other data fields\n module_path, klass_method = runnable.uri.split(':', 1)\n\n klass, method = klass_method.split('.', 1)\n\n params = AvocadoInstrumentedTestRunner._create_params(runnable)\n result_dir = (runnable.output_dir or\n tempfile.mkdtemp(prefix=\".avocado-task\"))\n test_factory = [klass,\n {'name': TestID(1, runnable.uri),\n 'methodName': method,\n 'config': runnable.config,\n 'modulePath': module_path,\n 'params': params,\n 'tags': runnable.tags,\n 'run.results_dir': result_dir,\n }]\n\n messages.start_logging(runnable.config, queue)\n\n if 'COVERAGE_RUN' in os.environ:\n from coverage import Coverage\n coverage = Coverage()\n coverage.start()\n\n instance = loader.load_test(test_factory)\n early_state = instance.get_state()\n early_state['type'] = \"early_state\"\n queue.put(early_state)\n instance.run_avocado()\n\n if 'COVERAGE_RUN' in os.environ:\n coverage.stop()\n coverage.save()\n\n state = instance.get_state()\n fail_reason = state.get('fail_reason')\n queue.put(messages.WhiteboardMessage.get(state['whiteboard']))\n queue.put(messages.FinishedMessage.get(state['status'].lower(),\n fail_reason=fail_reason))\n except Exception as e:\n queue.put(messages.StderrMessage.get(traceback.format_exc()))\n queue.put(messages.FinishedMessage.get('error', fail_reason=str(e)))\n\n def run(self):\n yield messages.StartedMessage.get()\n try:\n queue = multiprocessing.SimpleQueue()\n process = multiprocessing.Process(target=self._run_avocado,\n args=(self.runnable, queue))\n\n process.start()\n\n time_started = time.monotonic()\n\n timeout = float(self.DEFAULT_TIMEOUT)\n most_current_execution_state_time = None\n while True:\n time.sleep(nrunner.RUNNER_RUN_CHECK_INTERVAL)\n now = time.monotonic()\n if queue.empty():\n if most_current_execution_state_time is not None:\n next_execution_state_mark = (most_current_execution_state_time 
+\n nrunner.RUNNER_RUN_STATUS_INTERVAL)\n if (most_current_execution_state_time is None or\n now > next_execution_state_mark):\n most_current_execution_state_time = now\n yield messages.RunningMessage.get()\n if (now - time_started) > timeout:\n process.terminate()\n yield messages.FinishedMessage.get('interrupted',\n 'timeout')\n break\n else:\n message = queue.get()\n if message.get('type') == 'early_state':\n timeout = float(message.get('timeout') or\n self.DEFAULT_TIMEOUT)\n else:\n yield message\n if message.get('status') == 'finished':\n break\n except Exception as e:\n yield messages.StderrMessage.get(traceback.format_exc())\n yield messages.FinishedMessage.get('error', fail_reason=str(e))\n\n\nclass RunnerApp(nrunner.BaseRunnerApp):\n PROG_NAME = 'avocado-runner-avocado-instrumented'\n PROG_DESCRIPTION = 'nrunner application for avocado-instrumented tests'\n RUNNABLE_KINDS_CAPABLE = {\n 'avocado-instrumented': AvocadoInstrumentedTestRunner\n }\n\n\ndef main():\n nrunner.main(RunnerApp)\n\n\nif __name__ == '__main__':\n main()\n", "path": "avocado/core/runners/avocado_instrumented.py"}]} | 1,758 | 244 |
gh_patches_debug_60946 | rasdani/github-patches | git_diff | Netflix__lemur-713 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Getting error while creating authority with lemur_cryptography plugin
I added a comment to the offending commit code here: https://github.com/Netflix/lemur/commit/d53f64890cb656765bc1c18f4b8442ee3a592f47
Upon creating an authority certificate with the lemur_cryptography plugin here, I get an error because unit is None and cannot be compared to 90. Is it reasonable to make a validation assumption that unit should be 0 if undefined?
I haven't quite been able to trace how this validation function is called, so I'm not entirely clear on what it is doing or why it's involved in the creation of an authority certificate, but not in the creation of a certificate signed by an authority.
Here's the error I get upon submitting an authority for creation.
```
2017-03-07 01:44:41,352 ERROR: Exception on /api/1/authorities [POST] [in /home/lemur/venv/lib/python3.4/site-packages/flask/app.py:1560]
Traceback (most recent call last):
File "/home/lemur/venv/lib/python3.4/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/home/lemur/venv/lib/python3.4/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/lemur/venv/lib/python3.4/site-packages/flask_restful/__init__.py", line 477, in wrapper
resp = resource(*args, **kwargs)
File "/home/lemur/venv/lib/python3.4/site-packages/flask/views.py", line 84, in view
return self.dispatch_request(*args, **kwargs)
File "/home/lemur/venv/lib/python3.4/site-packages/flask_restful/__init__.py", line 587, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/lemur/app/lemur/auth/service.py", line 110, in decorated_function
return f(*args, **kwargs)
File "/home/lemur/app/lemur/common/schema.py", line 150, in decorated_function
data, errors = input_schema.load(request_data)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/schema.py", line 578, in load
result, errors = self._do_load(data, many, partial=partial, postprocess=True)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/schema.py", line 658, in _do_load
index_errors=self.opts.index_errors,
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py", line 295, in deserialize
index=(index if index_errors else None)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py", line 68, in call_and_store
value = getter_func(data)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py", line 288, in <lambda>
data
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/fields.py", line 265, in deserialize
output = self._deserialize(value, attr, data)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/fields.py", line 465, in _deserialize
data, errors = self.schema.load(value)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/schema.py", line 578, in load
result, errors = self._do_load(data, many, partial=partial, postprocess=True)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/schema.py", line 658, in _do_load
index_errors=self.opts.index_errors,
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py", line 295, in deserialize
index=(index if index_errors else None)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py", line 68, in call_and_store
value = getter_func(data)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py", line 288, in <lambda>
data
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/fields.py", line 266, in deserialize
self._validate(output)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/fields.py", line 196, in _validate
r = validator(value)
File "/home/lemur/app/lemur/schemas.py", line 44, in validate_options
if unit > 90:
TypeError: unorderable types: NoneType() > int()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lemur/schemas.py`
Content:
```
1 """
2 .. module: lemur.schemas
3 :platform: unix
4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
5 :license: Apache, see LICENSE for more details.
6
7 .. moduleauthor:: Kevin Glisson <[email protected]>
8
9 """
10 from sqlalchemy.orm.exc import NoResultFound
11
12 from marshmallow import fields, post_load, pre_load, post_dump
13 from marshmallow.exceptions import ValidationError
14
15 from lemur.common import validators
16 from lemur.common.schema import LemurSchema, LemurInputSchema, LemurOutputSchema
17 from lemur.common.fields import KeyUsageExtension, ExtendedKeyUsageExtension, BasicConstraintsExtension, SubjectAlternativeNameExtension
18
19 from lemur.plugins import plugins
20 from lemur.plugins.utils import get_plugin_option
21 from lemur.roles.models import Role
22 from lemur.users.models import User
23 from lemur.authorities.models import Authority
24 from lemur.certificates.models import Certificate
25 from lemur.destinations.models import Destination
26 from lemur.notifications.models import Notification
27
28
29 def validate_options(options):
30 """
31 Ensures that the plugin options are valid.
32 :param options:
33 :return:
34 """
35 interval = get_plugin_option('interval', options)
36 unit = get_plugin_option('unit', options)
37
38 if interval == 'month':
39 unit *= 30
40
41 elif interval == 'week':
42 unit *= 7
43
44 if unit > 90:
45 raise ValidationError('Notification cannot be more than 90 days into the future.')
46
47
48 def get_object_attribute(data, many=False):
49 if many:
50 ids = [d.get('id') for d in data]
51 names = [d.get('name') for d in data]
52
53 if None in ids:
54 if None in names:
55 raise ValidationError('Associated object require a name or id.')
56 else:
57 return 'name'
58 return 'id'
59 else:
60 if data.get('id'):
61 return 'id'
62 elif data.get('name'):
63 return 'name'
64 else:
65 raise ValidationError('Associated object require a name or id.')
66
67
68 def fetch_objects(model, data, many=False):
69 attr = get_object_attribute(data, many=many)
70
71 if many:
72 values = [v[attr] for v in data]
73 items = model.query.filter(getattr(model, attr).in_(values)).all()
74 found = [getattr(i, attr) for i in items]
75 diff = set(values).symmetric_difference(set(found))
76
77 if diff:
78 raise ValidationError('Unable to locate {model} with {attr} {diff}'.format(
79 model=model,
80 attr=attr,
81 diff=",".join(list(diff))))
82
83 return items
84
85 else:
86 try:
87 return model.query.filter(getattr(model, attr) == data[attr]).one()
88 except NoResultFound:
89 raise ValidationError('Unable to find {model} with {attr}: {data}'.format(
90 model=model,
91 attr=attr,
92 data=data[attr]))
93
94
95 class AssociatedAuthoritySchema(LemurInputSchema):
96 id = fields.Int()
97 name = fields.String()
98
99 @post_load
100 def get_object(self, data, many=False):
101 return fetch_objects(Authority, data, many=many)
102
103
104 class AssociatedRoleSchema(LemurInputSchema):
105 id = fields.Int()
106 name = fields.String()
107
108 @post_load
109 def get_object(self, data, many=False):
110 return fetch_objects(Role, data, many=many)
111
112
113 class AssociatedDestinationSchema(LemurInputSchema):
114 id = fields.Int()
115 name = fields.String()
116
117 @post_load
118 def get_object(self, data, many=False):
119 return fetch_objects(Destination, data, many=many)
120
121
122 class AssociatedNotificationSchema(LemurInputSchema):
123 id = fields.Int()
124 name = fields.String()
125
126 @post_load
127 def get_object(self, data, many=False):
128 return fetch_objects(Notification, data, many=many)
129
130
131 class AssociatedCertificateSchema(LemurInputSchema):
132 id = fields.Int()
133 name = fields.String()
134
135 @post_load
136 def get_object(self, data, many=False):
137 return fetch_objects(Certificate, data, many=many)
138
139
140 class AssociatedUserSchema(LemurInputSchema):
141 id = fields.Int()
142 name = fields.String()
143
144 @post_load
145 def get_object(self, data, many=False):
146 return fetch_objects(User, data, many=many)
147
148
149 class PluginInputSchema(LemurInputSchema):
150 plugin_options = fields.List(fields.Dict(), validate=validate_options)
151 slug = fields.String(required=True)
152 title = fields.String()
153 description = fields.String()
154
155 @post_load
156 def get_object(self, data, many=False):
157 try:
158 data['plugin_object'] = plugins.get(data['slug'])
159 return data
160 except Exception:
161 raise ValidationError('Unable to find plugin: {0}'.format(data['slug']))
162
163
164 class PluginOutputSchema(LemurOutputSchema):
165 id = fields.Integer()
166 label = fields.String()
167 description = fields.String()
168 active = fields.Boolean()
169 options = fields.List(fields.Dict(), dump_to='pluginOptions')
170 slug = fields.String()
171 title = fields.String()
172
173
174 plugins_output_schema = PluginOutputSchema(many=True)
175 plugin_output_schema = PluginOutputSchema
176
177
178 class BaseExtensionSchema(LemurSchema):
179 @pre_load(pass_many=True)
180 def preprocess(self, data, many):
181 return self.under(data, many=many)
182
183 @post_dump(pass_many=True)
184 def post_process(self, data, many):
185 if data:
186 data = self.camel(data, many=many)
187 return data
188
189
190 class AuthorityKeyIdentifierSchema(BaseExtensionSchema):
191 use_key_identifier = fields.Boolean()
192 use_authority_cert = fields.Boolean()
193
194
195 class CertificateInfoAccessSchema(BaseExtensionSchema):
196 include_aia = fields.Boolean()
197
198 @post_dump
199 def handle_keys(self, data):
200 return {'includeAIA': data['include_aia']}
201
202
203 class SubjectKeyIdentifierSchema(BaseExtensionSchema):
204 include_ski = fields.Boolean()
205
206 @post_dump
207 def handle_keys(self, data):
208 return {'includeSKI': data['include_ski']}
209
210
211 class CustomOIDSchema(BaseExtensionSchema):
212 oid = fields.String()
213 encoding = fields.String(validate=validators.encoding)
214 value = fields.String()
215 is_critical = fields.Boolean()
216
217
218 class NamesSchema(BaseExtensionSchema):
219 names = SubjectAlternativeNameExtension()
220
221
222 class ExtensionSchema(BaseExtensionSchema):
223 basic_constraints = BasicConstraintsExtension(missing={'ca': False})
224 key_usage = KeyUsageExtension()
225 extended_key_usage = ExtendedKeyUsageExtension()
226 subject_key_identifier = fields.Nested(SubjectKeyIdentifierSchema)
227 sub_alt_names = fields.Nested(NamesSchema)
228 authority_key_identifier = fields.Nested(AuthorityKeyIdentifierSchema)
229 certificate_info_access = fields.Nested(CertificateInfoAccessSchema)
230 # FIXME: Convert custom OIDs to a custom field in fields.py like other Extensions
231 # FIXME: Remove support in UI for Critical custom extensions https://github.com/Netflix/lemur/issues/665
232 custom = fields.List(fields.Nested(CustomOIDSchema))
233
234
235 class EndpointNestedOutputSchema(LemurOutputSchema):
236 __envelope__ = False
237 id = fields.Integer()
238 description = fields.String()
239 name = fields.String()
240 dnsname = fields.String()
241 owner = fields.Email()
242 type = fields.String()
243 active = fields.Boolean()
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lemur/schemas.py b/lemur/schemas.py
--- a/lemur/schemas.py
+++ b/lemur/schemas.py
@@ -35,6 +35,9 @@
interval = get_plugin_option('interval', options)
unit = get_plugin_option('unit', options)
+ if not interval and not unit:
+ return
+
if interval == 'month':
unit *= 30
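For context, the issue this patch addresses reports a `TypeError` (`unorderable types: NoneType() > int()`) raised from `if unit > 90:` when an authority is created without any notification interval/unit options — `get_plugin_option` then returns `None` for both values, and the guard added above skips the range check in that case. A minimal illustration of the failure mode (values are hypothetical):

```python
# Hypothetical values reproducing the reported failure
# (the exact error message differs between Python versions).
unit = None   # get_plugin_option('unit', options) when no 'unit' option was supplied
unit > 90     # raises TypeError: comparison between NoneType and int
```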
| {"golden_diff": "diff --git a/lemur/schemas.py b/lemur/schemas.py\n--- a/lemur/schemas.py\n+++ b/lemur/schemas.py\n@@ -35,6 +35,9 @@\n interval = get_plugin_option('interval', options)\n unit = get_plugin_option('unit', options)\n \n+ if not interval and not unit:\n+ return\n+\n if interval == 'month':\n unit *= 30\n", "issue": "Getting error while creating authority with lemur_cryptography plugin\nI added a comment to the offending commit code here: https://github.com/Netflix/lemur/commit/d53f64890cb656765bc1c18f4b8442ee3a592f47\r\n\r\nUpon creating an authority certificate with the lemur_cryptography plugin here, I get an error because unit is None and cannot be compared to 90. Is it reasonable to make an validation assumption that unit should be 0 if undefined?\r\n\r\nI haven't quite been able to trace how this validation function is called, so I'm not entirely clear on what it is doing or why it's involved in the creation of an authority certificate, but not in the creation of a certificate signed by an authority.\r\n\r\n\r\nHere's the error I get upon submitting an authority for creation.\r\n```\r\n2017-03-07 01:44:41,352 ERROR: Exception on /api/1/authorities [POST] [in /home/lemur/venv/lib/python3.4/site-packages/flask/app.py:1560]\r\nTraceback (most recent call last):\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/flask/app.py\", line 1612, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/flask/app.py\", line 1598, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/flask_restful/__init__.py\", line 477, in wrapper\r\n resp = resource(*args, **kwargs)\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/flask/views.py\", line 84, in view\r\n return self.dispatch_request(*args, **kwargs)\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/flask_restful/__init__.py\", line 587, in dispatch_request\r\n resp = meth(*args, **kwargs)\r\n File \"/home/lemur/app/lemur/auth/service.py\", line 110, in decorated_function\r\n return f(*args, **kwargs)\r\n File \"/home/lemur/app/lemur/common/schema.py\", line 150, in decorated_function\r\n data, errors = input_schema.load(request_data)\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/schema.py\", line 578, in load\r\n result, errors = self._do_load(data, many, partial=partial, postprocess=True)\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/schema.py\", line 658, in _do_load\r\n index_errors=self.opts.index_errors,\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py\", line 295, in deserialize\r\n index=(index if index_errors else None)\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py\", line 68, in call_and_store\r\n value = getter_func(data)\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py\", line 288, in <lambda>\r\n data\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/fields.py\", line 265, in deserialize\r\n output = self._deserialize(value, attr, data)\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/fields.py\", line 465, in _deserialize\r\n data, errors = self.schema.load(value)\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/schema.py\", line 578, in load\r\n result, errors = self._do_load(data, many, partial=partial, postprocess=True)\r\n File 
\"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/schema.py\", line 658, in _do_load\r\n index_errors=self.opts.index_errors,\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py\", line 295, in deserialize\r\n index=(index if index_errors else None)\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py\", line 68, in call_and_store\r\n value = getter_func(data)\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py\", line 288, in <lambda>\r\n data\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/fields.py\", line 266, in deserialize\r\n self._validate(output)\r\n File \"/home/lemur/venv/lib/python3.4/site-packages/marshmallow/fields.py\", line 196, in _validate\r\n r = validator(value)\r\n File \"/home/lemur/app/lemur/schemas.py\", line 44, in validate_options\r\n if unit > 90:\r\nTypeError: unorderable types: NoneType() > int()\r\n```\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.schemas\n :platform: unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom marshmallow import fields, post_load, pre_load, post_dump\nfrom marshmallow.exceptions import ValidationError\n\nfrom lemur.common import validators\nfrom lemur.common.schema import LemurSchema, LemurInputSchema, LemurOutputSchema\nfrom lemur.common.fields import KeyUsageExtension, ExtendedKeyUsageExtension, BasicConstraintsExtension, SubjectAlternativeNameExtension\n\nfrom lemur.plugins import plugins\nfrom lemur.plugins.utils import get_plugin_option\nfrom lemur.roles.models import Role\nfrom lemur.users.models import User\nfrom lemur.authorities.models import Authority\nfrom lemur.certificates.models import Certificate\nfrom lemur.destinations.models import Destination\nfrom lemur.notifications.models import Notification\n\n\ndef validate_options(options):\n \"\"\"\n Ensures that the plugin options are valid.\n :param options:\n :return:\n \"\"\"\n interval = get_plugin_option('interval', options)\n unit = get_plugin_option('unit', options)\n\n if interval == 'month':\n unit *= 30\n\n elif interval == 'week':\n unit *= 7\n\n if unit > 90:\n raise ValidationError('Notification cannot be more than 90 days into the future.')\n\n\ndef get_object_attribute(data, many=False):\n if many:\n ids = [d.get('id') for d in data]\n names = [d.get('name') for d in data]\n\n if None in ids:\n if None in names:\n raise ValidationError('Associated object require a name or id.')\n else:\n return 'name'\n return 'id'\n else:\n if data.get('id'):\n return 'id'\n elif data.get('name'):\n return 'name'\n else:\n raise ValidationError('Associated object require a name or id.')\n\n\ndef fetch_objects(model, data, many=False):\n attr = get_object_attribute(data, many=many)\n\n if many:\n values = [v[attr] for v in data]\n items = model.query.filter(getattr(model, attr).in_(values)).all()\n found = [getattr(i, attr) for i in items]\n diff = set(values).symmetric_difference(set(found))\n\n if diff:\n raise ValidationError('Unable to locate {model} with {attr} {diff}'.format(\n model=model,\n attr=attr,\n diff=\",\".join(list(diff))))\n\n return items\n\n else:\n try:\n return model.query.filter(getattr(model, attr) == data[attr]).one()\n except NoResultFound:\n raise ValidationError('Unable to find {model} with {attr}: {data}'.format(\n model=model,\n 
attr=attr,\n data=data[attr]))\n\n\nclass AssociatedAuthoritySchema(LemurInputSchema):\n id = fields.Int()\n name = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n return fetch_objects(Authority, data, many=many)\n\n\nclass AssociatedRoleSchema(LemurInputSchema):\n id = fields.Int()\n name = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n return fetch_objects(Role, data, many=many)\n\n\nclass AssociatedDestinationSchema(LemurInputSchema):\n id = fields.Int()\n name = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n return fetch_objects(Destination, data, many=many)\n\n\nclass AssociatedNotificationSchema(LemurInputSchema):\n id = fields.Int()\n name = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n return fetch_objects(Notification, data, many=many)\n\n\nclass AssociatedCertificateSchema(LemurInputSchema):\n id = fields.Int()\n name = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n return fetch_objects(Certificate, data, many=many)\n\n\nclass AssociatedUserSchema(LemurInputSchema):\n id = fields.Int()\n name = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n return fetch_objects(User, data, many=many)\n\n\nclass PluginInputSchema(LemurInputSchema):\n plugin_options = fields.List(fields.Dict(), validate=validate_options)\n slug = fields.String(required=True)\n title = fields.String()\n description = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n try:\n data['plugin_object'] = plugins.get(data['slug'])\n return data\n except Exception:\n raise ValidationError('Unable to find plugin: {0}'.format(data['slug']))\n\n\nclass PluginOutputSchema(LemurOutputSchema):\n id = fields.Integer()\n label = fields.String()\n description = fields.String()\n active = fields.Boolean()\n options = fields.List(fields.Dict(), dump_to='pluginOptions')\n slug = fields.String()\n title = fields.String()\n\n\nplugins_output_schema = PluginOutputSchema(many=True)\nplugin_output_schema = PluginOutputSchema\n\n\nclass BaseExtensionSchema(LemurSchema):\n @pre_load(pass_many=True)\n def preprocess(self, data, many):\n return self.under(data, many=many)\n\n @post_dump(pass_many=True)\n def post_process(self, data, many):\n if data:\n data = self.camel(data, many=many)\n return data\n\n\nclass AuthorityKeyIdentifierSchema(BaseExtensionSchema):\n use_key_identifier = fields.Boolean()\n use_authority_cert = fields.Boolean()\n\n\nclass CertificateInfoAccessSchema(BaseExtensionSchema):\n include_aia = fields.Boolean()\n\n @post_dump\n def handle_keys(self, data):\n return {'includeAIA': data['include_aia']}\n\n\nclass SubjectKeyIdentifierSchema(BaseExtensionSchema):\n include_ski = fields.Boolean()\n\n @post_dump\n def handle_keys(self, data):\n return {'includeSKI': data['include_ski']}\n\n\nclass CustomOIDSchema(BaseExtensionSchema):\n oid = fields.String()\n encoding = fields.String(validate=validators.encoding)\n value = fields.String()\n is_critical = fields.Boolean()\n\n\nclass NamesSchema(BaseExtensionSchema):\n names = SubjectAlternativeNameExtension()\n\n\nclass ExtensionSchema(BaseExtensionSchema):\n basic_constraints = BasicConstraintsExtension(missing={'ca': False})\n key_usage = KeyUsageExtension()\n extended_key_usage = ExtendedKeyUsageExtension()\n subject_key_identifier = fields.Nested(SubjectKeyIdentifierSchema)\n sub_alt_names = fields.Nested(NamesSchema)\n authority_key_identifier = 
fields.Nested(AuthorityKeyIdentifierSchema)\n certificate_info_access = fields.Nested(CertificateInfoAccessSchema)\n # FIXME: Convert custom OIDs to a custom field in fields.py like other Extensions\n # FIXME: Remove support in UI for Critical custom extensions https://github.com/Netflix/lemur/issues/665\n custom = fields.List(fields.Nested(CustomOIDSchema))\n\n\nclass EndpointNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n id = fields.Integer()\n description = fields.String()\n name = fields.String()\n dnsname = fields.String()\n owner = fields.Email()\n type = fields.String()\n active = fields.Boolean()\n", "path": "lemur/schemas.py"}], "after_files": [{"content": "\"\"\"\n.. module: lemur.schemas\n :platform: unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom marshmallow import fields, post_load, pre_load, post_dump\nfrom marshmallow.exceptions import ValidationError\n\nfrom lemur.common import validators\nfrom lemur.common.schema import LemurSchema, LemurInputSchema, LemurOutputSchema\nfrom lemur.common.fields import KeyUsageExtension, ExtendedKeyUsageExtension, BasicConstraintsExtension, SubjectAlternativeNameExtension\n\nfrom lemur.plugins import plugins\nfrom lemur.plugins.utils import get_plugin_option\nfrom lemur.roles.models import Role\nfrom lemur.users.models import User\nfrom lemur.authorities.models import Authority\nfrom lemur.certificates.models import Certificate\nfrom lemur.destinations.models import Destination\nfrom lemur.notifications.models import Notification\n\n\ndef validate_options(options):\n \"\"\"\n Ensures that the plugin options are valid.\n :param options:\n :return:\n \"\"\"\n interval = get_plugin_option('interval', options)\n unit = get_plugin_option('unit', options)\n\n if not interval and not unit:\n return\n\n if interval == 'month':\n unit *= 30\n\n elif interval == 'week':\n unit *= 7\n\n if unit > 90:\n raise ValidationError('Notification cannot be more than 90 days into the future.')\n\n\ndef get_object_attribute(data, many=False):\n if many:\n ids = [d.get('id') for d in data]\n names = [d.get('name') for d in data]\n\n if None in ids:\n if None in names:\n raise ValidationError('Associated object require a name or id.')\n else:\n return 'name'\n return 'id'\n else:\n if data.get('id'):\n return 'id'\n elif data.get('name'):\n return 'name'\n else:\n raise ValidationError('Associated object require a name or id.')\n\n\ndef fetch_objects(model, data, many=False):\n attr = get_object_attribute(data, many=many)\n\n if many:\n values = [v[attr] for v in data]\n items = model.query.filter(getattr(model, attr).in_(values)).all()\n found = [getattr(i, attr) for i in items]\n diff = set(values).symmetric_difference(set(found))\n\n if diff:\n raise ValidationError('Unable to locate {model} with {attr} {diff}'.format(\n model=model,\n attr=attr,\n diff=\",\".join(list(diff))))\n\n return items\n\n else:\n try:\n return model.query.filter(getattr(model, attr) == data[attr]).one()\n except NoResultFound:\n raise ValidationError('Unable to find {model} with {attr}: {data}'.format(\n model=model,\n attr=attr,\n data=data[attr]))\n\n\nclass AssociatedAuthoritySchema(LemurInputSchema):\n id = fields.Int()\n name = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n return fetch_objects(Authority, data, many=many)\n\n\nclass 
AssociatedRoleSchema(LemurInputSchema):\n id = fields.Int()\n name = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n return fetch_objects(Role, data, many=many)\n\n\nclass AssociatedDestinationSchema(LemurInputSchema):\n id = fields.Int()\n name = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n return fetch_objects(Destination, data, many=many)\n\n\nclass AssociatedNotificationSchema(LemurInputSchema):\n id = fields.Int()\n name = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n return fetch_objects(Notification, data, many=many)\n\n\nclass AssociatedCertificateSchema(LemurInputSchema):\n id = fields.Int()\n name = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n return fetch_objects(Certificate, data, many=many)\n\n\nclass AssociatedUserSchema(LemurInputSchema):\n id = fields.Int()\n name = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n return fetch_objects(User, data, many=many)\n\n\nclass PluginInputSchema(LemurInputSchema):\n plugin_options = fields.List(fields.Dict(), validate=validate_options)\n slug = fields.String(required=True)\n title = fields.String()\n description = fields.String()\n\n @post_load\n def get_object(self, data, many=False):\n try:\n data['plugin_object'] = plugins.get(data['slug'])\n return data\n except Exception:\n raise ValidationError('Unable to find plugin: {0}'.format(data['slug']))\n\n\nclass PluginOutputSchema(LemurOutputSchema):\n id = fields.Integer()\n label = fields.String()\n description = fields.String()\n active = fields.Boolean()\n options = fields.List(fields.Dict(), dump_to='pluginOptions')\n slug = fields.String()\n title = fields.String()\n\n\nplugins_output_schema = PluginOutputSchema(many=True)\nplugin_output_schema = PluginOutputSchema\n\n\nclass BaseExtensionSchema(LemurSchema):\n @pre_load(pass_many=True)\n def preprocess(self, data, many):\n return self.under(data, many=many)\n\n @post_dump(pass_many=True)\n def post_process(self, data, many):\n if data:\n data = self.camel(data, many=many)\n return data\n\n\nclass AuthorityKeyIdentifierSchema(BaseExtensionSchema):\n use_key_identifier = fields.Boolean()\n use_authority_cert = fields.Boolean()\n\n\nclass CertificateInfoAccessSchema(BaseExtensionSchema):\n include_aia = fields.Boolean()\n\n @post_dump\n def handle_keys(self, data):\n return {'includeAIA': data['include_aia']}\n\n\nclass SubjectKeyIdentifierSchema(BaseExtensionSchema):\n include_ski = fields.Boolean()\n\n @post_dump\n def handle_keys(self, data):\n return {'includeSKI': data['include_ski']}\n\n\nclass CustomOIDSchema(BaseExtensionSchema):\n oid = fields.String()\n encoding = fields.String(validate=validators.encoding)\n value = fields.String()\n is_critical = fields.Boolean()\n\n\nclass NamesSchema(BaseExtensionSchema):\n names = SubjectAlternativeNameExtension()\n\n\nclass ExtensionSchema(BaseExtensionSchema):\n basic_constraints = BasicConstraintsExtension(missing={'ca': False})\n key_usage = KeyUsageExtension()\n extended_key_usage = ExtendedKeyUsageExtension()\n subject_key_identifier = fields.Nested(SubjectKeyIdentifierSchema)\n sub_alt_names = fields.Nested(NamesSchema)\n authority_key_identifier = fields.Nested(AuthorityKeyIdentifierSchema)\n certificate_info_access = fields.Nested(CertificateInfoAccessSchema)\n # FIXME: Convert custom OIDs to a custom field in fields.py like other Extensions\n # FIXME: Remove support in UI for Critical custom extensions 
https://github.com/Netflix/lemur/issues/665\n custom = fields.List(fields.Nested(CustomOIDSchema))\n\n\nclass EndpointNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n id = fields.Integer()\n description = fields.String()\n name = fields.String()\n dnsname = fields.String()\n owner = fields.Email()\n type = fields.String()\n active = fields.Boolean()\n", "path": "lemur/schemas.py"}]} | 3,732 | 99 |
gh_patches_debug_29652 | rasdani/github-patches | git_diff | pretix__pretix-476 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sendmail plugin: Create new mail based on an old one
**Hey!** If you ever wanted to contribute to an Open Source project, but didn't know how, you just found a perfect place to start. We try to write this issue as instructive as possible, but as we're human we almost certainly forgot to write something important in here. If you want to try to tackle this issue and get stuck at _any_ point, please do not hesitate to ask us, either here on GitHub or via email at [email protected]. We'll be very happy to help! 
We think this issue is very suitable for people who are just starting with Python or Django development or who have never contributed to Open Source before. We'd like to ask people with wide experience in these areas to leave this issue to beginners. Thanks! (If you want something more advanced, just drop us a line, we'll find something :wink:)
## How to get started
First of all, you need pretix running locally on your machine. We have a guide on the developer installation [over here](https://docs.pretix.eu/en/latest/development/setup.html). Play around with it and get a feeling for what pretix is and how it works :) Try to find the places in the interface mentioned in this issue where something needs to change. Maybe try to find the same places in the code? Just searching for characteristic words can be a powerful tool to find your way around.
## What this is about
If you create an event and then (under Settings > Plugins) activate the Sendmail plugin, there will be a new navigation option allowing you to send emails to everyone who ordered a ticket. Thanks to @FlaviaBastos, there is also a navigation item showing you the history of all mails sent through this feature. It would be nice if this history would include a button that allows you to create a new mail *based* on the old one, i.e. pre-fill the sendmail form with the contents from that time.
## How to do this
Find a way to pass the data around to the form. I believe the best way is to create a link in ``pretix/plugins/sendmail/templates/pretixplugins/sendmail/history.html`` that points to the sending view (defined in ``pretix/plugins/sendmail/views.py``) and includes the ID of the log entry as a query parameter. I suggest that you briefly read up on how our [log objects](https://docs.pretix.eu/en/latest/development/implementation/logging.html) work. You can then modify the ``get_form_kwargs`` method of ``SenderView`` to pass the correct [initial values](https://docs.djangoproject.com/en/1.10/ref/forms/api/#dynamic-initial-values) to the form.
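A rough sketch of the second half of that hint — what the ``get_form_kwargs`` override could look like. The ``from_log`` query-parameter name and the choice to pre-fill only the message are illustrative assumptions, not a prescribed implementation:

```python
# Sketch only, inside SenderView (pretix/plugins/sendmail/views.py).
# LogEntry is already imported there; LazyI18nString lives in pretix.base.i18n.
def get_form_kwargs(self):
    kwargs = super().get_form_kwargs()
    kwargs['event'] = self.request.event
    if 'from_log' in self.request.GET:
        try:
            logentry = LogEntry.objects.get(
                id=self.request.GET['from_log'],
                event=self.request.event,
                action_type='pretix.plugins.sendmail.sent',
            )
            # Pre-fill the form with the previously sent message.
            kwargs['initial'] = {
                'message': LazyI18nString(logentry.parsed_data['message']),
            }
        except LogEntry.DoesNotExist:
            pass  # unknown ID: fall back to an empty form
    return kwargs
```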
## I want to give it a try!
You're still reading, nice! If you want to try working on this, please comment below, so everyone else knows this is being looked at and can search for a different place to contribute.
First, [fork](https://github.com/pretix/pretix/fork) the repository to your own GitHub account. Then clone it locally (or [set it as your remote](https://help.github.com/articles/configuring-a-remote-for-a-fork/) if you already cloned it earlier) and prepare your pull requests. There are a number of tutorials on the Pull Request workflow online, for example this [short version](https://guides.github.com/introduction/flow/) by GitHub itself and this [more in-depth tutorial](https://www.digitalocean.com/community/tutorials/how-to-create-a-pull-request-on-github). We're excited to see your pull request!
And don't forget: If you get stuck, don't hesitate to ask. We have bound ourselves to a [Code of Conduct](https://docs.pretix.eu/en/latest/development/contribution/codeofconduct.html) and we're committed to providing a welcoming experience and giving constructive, friendly and useful feedback on your contributions.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pretix/plugins/sendmail/views.py`
Content:
```
1 import logging
2 from datetime import timedelta
3
4 import pytz
5 from django.contrib import messages
6 from django.db.models import Q
7 from django.shortcuts import redirect
8 from django.utils.formats import date_format
9 from django.utils.timezone import now
10 from django.utils.translation import ugettext_lazy as _
11 from django.views.generic import FormView, ListView
12
13 from pretix.base.i18n import language
14 from pretix.base.models import LogEntry, Order
15 from pretix.base.services.mail import SendMailException, mail
16 from pretix.control.permissions import EventPermissionRequiredMixin
17 from pretix.multidomain.urlreverse import build_absolute_uri
18
19 from . import forms
20
21 logger = logging.getLogger('pretix.plugins.sendmail')
22
23
24 class SenderView(EventPermissionRequiredMixin, FormView):
25 template_name = 'pretixplugins/sendmail/send_form.html'
26 permission = 'can_change_orders'
27 form_class = forms.MailForm
28
29 def get_form_kwargs(self):
30 kwargs = super().get_form_kwargs()
31 kwargs['event'] = self.request.event
32 return kwargs
33
34 def form_invalid(self, form):
35 messages.error(self.request, _('We could not send the email. See below for details.'))
36 return super().form_invalid(form)
37
38 def form_valid(self, form):
39 qs = Order.objects.filter(event=self.request.event)
40 statusq = Q(status__in=form.cleaned_data['sendto'])
41 if 'overdue' in form.cleaned_data['sendto']:
42 statusq |= Q(status=Order.STATUS_PENDING, expires__lt=now())
43 orders = qs.filter(statusq)
44
45 tz = pytz.timezone(self.request.event.settings.timezone)
46
47 failures = []
48 self.output = {}
49 if not orders:
50 messages.error(self.request, _('There are no orders matching this selection.'))
51 return self.get(self.request, *self.args, **self.kwargs)
52 for o in orders:
53 if self.request.POST.get("action") == "preview":
54 for l in self.request.event.settings.locales:
55 with language(l):
56 self.output[l] = []
57 self.output[l].append(_('Subject: {subject}').format(subject=form.cleaned_data['subject'].localize(l)))
58 message = form.cleaned_data['message'].localize(l)
59 preview_text = message.format(
60 order='ORDER1234',
61 event=self.request.event.name,
62 order_date=date_format(now(), 'SHORT_DATE_FORMAT'),
63 due_date=date_format(now() + timedelta(days=7), 'SHORT_DATE_FORMAT'),
64 order_url=build_absolute_uri(self.request.event, 'presale:event.order', kwargs={
65 'order': 'ORDER1234',
66 'secret': 'longrandomsecretabcdef123456'
67 }))
68 self.output[l].append(preview_text)
69 return self.get(self.request, *self.args, **self.kwargs)
70 else:
71 try:
72 with language(o.locale):
73 mail(o.email, form.cleaned_data['subject'], form.cleaned_data['message'],
74 {
75 'event': o.event,
76 'order': o.code,
77 'order_date': date_format(o.datetime.astimezone(tz), 'SHORT_DATETIME_FORMAT'),
78 'due_date': date_format(o.expires, 'SHORT_DATE_FORMAT'),
79 'order_url': build_absolute_uri(o.event, 'presale:event.order', kwargs={
80 'order': o.code,
81 'secret': o.secret
82 })},
83 self.request.event, locale=o.locale, order=o)
84 o.log_action(
85 'pretix.plugins.sendmail.order.email.sent',
86 user=self.request.user,
87 data={
88 'subject': form.cleaned_data['subject'],
89 'message': form.cleaned_data['message'],
90 'recipient': o.email
91 }
92 )
93 except SendMailException:
94 failures.append(o.email)
95 self.request.event.log_action('pretix.plugins.sendmail.sent',
96 user=self.request.user,
97 data=dict(form.cleaned_data))
98 if failures:
99 messages.error(self.request, _('Failed to send mails to the following users: {}'.format(' '.join(failures))))
100 else:
101 messages.success(self.request, _('Your message has been queued and will be sent to the selected users.'))
102
103 return redirect(
104 'plugins:sendmail:send',
105 event=self.request.event.slug,
106 organizer=self.request.event.organizer.slug
107 )
108
109 def get_context_data(self, *args, **kwargs):
110 ctx = super().get_context_data(*args, **kwargs)
111 ctx['output'] = getattr(self, 'output', None)
112 return ctx
113
114
115 class EmailHistoryView(EventPermissionRequiredMixin, ListView):
116 template_name = 'pretixplugins/sendmail/history.html'
117 permission = 'can_change_orders'
118 model = LogEntry
119 context_object_name = 'logs'
120 paginate_by = 5
121
122 def get_queryset(self):
123 qs = LogEntry.objects.filter(
124 event=self.request.event,
125 action_type='pretix.plugins.sendmail.sent'
126 )
127 return qs
128
129 def get_context_data(self, **kwargs):
130 ctx = super().get_context_data()
131
132 status = dict(Order.STATUS_CHOICE)
133 status['overdue'] = _('pending with payment overdue')
134 for log in ctx['logs']:
135 log.pdata = log.parsed_data
136 log.pdata['locales'] = {}
137 for locale, msg in log.pdata['message'].items():
138 log.pdata['locales'][locale] = {
139 'message': msg,
140 'subject': log.pdata['subject'][locale]
141 }
142 log.pdata['sendto'] = [
143 status[s] for s in log.pdata['sendto']
144 ]
145
146 return ctx
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pretix/plugins/sendmail/views.py b/src/pretix/plugins/sendmail/views.py
--- a/src/pretix/plugins/sendmail/views.py
+++ b/src/pretix/plugins/sendmail/views.py
@@ -4,13 +4,14 @@
import pytz
from django.contrib import messages
from django.db.models import Q
+from django.http import Http404
from django.shortcuts import redirect
from django.utils.formats import date_format
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView, ListView
-from pretix.base.i18n import language
+from pretix.base.i18n import LazyI18nString, language
from pretix.base.models import LogEntry, Order
from pretix.base.services.mail import SendMailException, mail
from pretix.control.permissions import EventPermissionRequiredMixin
@@ -29,6 +30,18 @@
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['event'] = self.request.event
+ if 'from_log' in self.request.GET:
+ try:
+ from_log_id = self.request.GET.get('from_log')
+ logentry = LogEntry.objects.get(
+ id=from_log_id,
+ event=self.request.event,
+ action_type='pretix.plugins.sendmail.sent'
+ )
+ message = LazyI18nString(logentry.parsed_data['message'])
+ kwargs['initial'] = {'message': message}
+ except LogEntry.DoesNotExist:
+ raise Http404(_('You supplied an invalid log entry ID'))
return kwargs
def form_invalid(self, form):
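The patch above only touches ``views.py``. The link in the history template that actually passes the log entry ID is not part of this diff; a minimal sketch of what it could look like in ``pretixplugins/sendmail/history.html`` is below. The markup, the button label, and the assumption that the template's loop variable is ``log`` are illustrative; the URL name matches the redirect already used in ``SenderView``:

```django
{# Illustrative only: re-use the send view with a from_log query parameter #}
<a class="btn btn-default btn-xs"
   href="{% url 'plugins:sendmail:send' event=request.event.slug organizer=request.event.organizer.slug %}?from_log={{ log.id }}">
    Compose new email based on this one
</a>
```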
| {"golden_diff": "diff --git a/src/pretix/plugins/sendmail/views.py b/src/pretix/plugins/sendmail/views.py\n--- a/src/pretix/plugins/sendmail/views.py\n+++ b/src/pretix/plugins/sendmail/views.py\n@@ -4,13 +4,14 @@\n import pytz\n from django.contrib import messages\n from django.db.models import Q\n+from django.http import Http404\n from django.shortcuts import redirect\n from django.utils.formats import date_format\n from django.utils.timezone import now\n from django.utils.translation import ugettext_lazy as _\n from django.views.generic import FormView, ListView\n \n-from pretix.base.i18n import language\n+from pretix.base.i18n import LazyI18nString, language\n from pretix.base.models import LogEntry, Order\n from pretix.base.services.mail import SendMailException, mail\n from pretix.control.permissions import EventPermissionRequiredMixin\n@@ -29,6 +30,18 @@\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['event'] = self.request.event\n+ if 'from_log' in self.request.GET:\n+ try:\n+ from_log_id = self.request.GET.get('from_log')\n+ logentry = LogEntry.objects.get(\n+ id=from_log_id,\n+ event=self.request.event,\n+ action_type='pretix.plugins.sendmail.sent'\n+ )\n+ message = LazyI18nString(logentry.parsed_data['message'])\n+ kwargs['initial'] = {'message': message}\n+ except LogEntry.DoesNotExist:\n+ raise Http404(_('You supplied an invalid log entry ID'))\n return kwargs\n \n def form_invalid(self, form):\n", "issue": "Sendmail plugin: Create new mail based on an old one\n**Hey!** If you ever wanted to contribute to an Open Source project, but didn't know how, you just found a perfect place to start. We try to write this issue as instructive as possible, but as we're humans we almost certainly forgot to write something important in here. If you want to try to tackle this issue and get stuck at _any_ point, please do not hesitate and ask us, either here on GitHub or via email at [email protected]. We'll be very happy to help! \r\n\r\nWe think this issue is very suitable to people who are just starting with Python or Django development or who never contributed to Open Source before. We'd like to ask people with wide experience in these areas to leave this issue to beginners. Thanks! (If you want something more advanced, just drop us a line, we'll find something :wink:)\r\n\r\n## How to get started\r\n\r\nFirst of all, you need pretix running locally on your machine. We have a guide on the developer installation [over here](https://docs.pretix.eu/en/latest/development/setup.html). Play around with it and get a feeling for what pretix is and how it works :) Try to find the places in the interface mentioned in this issue where something needs to change. Maybe try to find the same places in the code? Just searching for characteristic words can be a powerful tool to find your way around.\r\n\r\n\r\n## What this is about\r\n\r\nIf you create an event and then (under Settings > Plugins) activate the Sendmail plugin, there will be a new navigation option allowing you to send emails to everyone who ordered a ticket. Thanks to @FlaviaBastos, there is also a navigation item showing you the history of all mails sent through this feature. It would be nice if this history would include a button that allows you to create a new mail *based* on the old one, i.e. pre-fill the sendmail form with the contents from that time.\r\n\r\n## How to do this\r\n\r\nFind a way to pass the data around to the form. 
I believe the best way is to create a link in ``pretix/plugins/sendmail/templates/pretixplugins/sendmail/history.html`` that includes the ID of the log entry in a query parameter to the sending view (defined in ``pretix/plugins/sendmail/views.py``). I suggest that you shortly read up on how our [log objects](https://docs.pretix.eu/en/latest/development/implementation/logging.html) work. You can then modify the ``get_form_kwargs`` method of ``SenderView`` to pass the correct [initial values](https://docs.djangoproject.com/en/1.10/ref/forms/api/#dynamic-initial-values) to the form.\r\n\r\n## I want to give it a try!\r\n\r\nYou're still reading, nice! If you want to try working on this, please comment below, so everyone else knows this is being looked at and can search for a different place to contribute. \r\n\r\nFirst, [fork](https://github.com/pretix/pretix/fork) the repository to your own GitHub account. Then clone it locally (or [set it as your remote](https://help.github.com/articles/configuring-a-remote-for-a-fork/) if you already cloned it earlier) and prepare your pull requests. There are a number of tutorials on the Pull Request workflow online, for example this [short version](https://guides.github.com/introduction/flow/) by GitHub itself and this [more in-depth tutorial](https://www.digitalocean.com/community/tutorials/how-to-create-a-pull-request-on-github). We're excited to see your pull request! \r\n\r\nAnd don't forget: If you get stuck, don't hestitate to ask. We bound ourselves to a [Code of Conduct](https://docs.pretix.eu/en/latest/development/contribution/codeofconduct.html) and we're committed to providing a welcoming experience and giving constructive, friendly and useful feedback on your contributions.\r\n\n", "before_files": [{"content": "import logging\nfrom datetime import timedelta\n\nimport pytz\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.shortcuts import redirect\nfrom django.utils.formats import date_format\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import FormView, ListView\n\nfrom pretix.base.i18n import language\nfrom pretix.base.models import LogEntry, Order\nfrom pretix.base.services.mail import SendMailException, mail\nfrom pretix.control.permissions import EventPermissionRequiredMixin\nfrom pretix.multidomain.urlreverse import build_absolute_uri\n\nfrom . import forms\n\nlogger = logging.getLogger('pretix.plugins.sendmail')\n\n\nclass SenderView(EventPermissionRequiredMixin, FormView):\n template_name = 'pretixplugins/sendmail/send_form.html'\n permission = 'can_change_orders'\n form_class = forms.MailForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['event'] = self.request.event\n return kwargs\n\n def form_invalid(self, form):\n messages.error(self.request, _('We could not send the email. 
See below for details.'))\n return super().form_invalid(form)\n\n def form_valid(self, form):\n qs = Order.objects.filter(event=self.request.event)\n statusq = Q(status__in=form.cleaned_data['sendto'])\n if 'overdue' in form.cleaned_data['sendto']:\n statusq |= Q(status=Order.STATUS_PENDING, expires__lt=now())\n orders = qs.filter(statusq)\n\n tz = pytz.timezone(self.request.event.settings.timezone)\n\n failures = []\n self.output = {}\n if not orders:\n messages.error(self.request, _('There are no orders matching this selection.'))\n return self.get(self.request, *self.args, **self.kwargs)\n for o in orders:\n if self.request.POST.get(\"action\") == \"preview\":\n for l in self.request.event.settings.locales:\n with language(l):\n self.output[l] = []\n self.output[l].append(_('Subject: {subject}').format(subject=form.cleaned_data['subject'].localize(l)))\n message = form.cleaned_data['message'].localize(l)\n preview_text = message.format(\n order='ORDER1234',\n event=self.request.event.name,\n order_date=date_format(now(), 'SHORT_DATE_FORMAT'),\n due_date=date_format(now() + timedelta(days=7), 'SHORT_DATE_FORMAT'),\n order_url=build_absolute_uri(self.request.event, 'presale:event.order', kwargs={\n 'order': 'ORDER1234',\n 'secret': 'longrandomsecretabcdef123456'\n }))\n self.output[l].append(preview_text)\n return self.get(self.request, *self.args, **self.kwargs)\n else:\n try:\n with language(o.locale):\n mail(o.email, form.cleaned_data['subject'], form.cleaned_data['message'],\n {\n 'event': o.event,\n 'order': o.code,\n 'order_date': date_format(o.datetime.astimezone(tz), 'SHORT_DATETIME_FORMAT'),\n 'due_date': date_format(o.expires, 'SHORT_DATE_FORMAT'),\n 'order_url': build_absolute_uri(o.event, 'presale:event.order', kwargs={\n 'order': o.code,\n 'secret': o.secret\n })},\n self.request.event, locale=o.locale, order=o)\n o.log_action(\n 'pretix.plugins.sendmail.order.email.sent',\n user=self.request.user,\n data={\n 'subject': form.cleaned_data['subject'],\n 'message': form.cleaned_data['message'],\n 'recipient': o.email\n }\n )\n except SendMailException:\n failures.append(o.email)\n self.request.event.log_action('pretix.plugins.sendmail.sent',\n user=self.request.user,\n data=dict(form.cleaned_data))\n if failures:\n messages.error(self.request, _('Failed to send mails to the following users: {}'.format(' '.join(failures))))\n else:\n messages.success(self.request, _('Your message has been queued and will be sent to the selected users.'))\n\n return redirect(\n 'plugins:sendmail:send',\n event=self.request.event.slug,\n organizer=self.request.event.organizer.slug\n )\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['output'] = getattr(self, 'output', None)\n return ctx\n\n\nclass EmailHistoryView(EventPermissionRequiredMixin, ListView):\n template_name = 'pretixplugins/sendmail/history.html'\n permission = 'can_change_orders'\n model = LogEntry\n context_object_name = 'logs'\n paginate_by = 5\n\n def get_queryset(self):\n qs = LogEntry.objects.filter(\n event=self.request.event,\n action_type='pretix.plugins.sendmail.sent'\n )\n return qs\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n status = dict(Order.STATUS_CHOICE)\n status['overdue'] = _('pending with payment overdue')\n for log in ctx['logs']:\n log.pdata = log.parsed_data\n log.pdata['locales'] = {}\n for locale, msg in log.pdata['message'].items():\n log.pdata['locales'][locale] = {\n 'message': msg,\n 'subject': 
log.pdata['subject'][locale]\n }\n log.pdata['sendto'] = [\n status[s] for s in log.pdata['sendto']\n ]\n\n return ctx\n", "path": "src/pretix/plugins/sendmail/views.py"}], "after_files": [{"content": "import logging\nfrom datetime import timedelta\n\nimport pytz\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.utils.formats import date_format\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import FormView, ListView\n\nfrom pretix.base.i18n import LazyI18nString, language\nfrom pretix.base.models import LogEntry, Order\nfrom pretix.base.services.mail import SendMailException, mail\nfrom pretix.control.permissions import EventPermissionRequiredMixin\nfrom pretix.multidomain.urlreverse import build_absolute_uri\n\nfrom . import forms\n\nlogger = logging.getLogger('pretix.plugins.sendmail')\n\n\nclass SenderView(EventPermissionRequiredMixin, FormView):\n template_name = 'pretixplugins/sendmail/send_form.html'\n permission = 'can_change_orders'\n form_class = forms.MailForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['event'] = self.request.event\n if 'from_log' in self.request.GET:\n try:\n from_log_id = self.request.GET.get('from_log')\n logentry = LogEntry.objects.get(\n id=from_log_id,\n event=self.request.event,\n action_type='pretix.plugins.sendmail.sent'\n )\n message = LazyI18nString(logentry.parsed_data['message'])\n kwargs['initial'] = {'message': message}\n except LogEntry.DoesNotExist:\n raise Http404(_('You supplied an invalid log entry ID'))\n return kwargs\n\n def form_invalid(self, form):\n messages.error(self.request, _('We could not send the email. 
See below for details.'))\n return super().form_invalid(form)\n\n def form_valid(self, form):\n qs = Order.objects.filter(event=self.request.event)\n statusq = Q(status__in=form.cleaned_data['sendto'])\n if 'overdue' in form.cleaned_data['sendto']:\n statusq |= Q(status=Order.STATUS_PENDING, expires__lt=now())\n orders = qs.filter(statusq)\n\n tz = pytz.timezone(self.request.event.settings.timezone)\n\n failures = []\n self.output = {}\n if not orders:\n messages.error(self.request, _('There are no orders matching this selection.'))\n return self.get(self.request, *self.args, **self.kwargs)\n for o in orders:\n if self.request.POST.get(\"action\") == \"preview\":\n for l in self.request.event.settings.locales:\n with language(l):\n self.output[l] = []\n self.output[l].append(_('Subject: {subject}').format(subject=form.cleaned_data['subject'].localize(l)))\n message = form.cleaned_data['message'].localize(l)\n preview_text = message.format(\n order='ORDER1234',\n event=self.request.event.name,\n order_date=date_format(now(), 'SHORT_DATE_FORMAT'),\n due_date=date_format(now() + timedelta(days=7), 'SHORT_DATE_FORMAT'),\n order_url=build_absolute_uri(self.request.event, 'presale:event.order', kwargs={\n 'order': 'ORDER1234',\n 'secret': 'longrandomsecretabcdef123456'\n }))\n self.output[l].append(preview_text)\n return self.get(self.request, *self.args, **self.kwargs)\n else:\n try:\n with language(o.locale):\n mail(o.email, form.cleaned_data['subject'], form.cleaned_data['message'],\n {\n 'event': o.event,\n 'order': o.code,\n 'order_date': date_format(o.datetime.astimezone(tz), 'SHORT_DATETIME_FORMAT'),\n 'due_date': date_format(o.expires, 'SHORT_DATE_FORMAT'),\n 'order_url': build_absolute_uri(o.event, 'presale:event.order', kwargs={\n 'order': o.code,\n 'secret': o.secret\n })},\n self.request.event, locale=o.locale, order=o)\n o.log_action(\n 'pretix.plugins.sendmail.order.email.sent',\n user=self.request.user,\n data={\n 'subject': form.cleaned_data['subject'],\n 'message': form.cleaned_data['message'],\n 'recipient': o.email\n }\n )\n except SendMailException:\n failures.append(o.email)\n self.request.event.log_action('pretix.plugins.sendmail.sent',\n user=self.request.user,\n data=dict(form.cleaned_data))\n if failures:\n messages.error(self.request, _('Failed to send mails to the following users: {}'.format(' '.join(failures))))\n else:\n messages.success(self.request, _('Your message has been queued and will be sent to the selected users.'))\n\n return redirect(\n 'plugins:sendmail:send',\n event=self.request.event.slug,\n organizer=self.request.event.organizer.slug\n )\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['output'] = getattr(self, 'output', None)\n return ctx\n\n\nclass EmailHistoryView(EventPermissionRequiredMixin, ListView):\n template_name = 'pretixplugins/sendmail/history.html'\n permission = 'can_change_orders'\n model = LogEntry\n context_object_name = 'logs'\n paginate_by = 5\n\n def get_queryset(self):\n qs = LogEntry.objects.filter(\n event=self.request.event,\n action_type='pretix.plugins.sendmail.sent'\n )\n return qs\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n status = dict(Order.STATUS_CHOICE)\n status['overdue'] = _('pending with payment overdue')\n for log in ctx['logs']:\n log.pdata = log.parsed_data\n log.pdata['locales'] = {}\n for locale, msg in log.pdata['message'].items():\n log.pdata['locales'][locale] = {\n 'message': msg,\n 'subject': 
log.pdata['subject'][locale]\n }\n log.pdata['sendto'] = [\n status[s] for s in log.pdata['sendto']\n ]\n\n return ctx\n", "path": "src/pretix/plugins/sendmail/views.py"}]} | 2,612 | 367 |
gh_patches_debug_34104 | rasdani/github-patches | git_diff | pallets__werkzeug-2777 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Elapsed and timestamp values are not available to filename_format function
When the `ProfilerMiddleware` `filename_format` argument is a callable, the callable will only receive the WSGI environ variable. However, the default format string can use the `elapsed` and `time` values. This means that one using a format function cannot recreate a similar output pattern. In my case, I'd like to use both, but format the timestamp differently.
Example: I'd like a filename of the form `2023-08-21:14:05:05.POST.myurl.13ms.prof`. I can get the timestamp by doing my own `datetime.now()` call, but the elapsed time is not available.
This problem is solvable by subclassing `ProfilerMiddleware` or writing one's own profiling middleware, but this seems like a useful thing to have in the core library so that library users don't need to duplicate the `__call__()` code in their own projects.
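For illustration, a minimal sketch of what a `filename_format` callable can already do today: the timestamp part is easy to reproduce locally, but the elapsed time never reaches the callable (names and layout are only an example):

```python
from datetime import datetime

def profile_filename(environ):
    # The timestamp can be produced locally...
    ts = datetime.now().strftime("%Y-%m-%d:%H:%M:%S")
    method = environ["REQUEST_METHOD"]
    path = environ["PATH_INFO"].strip("/").replace("/", ".") or "root"
    # ...but the request's elapsed time is not available from `environ`,
    # so a name like "2023-08-21:14:05:05.POST.myurl.13ms.prof" cannot be built here.
    return f"{ts}.{method}.{path}.prof"

# app = ProfilerMiddleware(app, profile_dir="profiles", filename_format=profile_filename)
```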
I can submit a PR if this change is welcome.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/werkzeug/middleware/profiler.py`
Content:
```
1 """
2 Application Profiler
3 ====================
4
5 This module provides a middleware that profiles each request with the
6 :mod:`cProfile` module. This can help identify bottlenecks in your code
7 that may be slowing down your application.
8
9 .. autoclass:: ProfilerMiddleware
10
11 :copyright: 2007 Pallets
12 :license: BSD-3-Clause
13 """
14 from __future__ import annotations
15
16 import os.path
17 import sys
18 import time
19 import typing as t
20 from pstats import Stats
21
22 try:
23 from cProfile import Profile
24 except ImportError:
25 from profile import Profile # type: ignore
26
27 if t.TYPE_CHECKING:
28 from _typeshed.wsgi import StartResponse
29 from _typeshed.wsgi import WSGIApplication
30 from _typeshed.wsgi import WSGIEnvironment
31
32
33 class ProfilerMiddleware:
34 """Wrap a WSGI application and profile the execution of each
35 request. Responses are buffered so that timings are more exact.
36
37 If ``stream`` is given, :class:`pstats.Stats` are written to it
38 after each request. If ``profile_dir`` is given, :mod:`cProfile`
39 data files are saved to that directory, one file per request.
40
41 The filename can be customized by passing ``filename_format``. If
42 it is a string, it will be formatted using :meth:`str.format` with
43 the following fields available:
44
45 - ``{method}`` - The request method; GET, POST, etc.
46 - ``{path}`` - The request path or 'root' should one not exist.
47 - ``{elapsed}`` - The elapsed time of the request.
48 - ``{time}`` - The time of the request.
49
50 If it is a callable, it will be called with the WSGI ``environ``
51 dict and should return a filename.
52
53 :param app: The WSGI application to wrap.
54 :param stream: Write stats to this stream. Disable with ``None``.
55 :param sort_by: A tuple of columns to sort stats by. See
56 :meth:`pstats.Stats.sort_stats`.
57 :param restrictions: A tuple of restrictions to filter stats by. See
58 :meth:`pstats.Stats.print_stats`.
59 :param profile_dir: Save profile data files to this directory.
60 :param filename_format: Format string for profile data file names,
61 or a callable returning a name. See explanation above.
62
63 .. code-block:: python
64
65 from werkzeug.middleware.profiler import ProfilerMiddleware
66 app = ProfilerMiddleware(app)
67
68 .. versionchanged:: 0.15
69 Stats are written even if ``profile_dir`` is given, and can be
70 disable by passing ``stream=None``.
71
72 .. versionadded:: 0.15
73 Added ``filename_format``.
74
75 .. versionadded:: 0.9
76 Added ``restrictions`` and ``profile_dir``.
77 """
78
79 def __init__(
80 self,
81 app: WSGIApplication,
82 stream: t.IO[str] | None = sys.stdout,
83 sort_by: t.Iterable[str] = ("time", "calls"),
84 restrictions: t.Iterable[str | int | float] = (),
85 profile_dir: str | None = None,
86 filename_format: str = "{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof",
87 ) -> None:
88 self._app = app
89 self._stream = stream
90 self._sort_by = sort_by
91 self._restrictions = restrictions
92 self._profile_dir = profile_dir
93 self._filename_format = filename_format
94
95 def __call__(
96 self, environ: WSGIEnvironment, start_response: StartResponse
97 ) -> t.Iterable[bytes]:
98 response_body: list[bytes] = []
99
100 def catching_start_response(status, headers, exc_info=None): # type: ignore
101 start_response(status, headers, exc_info)
102 return response_body.append
103
104 def runapp() -> None:
105 app_iter = self._app(
106 environ, t.cast("StartResponse", catching_start_response)
107 )
108 response_body.extend(app_iter)
109
110 if hasattr(app_iter, "close"):
111 app_iter.close()
112
113 profile = Profile()
114 start = time.time()
115 profile.runcall(runapp)
116 body = b"".join(response_body)
117 elapsed = time.time() - start
118
119 if self._profile_dir is not None:
120 if callable(self._filename_format):
121 filename = self._filename_format(environ)
122 else:
123 filename = self._filename_format.format(
124 method=environ["REQUEST_METHOD"],
125 path=environ["PATH_INFO"].strip("/").replace("/", ".") or "root",
126 elapsed=elapsed * 1000.0,
127 time=time.time(),
128 )
129 filename = os.path.join(self._profile_dir, filename)
130 profile.dump_stats(filename)
131
132 if self._stream is not None:
133 stats = Stats(profile, stream=self._stream)
134 stats.sort_stats(*self._sort_by)
135 print("-" * 80, file=self._stream)
136 path_info = environ.get("PATH_INFO", "")
137 print(f"PATH: {path_info!r}", file=self._stream)
138 stats.print_stats(*self._restrictions)
139 print(f"{'-' * 80}\n", file=self._stream)
140
141 return [body]
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/werkzeug/middleware/profiler.py b/src/werkzeug/middleware/profiler.py
--- a/src/werkzeug/middleware/profiler.py
+++ b/src/werkzeug/middleware/profiler.py
@@ -44,11 +44,16 @@
- ``{method}`` - The request method; GET, POST, etc.
- ``{path}`` - The request path or 'root' should one not exist.
- - ``{elapsed}`` - The elapsed time of the request.
+ - ``{elapsed}`` - The elapsed time of the request in milliseconds.
- ``{time}`` - The time of the request.
- If it is a callable, it will be called with the WSGI ``environ``
- dict and should return a filename.
+ If it is a callable, it will be called with the WSGI ``environ`` and
+ be expected to return a filename string. The ``environ`` dictionary
+ will also have the ``"werkzeug.profiler"`` key populated with a
+ dictionary containing the following fields (more may be added in the
+ future):
+ - ``{elapsed}`` - The elapsed time of the request in milliseconds.
+ - ``{time}`` - The time of the request.
:param app: The WSGI application to wrap.
:param stream: Write stats to this stream. Disable with ``None``.
@@ -65,6 +70,10 @@
from werkzeug.middleware.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
+ .. versionchanged:: 3.0
+ Added the ``"werkzeug.profiler"`` key to the ``filename_format(environ)``
+ parameter with the ``elapsed`` and ``time`` fields.
+
.. versionchanged:: 0.15
Stats are written even if ``profile_dir`` is given, and can be
disable by passing ``stream=None``.
@@ -118,6 +127,10 @@
if self._profile_dir is not None:
if callable(self._filename_format):
+ environ["werkzeug.profiler"] = {
+ "elapsed": elapsed * 1000.0,
+ "time": time.time(),
+ }
filename = self._filename_format(environ)
else:
filename = self._filename_format.format(
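With the `"werkzeug.profiler"` environ key added above, a callable can reproduce (and customize) the naming scheme asked for in the issue; a sketch, with the filename layout purely as an example:

```python
from datetime import datetime

def profile_filename(environ):
    # Populated by the patched middleware just before the callable is invoked.
    prof = environ.get("werkzeug.profiler", {})
    ts = datetime.fromtimestamp(prof.get("time", 0)).strftime("%Y-%m-%d:%H:%M:%S")
    method = environ["REQUEST_METHOD"]
    path = environ["PATH_INFO"].strip("/").replace("/", ".") or "root"
    return f"{ts}.{method}.{path}.{prof.get('elapsed', 0.0):.0f}ms.prof"

# app = ProfilerMiddleware(app, profile_dir="profiles", filename_format=profile_filename)
```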
| {"golden_diff": "diff --git a/src/werkzeug/middleware/profiler.py b/src/werkzeug/middleware/profiler.py\n--- a/src/werkzeug/middleware/profiler.py\n+++ b/src/werkzeug/middleware/profiler.py\n@@ -44,11 +44,16 @@\n \n - ``{method}`` - The request method; GET, POST, etc.\n - ``{path}`` - The request path or 'root' should one not exist.\n- - ``{elapsed}`` - The elapsed time of the request.\n+ - ``{elapsed}`` - The elapsed time of the request in milliseconds.\n - ``{time}`` - The time of the request.\n \n- If it is a callable, it will be called with the WSGI ``environ``\n- dict and should return a filename.\n+ If it is a callable, it will be called with the WSGI ``environ`` and\n+ be expected to return a filename string. The ``environ`` dictionary\n+ will also have the ``\"werkzeug.profiler\"`` key populated with a\n+ dictionary containing the following fields (more may be added in the\n+ future):\n+ - ``{elapsed}`` - The elapsed time of the request in milliseconds.\n+ - ``{time}`` - The time of the request.\n \n :param app: The WSGI application to wrap.\n :param stream: Write stats to this stream. Disable with ``None``.\n@@ -65,6 +70,10 @@\n from werkzeug.middleware.profiler import ProfilerMiddleware\n app = ProfilerMiddleware(app)\n \n+ .. versionchanged:: 3.0\n+ Added the ``\"werkzeug.profiler\"`` key to the ``filename_format(environ)``\n+ parameter with the ``elapsed`` and ``time`` fields.\n+\n .. versionchanged:: 0.15\n Stats are written even if ``profile_dir`` is given, and can be\n disable by passing ``stream=None``.\n@@ -118,6 +127,10 @@\n \n if self._profile_dir is not None:\n if callable(self._filename_format):\n+ environ[\"werkzeug.profiler\"] = {\n+ \"elapsed\": elapsed * 1000.0,\n+ \"time\": time.time(),\n+ }\n filename = self._filename_format(environ)\n else:\n filename = self._filename_format.format(\n", "issue": "Elapsed and timestamp values are not available to filename_format function\nWhen the `ProfilerMiddleware` `filename_format` argument is a callable, the callable will only receive the WSGI environ variable. However, the default format string can use the `elapsed` and `time` values. This means that one using a format function cannot recreate a similar output pattern. In my case, I'd like to use both, but format the timestamp differently.\r\n\r\n\r\nExample: I'd like a filename of the form `2023-08-21:14:05:05.POST.myurl.13ms.prof`. I can get the timestamp by doing my own `datetime.now()` call, but the elapsed time is not available.\r\n\r\nThis problem is solvable by subclassing `ProfilerMiddleware` or writing ones own profiling middleware, but this seems like a useful thing to have in the core library so library users don't need to duplicate the `__call__()` code in their own projects.\r\n\r\nI can submit a PR if this change is welcome.\r\n\n", "before_files": [{"content": "\"\"\"\nApplication Profiler\n====================\n\nThis module provides a middleware that profiles each request with the\n:mod:`cProfile` module. This can help identify bottlenecks in your code\nthat may be slowing down your application.\n\n.. 
autoclass:: ProfilerMiddleware\n\n:copyright: 2007 Pallets\n:license: BSD-3-Clause\n\"\"\"\nfrom __future__ import annotations\n\nimport os.path\nimport sys\nimport time\nimport typing as t\nfrom pstats import Stats\n\ntry:\n from cProfile import Profile\nexcept ImportError:\n from profile import Profile # type: ignore\n\nif t.TYPE_CHECKING:\n from _typeshed.wsgi import StartResponse\n from _typeshed.wsgi import WSGIApplication\n from _typeshed.wsgi import WSGIEnvironment\n\n\nclass ProfilerMiddleware:\n \"\"\"Wrap a WSGI application and profile the execution of each\n request. Responses are buffered so that timings are more exact.\n\n If ``stream`` is given, :class:`pstats.Stats` are written to it\n after each request. If ``profile_dir`` is given, :mod:`cProfile`\n data files are saved to that directory, one file per request.\n\n The filename can be customized by passing ``filename_format``. If\n it is a string, it will be formatted using :meth:`str.format` with\n the following fields available:\n\n - ``{method}`` - The request method; GET, POST, etc.\n - ``{path}`` - The request path or 'root' should one not exist.\n - ``{elapsed}`` - The elapsed time of the request.\n - ``{time}`` - The time of the request.\n\n If it is a callable, it will be called with the WSGI ``environ``\n dict and should return a filename.\n\n :param app: The WSGI application to wrap.\n :param stream: Write stats to this stream. Disable with ``None``.\n :param sort_by: A tuple of columns to sort stats by. See\n :meth:`pstats.Stats.sort_stats`.\n :param restrictions: A tuple of restrictions to filter stats by. See\n :meth:`pstats.Stats.print_stats`.\n :param profile_dir: Save profile data files to this directory.\n :param filename_format: Format string for profile data file names,\n or a callable returning a name. See explanation above.\n\n .. code-block:: python\n\n from werkzeug.middleware.profiler import ProfilerMiddleware\n app = ProfilerMiddleware(app)\n\n .. versionchanged:: 0.15\n Stats are written even if ``profile_dir`` is given, and can be\n disable by passing ``stream=None``.\n\n .. versionadded:: 0.15\n Added ``filename_format``.\n\n .. 
versionadded:: 0.9\n Added ``restrictions`` and ``profile_dir``.\n \"\"\"\n\n def __init__(\n self,\n app: WSGIApplication,\n stream: t.IO[str] | None = sys.stdout,\n sort_by: t.Iterable[str] = (\"time\", \"calls\"),\n restrictions: t.Iterable[str | int | float] = (),\n profile_dir: str | None = None,\n filename_format: str = \"{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof\",\n ) -> None:\n self._app = app\n self._stream = stream\n self._sort_by = sort_by\n self._restrictions = restrictions\n self._profile_dir = profile_dir\n self._filename_format = filename_format\n\n def __call__(\n self, environ: WSGIEnvironment, start_response: StartResponse\n ) -> t.Iterable[bytes]:\n response_body: list[bytes] = []\n\n def catching_start_response(status, headers, exc_info=None): # type: ignore\n start_response(status, headers, exc_info)\n return response_body.append\n\n def runapp() -> None:\n app_iter = self._app(\n environ, t.cast(\"StartResponse\", catching_start_response)\n )\n response_body.extend(app_iter)\n\n if hasattr(app_iter, \"close\"):\n app_iter.close()\n\n profile = Profile()\n start = time.time()\n profile.runcall(runapp)\n body = b\"\".join(response_body)\n elapsed = time.time() - start\n\n if self._profile_dir is not None:\n if callable(self._filename_format):\n filename = self._filename_format(environ)\n else:\n filename = self._filename_format.format(\n method=environ[\"REQUEST_METHOD\"],\n path=environ[\"PATH_INFO\"].strip(\"/\").replace(\"/\", \".\") or \"root\",\n elapsed=elapsed * 1000.0,\n time=time.time(),\n )\n filename = os.path.join(self._profile_dir, filename)\n profile.dump_stats(filename)\n\n if self._stream is not None:\n stats = Stats(profile, stream=self._stream)\n stats.sort_stats(*self._sort_by)\n print(\"-\" * 80, file=self._stream)\n path_info = environ.get(\"PATH_INFO\", \"\")\n print(f\"PATH: {path_info!r}\", file=self._stream)\n stats.print_stats(*self._restrictions)\n print(f\"{'-' * 80}\\n\", file=self._stream)\n\n return [body]\n", "path": "src/werkzeug/middleware/profiler.py"}], "after_files": [{"content": "\"\"\"\nApplication Profiler\n====================\n\nThis module provides a middleware that profiles each request with the\n:mod:`cProfile` module. This can help identify bottlenecks in your code\nthat may be slowing down your application.\n\n.. autoclass:: ProfilerMiddleware\n\n:copyright: 2007 Pallets\n:license: BSD-3-Clause\n\"\"\"\nfrom __future__ import annotations\n\nimport os.path\nimport sys\nimport time\nimport typing as t\nfrom pstats import Stats\n\ntry:\n from cProfile import Profile\nexcept ImportError:\n from profile import Profile # type: ignore\n\nif t.TYPE_CHECKING:\n from _typeshed.wsgi import StartResponse\n from _typeshed.wsgi import WSGIApplication\n from _typeshed.wsgi import WSGIEnvironment\n\n\nclass ProfilerMiddleware:\n \"\"\"Wrap a WSGI application and profile the execution of each\n request. Responses are buffered so that timings are more exact.\n\n If ``stream`` is given, :class:`pstats.Stats` are written to it\n after each request. If ``profile_dir`` is given, :mod:`cProfile`\n data files are saved to that directory, one file per request.\n\n The filename can be customized by passing ``filename_format``. 
If\n it is a string, it will be formatted using :meth:`str.format` with\n the following fields available:\n\n - ``{method}`` - The request method; GET, POST, etc.\n - ``{path}`` - The request path or 'root' should one not exist.\n - ``{elapsed}`` - The elapsed time of the request in milliseconds.\n - ``{time}`` - The time of the request.\n\n If it is a callable, it will be called with the WSGI ``environ`` and\n be expected to return a filename string. The ``environ`` dictionary\n will also have the ``\"werkzeug.profiler\"`` key populated with a\n dictionary containing the following fields (more may be added in the\n future):\n - ``{elapsed}`` - The elapsed time of the request in milliseconds.\n - ``{time}`` - The time of the request.\n\n :param app: The WSGI application to wrap.\n :param stream: Write stats to this stream. Disable with ``None``.\n :param sort_by: A tuple of columns to sort stats by. See\n :meth:`pstats.Stats.sort_stats`.\n :param restrictions: A tuple of restrictions to filter stats by. See\n :meth:`pstats.Stats.print_stats`.\n :param profile_dir: Save profile data files to this directory.\n :param filename_format: Format string for profile data file names,\n or a callable returning a name. See explanation above.\n\n .. code-block:: python\n\n from werkzeug.middleware.profiler import ProfilerMiddleware\n app = ProfilerMiddleware(app)\n\n .. versionchanged:: 3.0\n Added the ``\"werkzeug.profiler\"`` key to the ``filename_format(environ)``\n parameter with the ``elapsed`` and ``time`` fields.\n\n .. versionchanged:: 0.15\n Stats are written even if ``profile_dir`` is given, and can be\n disable by passing ``stream=None``.\n\n .. versionadded:: 0.15\n Added ``filename_format``.\n\n .. versionadded:: 0.9\n Added ``restrictions`` and ``profile_dir``.\n \"\"\"\n\n def __init__(\n self,\n app: WSGIApplication,\n stream: t.IO[str] | None = sys.stdout,\n sort_by: t.Iterable[str] = (\"time\", \"calls\"),\n restrictions: t.Iterable[str | int | float] = (),\n profile_dir: str | None = None,\n filename_format: str = \"{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof\",\n ) -> None:\n self._app = app\n self._stream = stream\n self._sort_by = sort_by\n self._restrictions = restrictions\n self._profile_dir = profile_dir\n self._filename_format = filename_format\n\n def __call__(\n self, environ: WSGIEnvironment, start_response: StartResponse\n ) -> t.Iterable[bytes]:\n response_body: list[bytes] = []\n\n def catching_start_response(status, headers, exc_info=None): # type: ignore\n start_response(status, headers, exc_info)\n return response_body.append\n\n def runapp() -> None:\n app_iter = self._app(\n environ, t.cast(\"StartResponse\", catching_start_response)\n )\n response_body.extend(app_iter)\n\n if hasattr(app_iter, \"close\"):\n app_iter.close()\n\n profile = Profile()\n start = time.time()\n profile.runcall(runapp)\n body = b\"\".join(response_body)\n elapsed = time.time() - start\n\n if self._profile_dir is not None:\n if callable(self._filename_format):\n environ[\"werkzeug.profiler\"] = {\n \"elapsed\": elapsed * 1000.0,\n \"time\": time.time(),\n }\n filename = self._filename_format(environ)\n else:\n filename = self._filename_format.format(\n method=environ[\"REQUEST_METHOD\"],\n path=environ[\"PATH_INFO\"].strip(\"/\").replace(\"/\", \".\") or \"root\",\n elapsed=elapsed * 1000.0,\n time=time.time(),\n )\n filename = os.path.join(self._profile_dir, filename)\n profile.dump_stats(filename)\n\n if self._stream is not None:\n stats = Stats(profile, stream=self._stream)\n 
stats.sort_stats(*self._sort_by)\n print(\"-\" * 80, file=self._stream)\n path_info = environ.get(\"PATH_INFO\", \"\")\n print(f\"PATH: {path_info!r}\", file=self._stream)\n stats.print_stats(*self._restrictions)\n print(f\"{'-' * 80}\\n\", file=self._stream)\n\n return [body]\n", "path": "src/werkzeug/middleware/profiler.py"}]} | 1,974 | 542 |
gh_patches_debug_16133 | rasdani/github-patches | git_diff | pyscript__pyscript-1017 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Antigravity example is broken after stdout/display changes
### Checklist
- [X] I added a descriptive title
- [X] I searched for other issues and couldn't find a solution or duplication
- [X] I already searched in Google and didn't find any good information or help
### What happened?
After the changes related to stdout/err and display, the antigravity example is broken. It currently fails with the following error:
```
Traceback (most recent call last):
File "/lib/python3.10/site-packages/_pyodide/_base.py", line 435, in eval_code
.run(globals, locals)
File "/lib/python3.10/site-packages/_pyodide/_base.py", line 304, in run
coroutine = eval(self.code, globals, locals)
File "<exec>", line 1, in <module>
File "/home/pyodide/antigravity.py", line 44, in <module>
_auto = Antigravity(append=True)
File "/home/pyodide/antigravity.py", line 14, in __init__
target = target or sys.stdout._out
AttributeError: '_io.TextIOWrapper' object has no attribute '_out'
```
This is due to the fact that we are not custom changing `sys.stdout` the same way.
It seems like the best option here would be to implement the right `_repr_` method but would love to hear @philippjfr on this
**NOTE:** goes without saying that we should add a test as well :)
### What browsers are you seeing the problem on? (if applicable)
_No response_
### Console info
_No response_
### Additional Context
_No response_
--- END ISSUE ---
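For context on the failure mode described above: after the stdout/display rework, `sys.stdout` is a plain `_io.TextIOWrapper`, so the private `._out` attribute the example leaned on no longer exists. The sketch below is illustrative only (the helper name and the `document.body` fallback are choices made for this example, although the patch further down also ends up defaulting to `document.body`); it shows a defensive way to resolve a render target without touching private attributes of stdout.

```python
import sys

from js import document


def resolve_target(target=None):
    # Previously: target = target or sys.stdout._out
    # sys.stdout is now a plain TextIOWrapper, so ._out raises AttributeError.
    if isinstance(target, str):
        return document.getElementById(target)
    if target is not None:
        return target
    # Prefer an element that is guaranteed to exist over a private attribute.
    return getattr(sys.stdout, "_out", None) or document.body
```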
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/antigravity.py`
Content:
```
1 import random
2 import sys
3
4 from js import DOMParser, document, setInterval
5 from pyodide.ffi import create_proxy
6 from pyodide.http import open_url
7
8
9 class Antigravity:
10
11 url = "./antigravity.svg"
12
13 def __init__(self, target=None, interval=10, append=True, fly=False):
14 target = target or sys.stdout._out
15 self.target = (
16 document.getElementById(target) if isinstance(target, str) else target
17 )
18 doc = DOMParser.new().parseFromString(
19 open_url(self.url).read(), "image/svg+xml"
20 )
21 self.node = doc.documentElement
22 if append:
23 self.target.append(self.node)
24 else:
25 self.target.replaceChildren(self.node)
26 self.xoffset, self.yoffset = 0, 0
27 self.interval = interval
28 if fly:
29 self.fly()
30
31 def fly(self):
32 setInterval(create_proxy(self.move), self.interval)
33
34 def move(self):
35 char = self.node.getElementsByTagName("g")[1]
36 char.setAttribute("transform", f"translate({self.xoffset}, {-self.yoffset})")
37 self.xoffset += random.normalvariate(0, 1) / 20
38 if self.yoffset < 50:
39 self.yoffset += 0.1
40 else:
41 self.yoffset += random.normalvariate(0, 1) / 20
42
43
44 _auto = Antigravity(append=True)
45 fly = _auto.fly
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/antigravity.py b/examples/antigravity.py
--- a/examples/antigravity.py
+++ b/examples/antigravity.py
@@ -1,5 +1,4 @@
import random
-import sys
from js import DOMParser, document, setInterval
from pyodide.ffi import create_proxy
@@ -11,9 +10,10 @@
url = "./antigravity.svg"
def __init__(self, target=None, interval=10, append=True, fly=False):
- target = target or sys.stdout._out
self.target = (
- document.getElementById(target) if isinstance(target, str) else target
+ document.getElementById(target)
+ if isinstance(target, str)
+ else document.body
)
doc = DOMParser.new().parseFromString(
open_url(self.url).read(), "image/svg+xml"
| {"golden_diff": "diff --git a/examples/antigravity.py b/examples/antigravity.py\n--- a/examples/antigravity.py\n+++ b/examples/antigravity.py\n@@ -1,5 +1,4 @@\n import random\n-import sys\n \n from js import DOMParser, document, setInterval\n from pyodide.ffi import create_proxy\n@@ -11,9 +10,10 @@\n url = \"./antigravity.svg\"\n \n def __init__(self, target=None, interval=10, append=True, fly=False):\n- target = target or sys.stdout._out\n self.target = (\n- document.getElementById(target) if isinstance(target, str) else target\n+ document.getElementById(target)\n+ if isinstance(target, str)\n+ else document.body\n )\n doc = DOMParser.new().parseFromString(\n open_url(self.url).read(), \"image/svg+xml\"\n", "issue": "Antigravity example is broken after stdout/display changes\n### Checklist\n\n- [X] I added a descriptive title\n- [X] I searched for other issues and couldn't find a solution or duplication\n- [X] I already searched in Google and didn't find any good information or help\n\n### What happened?\n\nAfter the changes related to stdout/err and display, the antigravity example is broken. It currently fails with the following error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/lib/python3.10/site-packages/_pyodide/_base.py\", line 435, in eval_code\r\n .run(globals, locals)\r\n File \"/lib/python3.10/site-packages/_pyodide/_base.py\", line 304, in run\r\n coroutine = eval(self.code, globals, locals)\r\n File \"<exec>\", line 1, in <module>\r\n File \"/home/pyodide/antigravity.py\", line 44, in <module>\r\n _auto = Antigravity(append=True)\r\n File \"/home/pyodide/antigravity.py\", line 14, in __init__\r\n target = target or sys.stdout._out\r\nAttributeError: '_io.TextIOWrapper' object has no attribute '_out'\r\n```\r\n\r\nThis is due to the fact that we are not custom changing `sys.stdout` the same way. \r\n\r\nIt seems like the best option here would be to implement the right `_repr_` method but would love to hear @philippjfr on this\r\n\r\n**NOTE: ** goes without saying that we should add a test as well :)\n\n### What browsers are you seeing the problem on? 
(if applicable)\n\n_No response_\n\n### Console info\n\n_No response_\n\n### Additional Context\n\n_No response_\n", "before_files": [{"content": "import random\nimport sys\n\nfrom js import DOMParser, document, setInterval\nfrom pyodide.ffi import create_proxy\nfrom pyodide.http import open_url\n\n\nclass Antigravity:\n\n url = \"./antigravity.svg\"\n\n def __init__(self, target=None, interval=10, append=True, fly=False):\n target = target or sys.stdout._out\n self.target = (\n document.getElementById(target) if isinstance(target, str) else target\n )\n doc = DOMParser.new().parseFromString(\n open_url(self.url).read(), \"image/svg+xml\"\n )\n self.node = doc.documentElement\n if append:\n self.target.append(self.node)\n else:\n self.target.replaceChildren(self.node)\n self.xoffset, self.yoffset = 0, 0\n self.interval = interval\n if fly:\n self.fly()\n\n def fly(self):\n setInterval(create_proxy(self.move), self.interval)\n\n def move(self):\n char = self.node.getElementsByTagName(\"g\")[1]\n char.setAttribute(\"transform\", f\"translate({self.xoffset}, {-self.yoffset})\")\n self.xoffset += random.normalvariate(0, 1) / 20\n if self.yoffset < 50:\n self.yoffset += 0.1\n else:\n self.yoffset += random.normalvariate(0, 1) / 20\n\n\n_auto = Antigravity(append=True)\nfly = _auto.fly\n", "path": "examples/antigravity.py"}], "after_files": [{"content": "import random\n\nfrom js import DOMParser, document, setInterval\nfrom pyodide.ffi import create_proxy\nfrom pyodide.http import open_url\n\n\nclass Antigravity:\n\n url = \"./antigravity.svg\"\n\n def __init__(self, target=None, interval=10, append=True, fly=False):\n self.target = (\n document.getElementById(target)\n if isinstance(target, str)\n else document.body\n )\n doc = DOMParser.new().parseFromString(\n open_url(self.url).read(), \"image/svg+xml\"\n )\n self.node = doc.documentElement\n if append:\n self.target.append(self.node)\n else:\n self.target.replaceChildren(self.node)\n self.xoffset, self.yoffset = 0, 0\n self.interval = interval\n if fly:\n self.fly()\n\n def fly(self):\n setInterval(create_proxy(self.move), self.interval)\n\n def move(self):\n char = self.node.getElementsByTagName(\"g\")[1]\n char.setAttribute(\"transform\", f\"translate({self.xoffset}, {-self.yoffset})\")\n self.xoffset += random.normalvariate(0, 1) / 20\n if self.yoffset < 50:\n self.yoffset += 0.1\n else:\n self.yoffset += random.normalvariate(0, 1) / 20\n\n\n_auto = Antigravity(append=True)\nfly = _auto.fly\n", "path": "examples/antigravity.py"}]} | 1,042 | 192 |
gh_patches_debug_35814 | rasdani/github-patches | git_diff | dask__dask-4187 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
read_sql_table gives AttributeError when reading a database
When I try to read from a sql database:
`df_database = create_engine('sqlite:///' + database_path)`
`df = dd.read_sql_table('table', df_database, 'id_driver')`
I get this error: `AttributeError: 'Engine' object has no attribute '_instantiate_plugins'`
If I use the pandas `read_sql_query` function on the same db, it works just fine.
Here's a full trace:
> AttributeError Traceback (most recent call last)
> ipython-input-10-2da58b7e1773 in <module>()
> 3 t0 = time.time()
> ----> 4 df = dd.read_sql_table('table', df_database, 'id_driver')
> 5 t1 = time.time()
> 6
>
> ~/lib/python3.6/site-packages/dask/dataframe/io/sql.py in read_sql_table(table, uri, index_col, divisions, npartitions, limits, columns, bytes_per_chunk, **kwargs)
> 66 if index_col is None:
> 67 raise ValueError("Must specify index column to partition on")
> ---> 68 engine = sa.create_engine(uri)
> 69 meta = sa.MetaData()
> 70 if isinstance(table, six.string_types):
>
> ~/lib/python3.6/site-packages/sqlalchemy/engine/__init__.py in create_engine(*args, **kwargs)
> 389 strategy = kwargs.pop('strategy', default_strategy)
> 390 strategy = strategies.strategies[strategy]
> --> 391 return strategy.create(*args, **kwargs)
> 392
> 393
>
> ~/lib/python3.6/site-packages/sqlalchemy/engine/strategies.py in create(self, name_or_url, **kwargs)
> 50 u = url.make_url(name_or_url)
> 51
> ---> 52 plugins = u._instantiate_plugins(kwargs)
> 53
> 54 u.query.pop('plugin', None)
>
> AttributeError: 'Engine' object has no attribute '_instantiate_plugins'
--- END ISSUE ---
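The traceback comes down to the second argument: `dd.read_sql_table` expects a connection URI string (it calls `sa.create_engine(uri)` internally, as the file below shows), whereas pandas happily accepts an already-built `Engine`. A minimal sketch of the distinction, reusing the reporter's names with a placeholder database path:

```python
import dask.dataframe as dd
import pandas as pd
from sqlalchemy import create_engine

database_path = "/path/to/database.db"      # placeholder
uri = "sqlite:///" + database_path          # plain URI string
engine = create_engine(uri)                 # Engine object built from it

# pandas accepts the Engine, which is why read_sql_query works:
pdf = pd.read_sql_query('SELECT * FROM "table"', engine)

# dask builds its own engine from the URI, so pass the string instead:
ddf = dd.read_sql_table("table", uri, "id_driver")

# Passing the Engine reproduces the reported AttributeError:
# dd.read_sql_table("table", engine, "id_driver")
```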
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dask/dataframe/io/sql.py`
Content:
```
1 import numpy as np
2 import pandas as pd
3
4 from ... import delayed
5 from ...compatibility import string_types
6 from .io import from_delayed, from_pandas
7
8
9 def read_sql_table(table, uri, index_col, divisions=None, npartitions=None,
10 limits=None, columns=None, bytes_per_chunk=256 * 2**20,
11 head_rows=5, schema=None, meta=None, **kwargs):
12 """
13 Create dataframe from an SQL table.
14
15 If neither divisions or npartitions is given, the memory footprint of the
16 first few rows will be determined, and partitions of size ~256MB will
17 be used.
18
19 Parameters
20 ----------
21 table : string or sqlalchemy expression
22 Select columns from here.
23 uri : string
24 Full sqlalchemy URI for the database connection
25 index_col : string
26 Column which becomes the index, and defines the partitioning. Should
27         be an indexed column in the SQL server, and any orderable type. If the
28 type is number or time, then partition boundaries can be inferred from
29         npartitions or bytes_per_chunk; otherwise must supply explicit
30 ``divisions=``.
31 ``index_col`` could be a function to return a value, e.g.,
32 ``sql.func.abs(sql.column('value')).label('abs(value)')``.
33 Labeling columns created by functions or arithmetic operations is
34 required.
35 divisions: sequence
36 Values of the index column to split the table by. If given, this will
37 override npartitions and bytes_per_chunk. The divisions are the value
38 boundaries of the index column used to define the partitions. For
39 example, ``divisions=list('acegikmoqsuwz')`` could be used to partition
40 a string column lexographically into 12 partitions, with the implicit
41 assumption that each partition contains similar numbers of records.
42 npartitions : int
43 Number of partitions, if divisions is not given. Will split the values
44 of the index column linearly between limits, if given, or the column
45 max/min. The index column must be numeric or time for this to work
46 limits: 2-tuple or None
47 Manually give upper and lower range of values for use with npartitions;
48 if None, first fetches max/min from the DB. Upper limit, if
49 given, is inclusive.
50 columns : list of strings or None
51 Which columns to select; if None, gets all; can include sqlalchemy
52 functions, e.g.,
53 ``sql.func.abs(sql.column('value')).label('abs(value)')``.
54 Labeling columns created by functions or arithmetic operations is
55 recommended.
56 bytes_per_chunk: int
57 If both divisions and npartitions is None, this is the target size of
58 each partition, in bytes
59 head_rows: int
60 How many rows to load for inferring the data-types, unless passing meta
61 meta: empty DataFrame or None
62 If provided, do not attempt to infer dtypes, but use these, coercing
63 all chunks on load
64 schema: str or None
65 If using a table name, pass this to sqlalchemy to select which DB
66 schema to use within the URI connection
67 kwargs : dict
68 Additional parameters to pass to `pd.read_sql()`
69
70 Returns
71 -------
72 dask.dataframe
73
74 Examples
75 --------
76 >>> df = dd.read_sql_table('accounts', 'sqlite:///path/to/bank.db',
77 ... npartitions=10, index_col='id') # doctest: +SKIP
78 """
79 import sqlalchemy as sa
80 from sqlalchemy import sql
81 from sqlalchemy.sql import elements
82 if index_col is None:
83 raise ValueError("Must specify index column to partition on")
84 engine = sa.create_engine(uri)
85 m = sa.MetaData()
86 if isinstance(table, string_types):
87 table = sa.Table(table, m, autoload=True, autoload_with=engine,
88 schema=schema)
89
90 index = (table.columns[index_col] if isinstance(index_col, string_types)
91 else index_col)
92 if not isinstance(index_col, string_types + (elements.Label,)):
93 raise ValueError('Use label when passing an SQLAlchemy instance'
94 ' as the index (%s)' % index)
95 if divisions and npartitions:
96 raise TypeError('Must supply either divisions or npartitions, not both')
97
98 columns = ([(table.columns[c] if isinstance(c, string_types) else c)
99 for c in columns]
100 if columns else list(table.columns))
101 if index_col not in columns:
102 columns.append(table.columns[index_col]
103 if isinstance(index_col, string_types)
104 else index_col)
105
106 if isinstance(index_col, string_types):
107 kwargs['index_col'] = index_col
108 else:
109 # function names get pandas auto-named
110 kwargs['index_col'] = index_col.name
111
112 if meta is None:
113         # derive metadata from first few rows
114 q = sql.select(columns).limit(head_rows).select_from(table)
115 head = pd.read_sql(q, engine, **kwargs)
116
117 if head.empty:
118 # no results at all
119 name = table.name
120 head = pd.read_sql_table(name, uri, index_col=index_col)
121 return from_pandas(head, npartitions=1)
122
123 bytes_per_row = (head.memory_usage(deep=True, index=True)).sum() / 5
124 meta = head[:0]
125 else:
126 if divisions is None and npartitions is None:
127 raise ValueError('Must provide divisions or npartitions when'
128 'using explicit meta.')
129
130 if divisions is None:
131 if limits is None:
132 # calculate max and min for given index
133 q = sql.select([sql.func.max(index), sql.func.min(index)]
134 ).select_from(table)
135 minmax = pd.read_sql(q, engine)
136 maxi, mini = minmax.iloc[0]
137 dtype = minmax.dtypes['max_1']
138 else:
139 mini, maxi = limits
140 dtype = pd.Series(limits).dtype
141 if npartitions is None:
142 q = sql.select([sql.func.count(index)]).select_from(table)
143 count = pd.read_sql(q, engine)['count_1'][0]
144 npartitions = round(count * bytes_per_row / bytes_per_chunk) or 1
145 if dtype.kind == "M":
146 divisions = pd.date_range(
147 start=mini, end=maxi, freq='%iS' % (
148 (maxi - mini) / npartitions).total_seconds()).tolist()
149 divisions[0] = mini
150 divisions[-1] = maxi
151 else:
152 divisions = np.linspace(mini, maxi, npartitions + 1).tolist()
153
154 parts = []
155 lowers, uppers = divisions[:-1], divisions[1:]
156 for i, (lower, upper) in enumerate(zip(lowers, uppers)):
157 cond = index <= upper if i == len(lowers) - 1 else index < upper
158 q = sql.select(columns).where(sql.and_(index >= lower, cond)
159 ).select_from(table)
160 parts.append(delayed(_read_sql_chunk)(q, uri, meta, **kwargs))
161
162 return from_delayed(parts, meta, divisions=divisions)
163
164
165 def _read_sql_chunk(q, uri, meta, **kwargs):
166 df = pd.read_sql(q, uri, **kwargs)
167 if df.empty:
168 return meta
169 else:
170 return df.astype(meta.dtypes.to_dict(), copy=False)
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dask/dataframe/io/sql.py b/dask/dataframe/io/sql.py
--- a/dask/dataframe/io/sql.py
+++ b/dask/dataframe/io/sql.py
@@ -8,7 +8,7 @@
def read_sql_table(table, uri, index_col, divisions=None, npartitions=None,
limits=None, columns=None, bytes_per_chunk=256 * 2**20,
- head_rows=5, schema=None, meta=None, **kwargs):
+ head_rows=5, schema=None, meta=None, engine_kwargs=None, **kwargs):
"""
Create dataframe from an SQL table.
@@ -53,17 +53,19 @@
``sql.func.abs(sql.column('value')).label('abs(value)')``.
Labeling columns created by functions or arithmetic operations is
recommended.
- bytes_per_chunk: int
+ bytes_per_chunk : int
If both divisions and npartitions is None, this is the target size of
each partition, in bytes
- head_rows: int
+ head_rows : int
How many rows to load for inferring the data-types, unless passing meta
- meta: empty DataFrame or None
+ meta : empty DataFrame or None
If provided, do not attempt to infer dtypes, but use these, coercing
all chunks on load
- schema: str or None
+ schema : str or None
If using a table name, pass this to sqlalchemy to select which DB
schema to use within the URI connection
+ engine_kwargs : dict or None
+ Specific db engine parameters for sqlalchemy
kwargs : dict
Additional parameters to pass to `pd.read_sql()`
@@ -81,7 +83,8 @@
from sqlalchemy.sql import elements
if index_col is None:
raise ValueError("Must specify index column to partition on")
- engine = sa.create_engine(uri)
+ engine_kwargs = {} if engine_kwargs is None else engine_kwargs
+ engine = sa.create_engine(uri, **engine_kwargs)
m = sa.MetaData()
if isinstance(table, string_types):
table = sa.Table(table, m, autoload=True, autoload_with=engine,
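With the patch above, engine-level options can still be forwarded without handing `read_sql_table` an `Engine` object; a hypothetical call (the option value is invented for illustration) would look like:

```python
import dask.dataframe as dd

ddf = dd.read_sql_table(
    "accounts", "sqlite:///path/to/bank.db", index_col="id",
    npartitions=10,
    engine_kwargs={"echo": False},  # passed through to sqlalchemy.create_engine
)
```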
| {"golden_diff": "diff --git a/dask/dataframe/io/sql.py b/dask/dataframe/io/sql.py\n--- a/dask/dataframe/io/sql.py\n+++ b/dask/dataframe/io/sql.py\n@@ -8,7 +8,7 @@\n \n def read_sql_table(table, uri, index_col, divisions=None, npartitions=None,\n limits=None, columns=None, bytes_per_chunk=256 * 2**20,\n- head_rows=5, schema=None, meta=None, **kwargs):\n+ head_rows=5, schema=None, meta=None, engine_kwargs=None, **kwargs):\n \"\"\"\n Create dataframe from an SQL table.\n \n@@ -53,17 +53,19 @@\n ``sql.func.abs(sql.column('value')).label('abs(value)')``.\n Labeling columns created by functions or arithmetic operations is\n recommended.\n- bytes_per_chunk: int\n+ bytes_per_chunk : int\n If both divisions and npartitions is None, this is the target size of\n each partition, in bytes\n- head_rows: int\n+ head_rows : int\n How many rows to load for inferring the data-types, unless passing meta\n- meta: empty DataFrame or None\n+ meta : empty DataFrame or None\n If provided, do not attempt to infer dtypes, but use these, coercing\n all chunks on load\n- schema: str or None\n+ schema : str or None\n If using a table name, pass this to sqlalchemy to select which DB\n schema to use within the URI connection\n+ engine_kwargs : dict or None\n+ Specific db engine parameters for sqlalchemy\n kwargs : dict\n Additional parameters to pass to `pd.read_sql()`\n \n@@ -81,7 +83,8 @@\n from sqlalchemy.sql import elements\n if index_col is None:\n raise ValueError(\"Must specify index column to partition on\")\n- engine = sa.create_engine(uri)\n+ engine_kwargs = {} if engine_kwargs is None else engine_kwargs\n+ engine = sa.create_engine(uri, **engine_kwargs)\n m = sa.MetaData()\n if isinstance(table, string_types):\n table = sa.Table(table, m, autoload=True, autoload_with=engine,\n", "issue": "read_sql_table gives AttributeError when reading a database\nWhen I try to read from a sql database:\r\n`df_database = create_engine('sqlite:///' + database_path)`\r\n`df = dd.read_sql_table('table', df_database, 'id_driver')`\r\n\r\nI get this error: `AttributeError: 'Engine' object has no attribute '_instantiate_plugins'`\r\n\r\nIf I use the pandas `read_sql_query` function on the same db, it works just fine.\r\n\r\nHere's a full trace:\r\n> AttributeError Traceback (most recent call last)\r\n> ipython-input-10-2da58b7e1773 in <module>()\r\n> 3 t0 = time.time()\r\n> ----> 4 df = dd.read_sql_table('table', df_database, 'id_driver')\r\n> 5 t1 = time.time()\r\n> 6 \r\n> \r\n> ~/lib/python3.6/site-packages/dask/dataframe/io/sql.py in read_sql_table(table, uri, index_col, divisions, npartitions, limits, columns, bytes_per_chunk, **kwargs)\r\n> 66 if index_col is None:\r\n> 67 raise ValueError(\"Must specify index column to partition on\")\r\n> ---> 68 engine = sa.create_engine(uri)\r\n> 69 meta = sa.MetaData()\r\n> 70 if isinstance(table, six.string_types):\r\n> \r\n> ~/lib/python3.6/site-packages/sqlalchemy/engine/__init__.py in create_engine(*args, **kwargs)\r\n> 389 strategy = kwargs.pop('strategy', default_strategy)\r\n> 390 strategy = strategies.strategies[strategy]\r\n> --> 391 return strategy.create(*args, **kwargs)\r\n> 392 \r\n> 393 \r\n> \r\n> ~/lib/python3.6/site-packages/sqlalchemy/engine/strategies.py in create(self, name_or_url, **kwargs)\r\n> 50 u = url.make_url(name_or_url)\r\n> 51 \r\n> ---> 52 plugins = u._instantiate_plugins(kwargs)\r\n> 53 \r\n> 54 u.query.pop('plugin', None)\r\n> \r\n> AttributeError: 'Engine' object has no attribute '_instantiate_plugins'\n", "before_files": [{"content": "import numpy as 
np\nimport pandas as pd\n\nfrom ... import delayed\nfrom ...compatibility import string_types\nfrom .io import from_delayed, from_pandas\n\n\ndef read_sql_table(table, uri, index_col, divisions=None, npartitions=None,\n limits=None, columns=None, bytes_per_chunk=256 * 2**20,\n head_rows=5, schema=None, meta=None, **kwargs):\n \"\"\"\n Create dataframe from an SQL table.\n\n If neither divisions or npartitions is given, the memory footprint of the\n first few rows will be determined, and partitions of size ~256MB will\n be used.\n\n Parameters\n ----------\n table : string or sqlalchemy expression\n Select columns from here.\n uri : string\n Full sqlalchemy URI for the database connection\n index_col : string\n Column which becomes the index, and defines the partitioning. Should\n be a indexed column in the SQL server, and any orderable type. If the\n type is number or time, then partition boundaries can be inferred from\n npartitions or bytes_per_chunk; otherwide must supply explicit\n ``divisions=``.\n ``index_col`` could be a function to return a value, e.g.,\n ``sql.func.abs(sql.column('value')).label('abs(value)')``.\n Labeling columns created by functions or arithmetic operations is\n required.\n divisions: sequence\n Values of the index column to split the table by. If given, this will\n override npartitions and bytes_per_chunk. The divisions are the value\n boundaries of the index column used to define the partitions. For\n example, ``divisions=list('acegikmoqsuwz')`` could be used to partition\n a string column lexographically into 12 partitions, with the implicit\n assumption that each partition contains similar numbers of records.\n npartitions : int\n Number of partitions, if divisions is not given. Will split the values\n of the index column linearly between limits, if given, or the column\n max/min. The index column must be numeric or time for this to work\n limits: 2-tuple or None\n Manually give upper and lower range of values for use with npartitions;\n if None, first fetches max/min from the DB. Upper limit, if\n given, is inclusive.\n columns : list of strings or None\n Which columns to select; if None, gets all; can include sqlalchemy\n functions, e.g.,\n ``sql.func.abs(sql.column('value')).label('abs(value)')``.\n Labeling columns created by functions or arithmetic operations is\n recommended.\n bytes_per_chunk: int\n If both divisions and npartitions is None, this is the target size of\n each partition, in bytes\n head_rows: int\n How many rows to load for inferring the data-types, unless passing meta\n meta: empty DataFrame or None\n If provided, do not attempt to infer dtypes, but use these, coercing\n all chunks on load\n schema: str or None\n If using a table name, pass this to sqlalchemy to select which DB\n schema to use within the URI connection\n kwargs : dict\n Additional parameters to pass to `pd.read_sql()`\n\n Returns\n -------\n dask.dataframe\n\n Examples\n --------\n >>> df = dd.read_sql_table('accounts', 'sqlite:///path/to/bank.db',\n ... 
npartitions=10, index_col='id') # doctest: +SKIP\n \"\"\"\n import sqlalchemy as sa\n from sqlalchemy import sql\n from sqlalchemy.sql import elements\n if index_col is None:\n raise ValueError(\"Must specify index column to partition on\")\n engine = sa.create_engine(uri)\n m = sa.MetaData()\n if isinstance(table, string_types):\n table = sa.Table(table, m, autoload=True, autoload_with=engine,\n schema=schema)\n\n index = (table.columns[index_col] if isinstance(index_col, string_types)\n else index_col)\n if not isinstance(index_col, string_types + (elements.Label,)):\n raise ValueError('Use label when passing an SQLAlchemy instance'\n ' as the index (%s)' % index)\n if divisions and npartitions:\n raise TypeError('Must supply either divisions or npartitions, not both')\n\n columns = ([(table.columns[c] if isinstance(c, string_types) else c)\n for c in columns]\n if columns else list(table.columns))\n if index_col not in columns:\n columns.append(table.columns[index_col]\n if isinstance(index_col, string_types)\n else index_col)\n\n if isinstance(index_col, string_types):\n kwargs['index_col'] = index_col\n else:\n # function names get pandas auto-named\n kwargs['index_col'] = index_col.name\n\n if meta is None:\n # derrive metadata from first few rows\n q = sql.select(columns).limit(head_rows).select_from(table)\n head = pd.read_sql(q, engine, **kwargs)\n\n if head.empty:\n # no results at all\n name = table.name\n head = pd.read_sql_table(name, uri, index_col=index_col)\n return from_pandas(head, npartitions=1)\n\n bytes_per_row = (head.memory_usage(deep=True, index=True)).sum() / 5\n meta = head[:0]\n else:\n if divisions is None and npartitions is None:\n raise ValueError('Must provide divisions or npartitions when'\n 'using explicit meta.')\n\n if divisions is None:\n if limits is None:\n # calculate max and min for given index\n q = sql.select([sql.func.max(index), sql.func.min(index)]\n ).select_from(table)\n minmax = pd.read_sql(q, engine)\n maxi, mini = minmax.iloc[0]\n dtype = minmax.dtypes['max_1']\n else:\n mini, maxi = limits\n dtype = pd.Series(limits).dtype\n if npartitions is None:\n q = sql.select([sql.func.count(index)]).select_from(table)\n count = pd.read_sql(q, engine)['count_1'][0]\n npartitions = round(count * bytes_per_row / bytes_per_chunk) or 1\n if dtype.kind == \"M\":\n divisions = pd.date_range(\n start=mini, end=maxi, freq='%iS' % (\n (maxi - mini) / npartitions).total_seconds()).tolist()\n divisions[0] = mini\n divisions[-1] = maxi\n else:\n divisions = np.linspace(mini, maxi, npartitions + 1).tolist()\n\n parts = []\n lowers, uppers = divisions[:-1], divisions[1:]\n for i, (lower, upper) in enumerate(zip(lowers, uppers)):\n cond = index <= upper if i == len(lowers) - 1 else index < upper\n q = sql.select(columns).where(sql.and_(index >= lower, cond)\n ).select_from(table)\n parts.append(delayed(_read_sql_chunk)(q, uri, meta, **kwargs))\n\n return from_delayed(parts, meta, divisions=divisions)\n\n\ndef _read_sql_chunk(q, uri, meta, **kwargs):\n df = pd.read_sql(q, uri, **kwargs)\n if df.empty:\n return meta\n else:\n return df.astype(meta.dtypes.to_dict(), copy=False)\n", "path": "dask/dataframe/io/sql.py"}], "after_files": [{"content": "import numpy as np\nimport pandas as pd\n\nfrom ... 
import delayed\nfrom ...compatibility import string_types\nfrom .io import from_delayed, from_pandas\n\n\ndef read_sql_table(table, uri, index_col, divisions=None, npartitions=None,\n limits=None, columns=None, bytes_per_chunk=256 * 2**20,\n head_rows=5, schema=None, meta=None, engine_kwargs=None, **kwargs):\n \"\"\"\n Create dataframe from an SQL table.\n\n If neither divisions or npartitions is given, the memory footprint of the\n first few rows will be determined, and partitions of size ~256MB will\n be used.\n\n Parameters\n ----------\n table : string or sqlalchemy expression\n Select columns from here.\n uri : string\n Full sqlalchemy URI for the database connection\n index_col : string\n Column which becomes the index, and defines the partitioning. Should\n be a indexed column in the SQL server, and any orderable type. If the\n type is number or time, then partition boundaries can be inferred from\n npartitions or bytes_per_chunk; otherwide must supply explicit\n ``divisions=``.\n ``index_col`` could be a function to return a value, e.g.,\n ``sql.func.abs(sql.column('value')).label('abs(value)')``.\n Labeling columns created by functions or arithmetic operations is\n required.\n divisions: sequence\n Values of the index column to split the table by. If given, this will\n override npartitions and bytes_per_chunk. The divisions are the value\n boundaries of the index column used to define the partitions. For\n example, ``divisions=list('acegikmoqsuwz')`` could be used to partition\n a string column lexographically into 12 partitions, with the implicit\n assumption that each partition contains similar numbers of records.\n npartitions : int\n Number of partitions, if divisions is not given. Will split the values\n of the index column linearly between limits, if given, or the column\n max/min. The index column must be numeric or time for this to work\n limits: 2-tuple or None\n Manually give upper and lower range of values for use with npartitions;\n if None, first fetches max/min from the DB. Upper limit, if\n given, is inclusive.\n columns : list of strings or None\n Which columns to select; if None, gets all; can include sqlalchemy\n functions, e.g.,\n ``sql.func.abs(sql.column('value')).label('abs(value)')``.\n Labeling columns created by functions or arithmetic operations is\n recommended.\n bytes_per_chunk : int\n If both divisions and npartitions is None, this is the target size of\n each partition, in bytes\n head_rows : int\n How many rows to load for inferring the data-types, unless passing meta\n meta : empty DataFrame or None\n If provided, do not attempt to infer dtypes, but use these, coercing\n all chunks on load\n schema : str or None\n If using a table name, pass this to sqlalchemy to select which DB\n schema to use within the URI connection\n engine_kwargs : dict or None\n Specific db engine parameters for sqlalchemy\n kwargs : dict\n Additional parameters to pass to `pd.read_sql()`\n\n Returns\n -------\n dask.dataframe\n\n Examples\n --------\n >>> df = dd.read_sql_table('accounts', 'sqlite:///path/to/bank.db',\n ... 
npartitions=10, index_col='id') # doctest: +SKIP\n \"\"\"\n import sqlalchemy as sa\n from sqlalchemy import sql\n from sqlalchemy.sql import elements\n if index_col is None:\n raise ValueError(\"Must specify index column to partition on\")\n engine_kwargs = {} if engine_kwargs is None else engine_kwargs\n engine = sa.create_engine(uri, **engine_kwargs)\n m = sa.MetaData()\n if isinstance(table, string_types):\n table = sa.Table(table, m, autoload=True, autoload_with=engine,\n schema=schema)\n\n index = (table.columns[index_col] if isinstance(index_col, string_types)\n else index_col)\n if not isinstance(index_col, string_types + (elements.Label,)):\n raise ValueError('Use label when passing an SQLAlchemy instance'\n ' as the index (%s)' % index)\n if divisions and npartitions:\n raise TypeError('Must supply either divisions or npartitions, not both')\n\n columns = ([(table.columns[c] if isinstance(c, string_types) else c)\n for c in columns]\n if columns else list(table.columns))\n if index_col not in columns:\n columns.append(table.columns[index_col]\n if isinstance(index_col, string_types)\n else index_col)\n\n if isinstance(index_col, string_types):\n kwargs['index_col'] = index_col\n else:\n # function names get pandas auto-named\n kwargs['index_col'] = index_col.name\n\n if meta is None:\n # derrive metadata from first few rows\n q = sql.select(columns).limit(head_rows).select_from(table)\n head = pd.read_sql(q, engine, **kwargs)\n\n if head.empty:\n # no results at all\n name = table.name\n head = pd.read_sql_table(name, uri, index_col=index_col)\n return from_pandas(head, npartitions=1)\n\n bytes_per_row = (head.memory_usage(deep=True, index=True)).sum() / 5\n meta = head[:0]\n else:\n if divisions is None and npartitions is None:\n raise ValueError('Must provide divisions or npartitions when'\n 'using explicit meta.')\n\n if divisions is None:\n if limits is None:\n # calculate max and min for given index\n q = sql.select([sql.func.max(index), sql.func.min(index)]\n ).select_from(table)\n minmax = pd.read_sql(q, engine)\n maxi, mini = minmax.iloc[0]\n dtype = minmax.dtypes['max_1']\n else:\n mini, maxi = limits\n dtype = pd.Series(limits).dtype\n if npartitions is None:\n q = sql.select([sql.func.count(index)]).select_from(table)\n count = pd.read_sql(q, engine)['count_1'][0]\n npartitions = round(count * bytes_per_row / bytes_per_chunk) or 1\n if dtype.kind == \"M\":\n divisions = pd.date_range(\n start=mini, end=maxi, freq='%iS' % (\n (maxi - mini) / npartitions).total_seconds()).tolist()\n divisions[0] = mini\n divisions[-1] = maxi\n else:\n divisions = np.linspace(mini, maxi, npartitions + 1).tolist()\n\n parts = []\n lowers, uppers = divisions[:-1], divisions[1:]\n for i, (lower, upper) in enumerate(zip(lowers, uppers)):\n cond = index <= upper if i == len(lowers) - 1 else index < upper\n q = sql.select(columns).where(sql.and_(index >= lower, cond)\n ).select_from(table)\n parts.append(delayed(_read_sql_chunk)(q, uri, meta, **kwargs))\n\n return from_delayed(parts, meta, divisions=divisions)\n\n\ndef _read_sql_chunk(q, uri, meta, **kwargs):\n df = pd.read_sql(q, uri, **kwargs)\n if df.empty:\n return meta\n else:\n return df.astype(meta.dtypes.to_dict(), copy=False)\n", "path": "dask/dataframe/io/sql.py"}]} | 2,783 | 484 |
gh_patches_debug_58413 | rasdani/github-patches | git_diff | instadeepai__Mava-595 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[TEST] Jax Trainer Init
### What do you want to test?
Jax trainer init components
### Outline of test structure
* Unit tests
* Test components and hooks
### Definition of done
Passing checks, cover all hooks, edge cases considered
### Mandatory checklist before making a PR
* [ ] The success criteria laid down in “Definition of done” are met.
* [ ] Test code is documented - docstrings for methods and classes, static types for arguments.
* [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.
--- END ISSUE ---
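One possible shape for such a unit test is sketched below against the `SingleTrainerInit.on_building_init_end` hook shown in the file further down. The `SimpleNamespace` builder stand-in, the store values, and the import path are assumptions made for illustration, not Mava's actual test fixtures.

```python
from types import SimpleNamespace

from mava.components.jax.training.trainer import SingleTrainerInit


def test_single_trainer_init_assigns_all_networks_to_one_trainer():
    # Minimal fake builder: the hook only reads and writes builder.store.
    store = SimpleNamespace(
        unique_net_keys=["network_agent"],
        agents=["agent_0", "agent_1"],
        network_sampling_setup=[["network_agent"]],
        network_factory=lambda: {"network_agent": "fake_network"},
    )
    builder = SimpleNamespace(store=store)

    SingleTrainerInit().on_building_init_end(builder)

    # All unique networks should be assigned to the single trainer.
    assert builder.store.trainer_networks == {"trainer": ["network_agent"]}
    assert builder.store.table_network_config["trainer"] == ["network_agent"]
    assert builder.store.networks == {"network_agent": "fake_network"}
```

Analogous cases would cover `OneTrainerPerNetworkInit`, `CustomTrainerInit` (including the `ValueError` raised for an empty `trainer_networks` dict), and the `on_training_utility_fns` hook.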
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mava/components/jax/training/trainer.py`
Content:
```
1 # python3
2 # Copyright 2021 InstaDeep Ltd. All rights reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """Trainer components for system builders."""
17
18 import abc
19 from dataclasses import dataclass, field
20 from types import SimpleNamespace
21 from typing import Any, Callable, Dict, Optional
22
23 from mava.components.jax import Component
24 from mava.core_jax import SystemBuilder, SystemTrainer
25 from mava.utils.sort_utils import sort_str_num
26
27
28 class BaseTrainerInit(Component):
29 @abc.abstractmethod
30 def __init__(
31 self,
32 config: Any,
33 ):
34 """Initialise system init components.
35
36 Args:
37 config : a dataclass specifying the component parameters.
38 """
39 self.config = config
40
41 @abc.abstractmethod
42 def on_building_init_end(self, builder: SystemBuilder) -> None:
43 """Summary."""
44 pass
45
46 @abc.abstractmethod
47 def on_training_utility_fns(self, trainer: SystemTrainer) -> None:
48 """Summary."""
49 pass
50
51 @staticmethod
52 def name() -> str:
53 """Component name."""
54
55 return "trainer_init"
56
57
58 class SingleTrainerInit(BaseTrainerInit):
59 def __init__(self, config: SimpleNamespace = SimpleNamespace()):
60 """Initialises a single trainer.
61
62 Single trainer is used to train all networks.
63
64 Args:
65 config : a dataclass specifying the component parameters.
66 """
67 self.config = config
68
69 def on_building_init_end(self, builder: SystemBuilder) -> None:
70 """Assigns trainers to networks for training.
71
72 Args:
73 builder : the system builder
74 Raises:
75 ValueError: Raises an error when trainer_networks is not
76 set to single_trainer.
77 """
78 unique_net_keys = builder.store.unique_net_keys
79
80 # Setup trainer_networks
81
82 builder.store.trainer_networks = {"trainer": unique_net_keys}
83
84 # Get all the unique trainer network keys
85 all_trainer_net_keys = []
86 for trainer_nets in builder.store.trainer_networks.values():
87 all_trainer_net_keys.extend(trainer_nets)
88 unique_trainer_net_keys = sort_str_num(list(set(all_trainer_net_keys)))
89
90 # Check that all agent_net_keys are in trainer_networks
91 assert unique_net_keys == unique_trainer_net_keys
92 # Setup specs for each network
93 builder.store.net_spec_keys = {}
94 for i in range(len(unique_net_keys)):
95 builder.store.net_spec_keys[unique_net_keys[i]] = builder.store.agents[
96 i % len(builder.store.agents)
97 ]
98
99 # Setup table_network_config
100 builder.store.table_network_config = {}
101 for trainer_key in builder.store.trainer_networks.keys():
102 most_matches = 0
103 trainer_nets = builder.store.trainer_networks[trainer_key]
104 for sample in builder.store.network_sampling_setup:
105 matches = 0
106 for entry in sample:
107 if entry in trainer_nets:
108 matches += 1
109 if most_matches < matches:
110 matches = most_matches
111 builder.store.table_network_config[trainer_key] = sample
112
113 builder.store.networks = builder.store.network_factory()
114
115 def on_training_utility_fns(self, trainer: SystemTrainer) -> None:
116 """_summary_"""
117 # Convert network keys for the trainer.
118 trainer.store.trainer_table_entry = trainer.store.table_network_config[
119 trainer.store.trainer_id
120 ]
121 trainer.store.trainer_agents = trainer.store.agents[
122 : len(trainer.store.trainer_table_entry)
123 ]
124 trainer.store.trainer_agent_net_keys = {
125 agent: trainer.store.trainer_table_entry[a_i]
126 for a_i, agent in enumerate(trainer.store.trainer_agents)
127 }
128
129
130 class OneTrainerPerNetworkInit(BaseTrainerInit):
131 def __init__(self, config: SimpleNamespace = SimpleNamespace()):
132         """Initialises multiple trainers.
133
134         A different trainer will be dedicated to training each network.
135
136 Args:
137 config : a dataclass specifying the component parameters.
138 """
139 self.config = config
140
141 def on_building_init_end(self, builder: SystemBuilder) -> None:
142 """.
143
144 Args:
145 builder : the system builder
146 Raises:
147 ValueError: Raises an error when trainer_networks is not
148 set to one_trainer_per_network.
149 """
150 unique_net_keys = builder.store.unique_net_keys
151
152 # Setup trainer_networks
153 builder.store.trainer_networks = {
154 f"trainer_{i}": [unique_net_keys[i]] for i in range(len(unique_net_keys))
155 }
156
157 # Get all the unique trainer network keys
158 all_trainer_net_keys = []
159 for trainer_nets in builder.store.trainer_networks.values():
160 all_trainer_net_keys.extend(trainer_nets)
161 unique_trainer_net_keys = sort_str_num(list(set(all_trainer_net_keys)))
162
163 # Check that all agent_net_keys are in trainer_networks
164 assert unique_net_keys == unique_trainer_net_keys
165 # Setup specs for each network
166 builder.store.net_spec_keys = {}
167 for i in range(len(unique_net_keys)):
168 builder.store.net_spec_keys[unique_net_keys[i]] = builder.store.agents[
169 i % len(builder.store.agents)
170 ]
171
172 # Setup table_network_config
173 builder.store.table_network_config = {}
174 for trainer_key in builder.store.trainer_networks.keys():
175 most_matches = 0
176 trainer_nets = builder.store.trainer_networks[trainer_key]
177 for sample in builder.store.network_sampling_setup:
178 matches = 0
179 for entry in sample:
180 if entry in trainer_nets:
181 matches += 1
182 if most_matches < matches:
183 matches = most_matches
184 builder.store.table_network_config[trainer_key] = sample
185
186 builder.store.networks = builder.store.network_factory()
187
188 def on_training_utility_fns(self, trainer: SystemTrainer) -> None:
189 """_summary_"""
190 # Convert network keys for the trainer.
191 trainer.store.trainer_table_entry = trainer.store.table_network_config[
192 trainer.store.trainer_id
193 ]
194 trainer.store.trainer_agents = trainer.store.agents[
195 : len(trainer.store.trainer_table_entry)
196 ]
197 trainer.store.trainer_agent_net_keys = {
198 agent: trainer.store.trainer_table_entry[a_i]
199 for a_i, agent in enumerate(trainer.store.trainer_agents)
200 }
201
202
203 @dataclass
204 class CustomTrainerInitConfig:
205 trainer_networks: Dict = field(default_factory=lambda: {})
206
207
208 class CustomTrainerInit(BaseTrainerInit):
209 def __init__(self, config: CustomTrainerInitConfig = CustomTrainerInitConfig()):
210 """Initialises custom trainers.
211
212 Custom trainer network configuration can be given as a dictionary
213 assigning specific trainers to specific networks.
214
215 Args:
216 config : a dataclass specifying the component parameters.
217 """
218
219 self.config = config
220
221 def on_building_init_end(self, builder: SystemBuilder) -> None:
222 """Assigns trainers to networks for training.
223
224 Args:
225 builder : the system builder
226 Raises:
227 ValueError: Raises an error when trainer_networks is not
228 passed in as a dictionary.
229 """
230 trainer_networks = self.config.trainer_networks
231 unique_net_keys = builder.store.unique_net_keys
232
233 # Setup trainer_networks
234 if not isinstance(trainer_networks, dict) or trainer_networks == {}:
235
236 raise ValueError("trainer_networks must be a dictionary.")
237
238 builder.store.trainer_networks = trainer_networks
239
240 # Get all the unique trainer network keys
241 all_trainer_net_keys = []
242 for trainer_nets in builder.store.trainer_networks.values():
243 all_trainer_net_keys.extend(trainer_nets)
244 unique_trainer_net_keys = sort_str_num(list(set(all_trainer_net_keys)))
245
246 # Check that all agent_net_keys are in trainer_networks
247 assert unique_net_keys == unique_trainer_net_keys
248 # Setup specs for each network
249 builder.store.net_spec_keys = {}
250 for i in range(len(unique_net_keys)):
251 builder.store.net_spec_keys[unique_net_keys[i]] = builder.store.agents[
252 i % len(builder.store.agents)
253 ]
254
255 # Setup table_network_config
256 builder.store.table_network_config = {}
257 for trainer_key in builder.store.trainer_networks.keys():
258 most_matches = 0
259 trainer_nets = builder.store.trainer_networks[trainer_key]
260 for sample in builder.store.network_sampling_setup:
261 matches = 0
262 for entry in sample:
263 if entry in trainer_nets:
264 matches += 1
265 if most_matches < matches:
266 matches = most_matches
267 builder.store.table_network_config[trainer_key] = sample
268
269 builder.store.networks = builder.store.network_factory()
270
271 def on_training_utility_fns(self, trainer: SystemTrainer) -> None:
272 """_summary_"""
273 # Convert network keys for the trainer.
274 trainer.store.trainer_table_entry = trainer.store.table_network_config[
275 trainer.store.trainer_id
276 ]
277 trainer.store.trainer_agents = trainer.store.agents[
278 : len(trainer.store.trainer_table_entry)
279 ]
280 trainer.store.trainer_agent_net_keys = {
281 agent: trainer.store.trainer_table_entry[a_i]
282 for a_i, agent in enumerate(trainer.store.trainer_agents)
283 }
284
285 @staticmethod
286 def config_class() -> Optional[Callable]:
287 """Config class used for component.
288
289 Returns:
290 config class/dataclass for component.
291 """
292 return CustomTrainerInitConfig
293
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mava/components/jax/training/trainer.py b/mava/components/jax/training/trainer.py
--- a/mava/components/jax/training/trainer.py
+++ b/mava/components/jax/training/trainer.py
@@ -233,7 +233,7 @@
# Setup trainer_networks
if not isinstance(trainer_networks, dict) or trainer_networks == {}:
- raise ValueError("trainer_networks must be a dictionary.")
+ raise ValueError("trainer_networks must be a non-empty dictionary.")
builder.store.trainer_networks = trainer_networks
| {"golden_diff": "diff --git a/mava/components/jax/training/trainer.py b/mava/components/jax/training/trainer.py\n--- a/mava/components/jax/training/trainer.py\n+++ b/mava/components/jax/training/trainer.py\n@@ -233,7 +233,7 @@\n # Setup trainer_networks\n if not isinstance(trainer_networks, dict) or trainer_networks == {}:\n \n- raise ValueError(\"trainer_networks must be a dictionary.\")\n+ raise ValueError(\"trainer_networks must be a non-empty dictionary.\")\n \n builder.store.trainer_networks = trainer_networks\n", "issue": "[TEST] Jax Trainer Init\n### What do you want to test?\r\nJax trainer init components\r\n\r\n### Outline of test structure\r\n* Unit tests\r\n* Test components and hooks\r\n\r\n### Definition of done\r\nPassing checks, cover all hooks, edge cases considered\r\n\r\n### Mandatory checklist before making a PR\r\n* [ ] The success criteria laid down in \u201cDefinition of done\u201d are met.\r\n* [ ] Test code is documented - docstrings for methods and classes, static types for arguments.\r\n* [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.\n", "before_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Trainer components for system builders.\"\"\"\n\nimport abc\nfrom dataclasses import dataclass, field\nfrom types import SimpleNamespace\nfrom typing import Any, Callable, Dict, Optional\n\nfrom mava.components.jax import Component\nfrom mava.core_jax import SystemBuilder, SystemTrainer\nfrom mava.utils.sort_utils import sort_str_num\n\n\nclass BaseTrainerInit(Component):\n @abc.abstractmethod\n def __init__(\n self,\n config: Any,\n ):\n \"\"\"Initialise system init components.\n\n Args:\n config : a dataclass specifying the component parameters.\n \"\"\"\n self.config = config\n\n @abc.abstractmethod\n def on_building_init_end(self, builder: SystemBuilder) -> None:\n \"\"\"Summary.\"\"\"\n pass\n\n @abc.abstractmethod\n def on_training_utility_fns(self, trainer: SystemTrainer) -> None:\n \"\"\"Summary.\"\"\"\n pass\n\n @staticmethod\n def name() -> str:\n \"\"\"Component name.\"\"\"\n\n return \"trainer_init\"\n\n\nclass SingleTrainerInit(BaseTrainerInit):\n def __init__(self, config: SimpleNamespace = SimpleNamespace()):\n \"\"\"Initialises a single trainer.\n\n Single trainer is used to train all networks.\n\n Args:\n config : a dataclass specifying the component parameters.\n \"\"\"\n self.config = config\n\n def on_building_init_end(self, builder: SystemBuilder) -> None:\n \"\"\"Assigns trainers to networks for training.\n\n Args:\n builder : the system builder\n Raises:\n ValueError: Raises an error when trainer_networks is not\n set to single_trainer.\n \"\"\"\n unique_net_keys = builder.store.unique_net_keys\n\n # Setup trainer_networks\n\n builder.store.trainer_networks = {\"trainer\": unique_net_keys}\n\n # Get all the unique trainer network keys\n all_trainer_net_keys = []\n for trainer_nets in builder.store.trainer_networks.values():\n 
all_trainer_net_keys.extend(trainer_nets)\n unique_trainer_net_keys = sort_str_num(list(set(all_trainer_net_keys)))\n\n # Check that all agent_net_keys are in trainer_networks\n assert unique_net_keys == unique_trainer_net_keys\n # Setup specs for each network\n builder.store.net_spec_keys = {}\n for i in range(len(unique_net_keys)):\n builder.store.net_spec_keys[unique_net_keys[i]] = builder.store.agents[\n i % len(builder.store.agents)\n ]\n\n # Setup table_network_config\n builder.store.table_network_config = {}\n for trainer_key in builder.store.trainer_networks.keys():\n most_matches = 0\n trainer_nets = builder.store.trainer_networks[trainer_key]\n for sample in builder.store.network_sampling_setup:\n matches = 0\n for entry in sample:\n if entry in trainer_nets:\n matches += 1\n if most_matches < matches:\n matches = most_matches\n builder.store.table_network_config[trainer_key] = sample\n\n builder.store.networks = builder.store.network_factory()\n\n def on_training_utility_fns(self, trainer: SystemTrainer) -> None:\n \"\"\"_summary_\"\"\"\n # Convert network keys for the trainer.\n trainer.store.trainer_table_entry = trainer.store.table_network_config[\n trainer.store.trainer_id\n ]\n trainer.store.trainer_agents = trainer.store.agents[\n : len(trainer.store.trainer_table_entry)\n ]\n trainer.store.trainer_agent_net_keys = {\n agent: trainer.store.trainer_table_entry[a_i]\n for a_i, agent in enumerate(trainer.store.trainer_agents)\n }\n\n\nclass OneTrainerPerNetworkInit(BaseTrainerInit):\n def __init__(self, config: SimpleNamespace = SimpleNamespace()):\n \"\"\"Initialises a multiple trainers.\n\n Different trainer will be dedicated to training each network.\n\n Args:\n config : a dataclass specifying the component parameters.\n \"\"\"\n self.config = config\n\n def on_building_init_end(self, builder: SystemBuilder) -> None:\n \"\"\".\n\n Args:\n builder : the system builder\n Raises:\n ValueError: Raises an error when trainer_networks is not\n set to one_trainer_per_network.\n \"\"\"\n unique_net_keys = builder.store.unique_net_keys\n\n # Setup trainer_networks\n builder.store.trainer_networks = {\n f\"trainer_{i}\": [unique_net_keys[i]] for i in range(len(unique_net_keys))\n }\n\n # Get all the unique trainer network keys\n all_trainer_net_keys = []\n for trainer_nets in builder.store.trainer_networks.values():\n all_trainer_net_keys.extend(trainer_nets)\n unique_trainer_net_keys = sort_str_num(list(set(all_trainer_net_keys)))\n\n # Check that all agent_net_keys are in trainer_networks\n assert unique_net_keys == unique_trainer_net_keys\n # Setup specs for each network\n builder.store.net_spec_keys = {}\n for i in range(len(unique_net_keys)):\n builder.store.net_spec_keys[unique_net_keys[i]] = builder.store.agents[\n i % len(builder.store.agents)\n ]\n\n # Setup table_network_config\n builder.store.table_network_config = {}\n for trainer_key in builder.store.trainer_networks.keys():\n most_matches = 0\n trainer_nets = builder.store.trainer_networks[trainer_key]\n for sample in builder.store.network_sampling_setup:\n matches = 0\n for entry in sample:\n if entry in trainer_nets:\n matches += 1\n if most_matches < matches:\n matches = most_matches\n builder.store.table_network_config[trainer_key] = sample\n\n builder.store.networks = builder.store.network_factory()\n\n def on_training_utility_fns(self, trainer: SystemTrainer) -> None:\n \"\"\"_summary_\"\"\"\n # Convert network keys for the trainer.\n trainer.store.trainer_table_entry = trainer.store.table_network_config[\n 
trainer.store.trainer_id\n ]\n trainer.store.trainer_agents = trainer.store.agents[\n : len(trainer.store.trainer_table_entry)\n ]\n trainer.store.trainer_agent_net_keys = {\n agent: trainer.store.trainer_table_entry[a_i]\n for a_i, agent in enumerate(trainer.store.trainer_agents)\n }\n\n\n@dataclass\nclass CustomTrainerInitConfig:\n trainer_networks: Dict = field(default_factory=lambda: {})\n\n\nclass CustomTrainerInit(BaseTrainerInit):\n def __init__(self, config: CustomTrainerInitConfig = CustomTrainerInitConfig()):\n \"\"\"Initialises custom trainers.\n\n Custom trainer network configuration can be given as a dictionary\n assigning specific trainers to specific networks.\n\n Args:\n config : a dataclass specifying the component parameters.\n \"\"\"\n\n self.config = config\n\n def on_building_init_end(self, builder: SystemBuilder) -> None:\n \"\"\"Assigns trainers to networks for training.\n\n Args:\n builder : the system builder\n Raises:\n ValueError: Raises an error when trainer_networks is not\n passed in as a dictionary.\n \"\"\"\n trainer_networks = self.config.trainer_networks\n unique_net_keys = builder.store.unique_net_keys\n\n # Setup trainer_networks\n if not isinstance(trainer_networks, dict) or trainer_networks == {}:\n\n raise ValueError(\"trainer_networks must be a dictionary.\")\n\n builder.store.trainer_networks = trainer_networks\n\n # Get all the unique trainer network keys\n all_trainer_net_keys = []\n for trainer_nets in builder.store.trainer_networks.values():\n all_trainer_net_keys.extend(trainer_nets)\n unique_trainer_net_keys = sort_str_num(list(set(all_trainer_net_keys)))\n\n # Check that all agent_net_keys are in trainer_networks\n assert unique_net_keys == unique_trainer_net_keys\n # Setup specs for each network\n builder.store.net_spec_keys = {}\n for i in range(len(unique_net_keys)):\n builder.store.net_spec_keys[unique_net_keys[i]] = builder.store.agents[\n i % len(builder.store.agents)\n ]\n\n # Setup table_network_config\n builder.store.table_network_config = {}\n for trainer_key in builder.store.trainer_networks.keys():\n most_matches = 0\n trainer_nets = builder.store.trainer_networks[trainer_key]\n for sample in builder.store.network_sampling_setup:\n matches = 0\n for entry in sample:\n if entry in trainer_nets:\n matches += 1\n if most_matches < matches:\n matches = most_matches\n builder.store.table_network_config[trainer_key] = sample\n\n builder.store.networks = builder.store.network_factory()\n\n def on_training_utility_fns(self, trainer: SystemTrainer) -> None:\n \"\"\"_summary_\"\"\"\n # Convert network keys for the trainer.\n trainer.store.trainer_table_entry = trainer.store.table_network_config[\n trainer.store.trainer_id\n ]\n trainer.store.trainer_agents = trainer.store.agents[\n : len(trainer.store.trainer_table_entry)\n ]\n trainer.store.trainer_agent_net_keys = {\n agent: trainer.store.trainer_table_entry[a_i]\n for a_i, agent in enumerate(trainer.store.trainer_agents)\n }\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return CustomTrainerInitConfig\n", "path": "mava/components/jax/training/trainer.py"}], "after_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Trainer components for system builders.\"\"\"\n\nimport abc\nfrom dataclasses import dataclass, field\nfrom types import SimpleNamespace\nfrom typing import Any, Callable, Dict, Optional\n\nfrom mava.components.jax import Component\nfrom mava.core_jax import SystemBuilder, SystemTrainer\nfrom mava.utils.sort_utils import sort_str_num\n\n\nclass BaseTrainerInit(Component):\n @abc.abstractmethod\n def __init__(\n self,\n config: Any,\n ):\n \"\"\"Initialise system init components.\n\n Args:\n config : a dataclass specifying the component parameters.\n \"\"\"\n self.config = config\n\n @abc.abstractmethod\n def on_building_init_end(self, builder: SystemBuilder) -> None:\n \"\"\"Summary.\"\"\"\n pass\n\n @abc.abstractmethod\n def on_training_utility_fns(self, trainer: SystemTrainer) -> None:\n \"\"\"Summary.\"\"\"\n pass\n\n @staticmethod\n def name() -> str:\n \"\"\"Component name.\"\"\"\n\n return \"trainer_init\"\n\n\nclass SingleTrainerInit(BaseTrainerInit):\n def __init__(self, config: SimpleNamespace = SimpleNamespace()):\n \"\"\"Initialises a single trainer.\n\n Single trainer is used to train all networks.\n\n Args:\n config : a dataclass specifying the component parameters.\n \"\"\"\n self.config = config\n\n def on_building_init_end(self, builder: SystemBuilder) -> None:\n \"\"\"Assigns trainers to networks for training.\n\n Args:\n builder : the system builder\n Raises:\n ValueError: Raises an error when trainer_networks is not\n set to single_trainer.\n \"\"\"\n unique_net_keys = builder.store.unique_net_keys\n\n # Setup trainer_networks\n\n builder.store.trainer_networks = {\"trainer\": unique_net_keys}\n\n # Get all the unique trainer network keys\n all_trainer_net_keys = []\n for trainer_nets in builder.store.trainer_networks.values():\n all_trainer_net_keys.extend(trainer_nets)\n unique_trainer_net_keys = sort_str_num(list(set(all_trainer_net_keys)))\n\n # Check that all agent_net_keys are in trainer_networks\n assert unique_net_keys == unique_trainer_net_keys\n # Setup specs for each network\n builder.store.net_spec_keys = {}\n for i in range(len(unique_net_keys)):\n builder.store.net_spec_keys[unique_net_keys[i]] = builder.store.agents[\n i % len(builder.store.agents)\n ]\n\n # Setup table_network_config\n builder.store.table_network_config = {}\n for trainer_key in builder.store.trainer_networks.keys():\n most_matches = 0\n trainer_nets = builder.store.trainer_networks[trainer_key]\n for sample in builder.store.network_sampling_setup:\n matches = 0\n for entry in sample:\n if entry in trainer_nets:\n matches += 1\n if most_matches < matches:\n matches = most_matches\n builder.store.table_network_config[trainer_key] = sample\n\n builder.store.networks = builder.store.network_factory()\n\n def on_training_utility_fns(self, trainer: SystemTrainer) -> None:\n \"\"\"_summary_\"\"\"\n # Convert network keys for the trainer.\n trainer.store.trainer_table_entry = trainer.store.table_network_config[\n 
trainer.store.trainer_id\n ]\n trainer.store.trainer_agents = trainer.store.agents[\n : len(trainer.store.trainer_table_entry)\n ]\n trainer.store.trainer_agent_net_keys = {\n agent: trainer.store.trainer_table_entry[a_i]\n for a_i, agent in enumerate(trainer.store.trainer_agents)\n }\n\n\nclass OneTrainerPerNetworkInit(BaseTrainerInit):\n def __init__(self, config: SimpleNamespace = SimpleNamespace()):\n \"\"\"Initialises a multiple trainers.\n\n Different trainer will be dedicated to training each network.\n\n Args:\n config : a dataclass specifying the component parameters.\n \"\"\"\n self.config = config\n\n def on_building_init_end(self, builder: SystemBuilder) -> None:\n \"\"\".\n\n Args:\n builder : the system builder\n Raises:\n ValueError: Raises an error when trainer_networks is not\n set to one_trainer_per_network.\n \"\"\"\n unique_net_keys = builder.store.unique_net_keys\n\n # Setup trainer_networks\n builder.store.trainer_networks = {\n f\"trainer_{i}\": [unique_net_keys[i]] for i in range(len(unique_net_keys))\n }\n\n # Get all the unique trainer network keys\n all_trainer_net_keys = []\n for trainer_nets in builder.store.trainer_networks.values():\n all_trainer_net_keys.extend(trainer_nets)\n unique_trainer_net_keys = sort_str_num(list(set(all_trainer_net_keys)))\n\n # Check that all agent_net_keys are in trainer_networks\n assert unique_net_keys == unique_trainer_net_keys\n # Setup specs for each network\n builder.store.net_spec_keys = {}\n for i in range(len(unique_net_keys)):\n builder.store.net_spec_keys[unique_net_keys[i]] = builder.store.agents[\n i % len(builder.store.agents)\n ]\n\n # Setup table_network_config\n builder.store.table_network_config = {}\n for trainer_key in builder.store.trainer_networks.keys():\n most_matches = 0\n trainer_nets = builder.store.trainer_networks[trainer_key]\n for sample in builder.store.network_sampling_setup:\n matches = 0\n for entry in sample:\n if entry in trainer_nets:\n matches += 1\n if most_matches < matches:\n matches = most_matches\n builder.store.table_network_config[trainer_key] = sample\n\n builder.store.networks = builder.store.network_factory()\n\n def on_training_utility_fns(self, trainer: SystemTrainer) -> None:\n \"\"\"_summary_\"\"\"\n # Convert network keys for the trainer.\n trainer.store.trainer_table_entry = trainer.store.table_network_config[\n trainer.store.trainer_id\n ]\n trainer.store.trainer_agents = trainer.store.agents[\n : len(trainer.store.trainer_table_entry)\n ]\n trainer.store.trainer_agent_net_keys = {\n agent: trainer.store.trainer_table_entry[a_i]\n for a_i, agent in enumerate(trainer.store.trainer_agents)\n }\n\n\n@dataclass\nclass CustomTrainerInitConfig:\n trainer_networks: Dict = field(default_factory=lambda: {})\n\n\nclass CustomTrainerInit(BaseTrainerInit):\n def __init__(self, config: CustomTrainerInitConfig = CustomTrainerInitConfig()):\n \"\"\"Initialises custom trainers.\n\n Custom trainer network configuration can be given as a dictionary\n assigning specific trainers to specific networks.\n\n Args:\n config : a dataclass specifying the component parameters.\n \"\"\"\n\n self.config = config\n\n def on_building_init_end(self, builder: SystemBuilder) -> None:\n \"\"\"Assigns trainers to networks for training.\n\n Args:\n builder : the system builder\n Raises:\n ValueError: Raises an error when trainer_networks is not\n passed in as a dictionary.\n \"\"\"\n trainer_networks = self.config.trainer_networks\n unique_net_keys = builder.store.unique_net_keys\n\n # Setup trainer_networks\n if 
not isinstance(trainer_networks, dict) or trainer_networks == {}:\n\n raise ValueError(\"trainer_networks must be a non-empty dictionary.\")\n\n builder.store.trainer_networks = trainer_networks\n\n # Get all the unique trainer network keys\n all_trainer_net_keys = []\n for trainer_nets in builder.store.trainer_networks.values():\n all_trainer_net_keys.extend(trainer_nets)\n unique_trainer_net_keys = sort_str_num(list(set(all_trainer_net_keys)))\n\n # Check that all agent_net_keys are in trainer_networks\n assert unique_net_keys == unique_trainer_net_keys\n # Setup specs for each network\n builder.store.net_spec_keys = {}\n for i in range(len(unique_net_keys)):\n builder.store.net_spec_keys[unique_net_keys[i]] = builder.store.agents[\n i % len(builder.store.agents)\n ]\n\n # Setup table_network_config\n builder.store.table_network_config = {}\n for trainer_key in builder.store.trainer_networks.keys():\n most_matches = 0\n trainer_nets = builder.store.trainer_networks[trainer_key]\n for sample in builder.store.network_sampling_setup:\n matches = 0\n for entry in sample:\n if entry in trainer_nets:\n matches += 1\n if most_matches < matches:\n matches = most_matches\n builder.store.table_network_config[trainer_key] = sample\n\n builder.store.networks = builder.store.network_factory()\n\n def on_training_utility_fns(self, trainer: SystemTrainer) -> None:\n \"\"\"_summary_\"\"\"\n # Convert network keys for the trainer.\n trainer.store.trainer_table_entry = trainer.store.table_network_config[\n trainer.store.trainer_id\n ]\n trainer.store.trainer_agents = trainer.store.agents[\n : len(trainer.store.trainer_table_entry)\n ]\n trainer.store.trainer_agent_net_keys = {\n agent: trainer.store.trainer_table_entry[a_i]\n for a_i, agent in enumerate(trainer.store.trainer_agents)\n }\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return CustomTrainerInitConfig\n", "path": "mava/components/jax/training/trainer.py"}]} | 3,333 | 133 |
gh_patches_debug_1660 | rasdani/github-patches | git_diff | e2nIEE__pandapower-2106 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] Pandapower interferes with matplotlib savefig
### Bug report checklist
- [X] Searched the [issues page](https://github.com/e2nIEE/pandapower/issues) for similar reports
- [X] Read the relevant sections of the [documentation](https://pandapower.readthedocs.io/en/latest/about.html)
- [X] Browsed the [tutorials](https://github.com/e2nIEE/pandapower/tree/develop/tutorials) and [tests](https://github.com/e2nIEE/pandapower/tree/develop/pandapower/test) for useful code snippets and examples of use
- [X] Reproduced the issue after updating with `pip install --upgrade pandapower` (or `git pull`)
- [X] Tried basic troubleshooting (if a bug/error) like restarting the interpreter and checking the pythonpath
### Reproducible Example
```python
import matplotlib.pyplot as plt
import pandapower
fig, ax = plt.subplots()
ax.scatter(range(5), range(5))
fig.savefig('test.svg')
```
### Issue Description and Traceback
When pandapower is imported, matplotlib `savefig()` may run into a bug where the `GraphicsContextBase._capstyle` is set to a `str` instead of a `CapStyle` instance. Calling the proper `set_capstyle()` method solves this issue. Also, somehow, this issue does not arise when calling `fig.savefig('test.png')`. It only arises when the figure save type is SVG.
The following code works fine. Notice that I have commented out `import pandapower`:
```python
import matplotlib.pyplot as plt
# import pandapower
fig, ax = plt.subplots()
ax.scatter(range(5), range(5))
fig.savefig('test.svg')
```
However, if I uncomment the `import pandapower` line, then I will get an error:
```python
import matplotlib.pyplot as plt
import pandapower
fig, ax = plt.subplots()
ax.scatter(range(5), range(5))
fig.savefig('test.svg')
```
Error:
```
Traceback (most recent call last):
File "/home/user/testenv/test.py", line 6, in <module>
fig.savefig('test.svg')
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/figure.py", line 3378, in savefig
self.canvas.print_figure(fname, **kwargs)
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py", line 2366, in print_figure
result = print_method(
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py", line 2232, in <lambda>
print_method = functools.wraps(meth)(lambda *args, **kwargs: meth(
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py", line 1369, in print_svg
self.figure.draw(renderer)
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 95, in draw_wrapper
result = draw(artist, renderer, *args, **kwargs)
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 72, in draw_wrapper
return draw(artist, renderer)
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/figure.py", line 3175, in draw
mimage._draw_list_compositing_images(
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/image.py", line 131, in _draw_list_compositing_images
a.draw(renderer)
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 72, in draw_wrapper
return draw(artist, renderer)
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/axes/_base.py", line 3064, in draw
mimage._draw_list_compositing_images(
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/image.py", line 131, in _draw_list_compositing_images
a.draw(renderer)
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 72, in draw_wrapper
return draw(artist, renderer)
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/collections.py", line 972, in draw
super().draw(renderer)
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 72, in draw_wrapper
return draw(artist, renderer)
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/collections.py", line 405, in draw
renderer.draw_markers(
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py", line 717, in draw_markers
style = self._get_style_dict(gc, rgbFace)
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py", line 609, in _get_style_dict
if gc.get_capstyle() != 'butt':
File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py", line 820, in get_capstyle
return self._capstyle.name
AttributeError: 'str' object has no attribute 'name'
```
### Expected Behavior
I would expect the following 2 code blocks to produce identical (or at least similar) results:
```python
import matplotlib.pyplot as plt
# import pandapower
fig, ax = plt.subplots()
ax.scatter(range(5), range(5))
fig.savefig('test.svg')
```
and
```python
import matplotlib.pyplot as plt
import pandapower
fig, ax = plt.subplots()
ax.scatter(range(5), range(5))
fig.savefig('test.svg')
```
The 1st code block works fine, whereas the 2nd code block throws an `AttributeError`.
### Installed Versions
OS: Ubuntu 22.04 LTS
Python 3.10
Matplotlib 3.7.2
Pandapower 2.13.1
### Label
- [X] Relevant labels are selected
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandapower/plotting/__init__.py`
Content:
```
1 from pandapower.plotting.collections import *
2 from pandapower.plotting.colormaps import *
3 from pandapower.plotting.generic_geodata import *
4 from pandapower.plotting.powerflow_results import *
5 from pandapower.plotting.simple_plot import *
6 from pandapower.plotting.plotly import *
7 from pandapower.plotting.geo import *
8 from pandapower.plotting.plotting_toolbox import set_line_geodata_from_bus_geodata
9 from pandapower.plotting.to_html import to_html
10
11 import types
12
13 try:
14 from matplotlib.backend_bases import GraphicsContextBase, RendererBase
15
16 class GC(GraphicsContextBase):
17 def __init__(self):
18 super().__init__()
19 self._capstyle = 'round'
20
21 def custom_new_gc(self):
22 return GC()
23
24 RendererBase.new_gc = types.MethodType(custom_new_gc, RendererBase)
25
26 except ImportError:
27 pass
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandapower/plotting/__init__.py b/pandapower/plotting/__init__.py
--- a/pandapower/plotting/__init__.py
+++ b/pandapower/plotting/__init__.py
@@ -16,7 +16,7 @@
class GC(GraphicsContextBase):
def __init__(self):
super().__init__()
- self._capstyle = 'round'
+ self.set_capstyle('round')
def custom_new_gc(self):
return GC()
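
Editorial note on the patch above: the private `_capstyle` assignment worked with older matplotlib, but the traceback shows that `GraphicsContextBase.get_capstyle()` now returns `self._capstyle.name`, i.e. it expects a `CapStyle` enum member rather than a plain string, and the public setter performs that conversion. The snippet below is only a minimal sketch of that mechanism (it assumes the matplotlib >= 3.6 behaviour visible in the traceback), not part of the dataset row:

```python
# Minimal sketch of why the one-line fix works, assuming the behaviour shown
# in the traceback (matplotlib >= 3.6 stores cap styles as a CapStyle enum).
from matplotlib.backend_bases import GraphicsContextBase

gc = GraphicsContextBase()

gc.set_capstyle("round")      # public setter converts to CapStyle("round")
print(gc.get_capstyle())      # -> "round", because CapStyle.round.name exists

gc._capstyle = "round"        # what the old pandapower code did: a bare str
# gc.get_capstyle() would now raise
# AttributeError: 'str' object has no attribute 'name'  (as in the SVG backend above)
```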
| {"golden_diff": "diff --git a/pandapower/plotting/__init__.py b/pandapower/plotting/__init__.py\n--- a/pandapower/plotting/__init__.py\n+++ b/pandapower/plotting/__init__.py\n@@ -16,7 +16,7 @@\n class GC(GraphicsContextBase):\n def __init__(self):\n super().__init__()\n- self._capstyle = 'round'\n+ self.set_capstyle('round')\n \n def custom_new_gc(self):\n return GC()\n", "issue": "[bug] Pandapower interferes with matplotlib savefig\n### Bug report checklis\n\n- [X] Searched the [issues page](https://github.com/e2nIEE/pandapower/issues) for similar reports\n\n- [X] Read the relevant sections of the [documentation](https://pandapower.readthedocs.io/en/latest/about.html)\n\n- [X] Browse the [tutorials](https://github.com/e2nIEE/pandapower/tree/develop/tutorials) and [tests](https://github.com/e2nIEE/pandapower/tree/develop/pandapower/test) for usefull code snippets and examples of use\n\n- [X] Reproduced the issue after updating with `pip install --upgrade pandapower` (or `git pull`)\n\n- [X] Tried basic troubleshooting (if a bug/error) like restarting the interpreter and checking the pythonpath\n\n\n### Reproducible Example\n\n```python\nimport matplotlib.pyplot as plt\r\nimport pandapower\r\n\r\nfig, ax = plt.subplots()\r\nax.scatter(range(5), range(5))\r\nfig.savefig('test.svg')\n```\n\n\n### Issue Description and Traceback\n\nWhen pandapower is imported, matplotlib `savefig()` may run into a bug where the `GraphicsContextBase._capstyle` is set to a `str` instead of a `CapStyle` instance. Calling the proper `set_capstyle()` method solves this issue. Also, somehow, this issue does not arise when calling `fig.savefig('test.png')`. It only arises when the figure save type is SVG.\r\n\r\nThe following code works fine. Notice that I have commented out `import pandapower`:\r\n```python\r\nimport matplotlib.pyplot as plt\r\n# import pandapower\r\n\r\nfig, ax = plt.subplots()\r\nax.scatter(range(5), range(5))\r\nfig.savefig('test.svg')\r\n```\r\n\r\nHowever, if I uncomment the `import pandapower` line, then I will get an error:\r\n```python\r\nimport matplotlib.pyplot as plt\r\nimport pandapower\r\n\r\nfig, ax = plt.subplots()\r\nax.scatter(range(5), range(5))\r\nfig.savefig('test.svg')\r\n```\r\n\r\nError:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/user/testenv/test.py\", line 6, in <module>\r\n fig.savefig('test.svg')\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/figure.py\", line 3378, in savefig\r\n self.canvas.print_figure(fname, **kwargs)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py\", line 2366, in print_figure\r\n result = print_method(\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py\", line 2232, in <lambda>\r\n print_method = functools.wraps(meth)(lambda *args, **kwargs: meth(\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py\", line 1369, in print_svg\r\n self.figure.draw(renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py\", line 95, in draw_wrapper\r\n result = draw(artist, renderer, *args, **kwargs)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py\", line 72, in draw_wrapper\r\n return draw(artist, renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/figure.py\", line 3175, in draw\r\n 
mimage._draw_list_compositing_images(\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/image.py\", line 131, in _draw_list_compositing_images\r\n a.draw(renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py\", line 72, in draw_wrapper\r\n return draw(artist, renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/axes/_base.py\", line 3064, in draw\r\n mimage._draw_list_compositing_images(\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/image.py\", line 131, in _draw_list_compositing_images\r\n a.draw(renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py\", line 72, in draw_wrapper\r\n return draw(artist, renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/collections.py\", line 972, in draw\r\n super().draw(renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py\", line 72, in draw_wrapper\r\n return draw(artist, renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/collections.py\", line 405, in draw\r\n renderer.draw_markers(\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py\", line 717, in draw_markers\r\n style = self._get_style_dict(gc, rgbFace)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py\", line 609, in _get_style_dict\r\n if gc.get_capstyle() != 'butt':\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py\", line 820, in get_capstyle\r\n return self._capstyle.name\r\nAttributeError: 'str' object has no attribute 'name'\r\n```\n\n### Expected Behavior\n\nI would expect the following 2 code blocks to produce identical (or at least similar) results:\r\n\r\n```python\r\nimport matplotlib.pyplot as plt\r\n# import pandapower\r\n\r\nfig, ax = plt.subplots()\r\nax.scatter(range(5), range(5))\r\nfig.savefig('test.svg')\r\n```\r\n\r\nand \r\n\r\n```python\r\nimport matplotlib.pyplot as plt\r\nimport pandapower\r\n\r\nfig, ax = plt.subplots()\r\nax.scatter(range(5), range(5))\r\nfig.savefig('test.svg')\r\n```\r\n\r\nThe 1st code block works fine, whereas the 2nd code block throws an `AttributeError`.\n\n### Installed Versions\n\nOS: Ubuntu 22.04 LTS\r\nPython 3.10\r\nMatplotlib 3.7.2\r\nPandapower 2.13.1\n\n### Label\n\n- [X] Relevant labels are selected\n\n", "before_files": [{"content": "from pandapower.plotting.collections import *\nfrom pandapower.plotting.colormaps import *\nfrom pandapower.plotting.generic_geodata import *\nfrom pandapower.plotting.powerflow_results import *\nfrom pandapower.plotting.simple_plot import *\nfrom pandapower.plotting.plotly import *\nfrom pandapower.plotting.geo import *\nfrom pandapower.plotting.plotting_toolbox import set_line_geodata_from_bus_geodata\nfrom pandapower.plotting.to_html import to_html\n\nimport types\n\ntry:\n from matplotlib.backend_bases import GraphicsContextBase, RendererBase\n\n class GC(GraphicsContextBase):\n def __init__(self):\n super().__init__()\n self._capstyle = 'round'\n\n def custom_new_gc(self):\n return GC()\n\n RendererBase.new_gc = types.MethodType(custom_new_gc, RendererBase)\n\nexcept ImportError:\n pass\n", "path": "pandapower/plotting/__init__.py"}], "after_files": [{"content": "from 
pandapower.plotting.collections import *\nfrom pandapower.plotting.colormaps import *\nfrom pandapower.plotting.generic_geodata import *\nfrom pandapower.plotting.powerflow_results import *\nfrom pandapower.plotting.simple_plot import *\nfrom pandapower.plotting.plotly import *\nfrom pandapower.plotting.geo import *\nfrom pandapower.plotting.plotting_toolbox import set_line_geodata_from_bus_geodata\nfrom pandapower.plotting.to_html import to_html\n\nimport types\n\ntry:\n from matplotlib.backend_bases import GraphicsContextBase, RendererBase\n\n class GC(GraphicsContextBase):\n def __init__(self):\n super().__init__()\n self.set_capstyle('round')\n\n def custom_new_gc(self):\n return GC()\n\n RendererBase.new_gc = types.MethodType(custom_new_gc, RendererBase)\n\nexcept ImportError:\n pass\n", "path": "pandapower/plotting/__init__.py"}]} | 2,025 | 118 |
gh_patches_debug_34258 | rasdani/github-patches | git_diff | beetbox__beets-4197 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
kodiupdate: support updating multiple Kodi instances
Pretty straightforward ask: it would be nice if, when running beets using the Kodi update client, you could enter multiple clients.
syntax would be something to the tune of ...
kodi:
host: x.x.x.x
port: 8080
user: user
pwd: password
kodi:
host: x.x.x.x
port: 8080
user: user
pwd: password
kodi:
host: x.x.x.x
port: 8080
user: user
pwd: password
Can this please be implemented?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/kodiupdate.py`
Content:
```
1 # This file is part of beets.
2 # Copyright 2017, Pauli Kettunen.
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """Updates a Kodi library whenever the beets library is changed.
16 This is based on the Plex Update plugin.
17
18 Put something like the following in your config.yaml to configure:
19 kodi:
20 host: localhost
21 port: 8080
22 user: user
23 pwd: secret
24 """
25
26 import requests
27 from beets import config
28 from beets.plugins import BeetsPlugin
29
30
31 def update_kodi(host, port, user, password):
32 """Sends request to the Kodi api to start a library refresh.
33 """
34 url = f"http://{host}:{port}/jsonrpc"
35
36 """Content-Type: application/json is mandatory
37 according to the kodi jsonrpc documentation"""
38
39 headers = {'Content-Type': 'application/json'}
40
41 # Create the payload. Id seems to be mandatory.
42 payload = {'jsonrpc': '2.0', 'method': 'AudioLibrary.Scan', 'id': 1}
43 r = requests.post(
44 url,
45 auth=(user, password),
46 json=payload,
47 headers=headers)
48
49 return r
50
51
52 class KodiUpdate(BeetsPlugin):
53 def __init__(self):
54 super().__init__()
55
56 # Adding defaults.
57 config['kodi'].add({
58 'host': 'localhost',
59 'port': 8080,
60 'user': 'kodi',
61 'pwd': 'kodi'})
62
63 config['kodi']['pwd'].redact = True
64 self.register_listener('database_change', self.listen_for_db_change)
65
66 def listen_for_db_change(self, lib, model):
67 """Listens for beets db change and register the update"""
68 self.register_listener('cli_exit', self.update)
69
70 def update(self, lib):
71 """When the client exists try to send refresh request to Kodi server.
72 """
73 self._log.info('Requesting a Kodi library update...')
74
75 # Try to send update request.
76 try:
77 r = update_kodi(
78 config['kodi']['host'].get(),
79 config['kodi']['port'].get(),
80 config['kodi']['user'].get(),
81 config['kodi']['pwd'].get())
82 r.raise_for_status()
83
84 except requests.exceptions.RequestException as e:
85 self._log.warning('Kodi update failed: {0}',
86 str(e))
87 return
88
89 json = r.json()
90 if json.get('result') != 'OK':
91 self._log.warning('Kodi update failed: JSON response was {0!r}',
92 json)
93 return
94
95 self._log.info('Kodi update triggered')
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/kodiupdate.py b/beetsplug/kodiupdate.py
--- a/beetsplug/kodiupdate.py
+++ b/beetsplug/kodiupdate.py
@@ -54,11 +54,12 @@
super().__init__()
# Adding defaults.
- config['kodi'].add({
+ config['kodi'].add([{
'host': 'localhost',
'port': 8080,
'user': 'kodi',
- 'pwd': 'kodi'})
+ 'pwd': 'kodi'
+ }])
config['kodi']['pwd'].redact = True
self.register_listener('database_change', self.listen_for_db_change)
@@ -72,24 +73,34 @@
"""
self._log.info('Requesting a Kodi library update...')
- # Try to send update request.
- try:
- r = update_kodi(
- config['kodi']['host'].get(),
- config['kodi']['port'].get(),
- config['kodi']['user'].get(),
- config['kodi']['pwd'].get())
- r.raise_for_status()
-
- except requests.exceptions.RequestException as e:
- self._log.warning('Kodi update failed: {0}',
- str(e))
- return
-
- json = r.json()
- if json.get('result') != 'OK':
- self._log.warning('Kodi update failed: JSON response was {0!r}',
- json)
- return
-
- self._log.info('Kodi update triggered')
+ kodi = config['kodi'].get()
+
+ # Backwards compatibility in case not configured as an array
+ if not isinstance(kodi, list):
+ kodi = [kodi]
+
+ for instance in kodi:
+ # Try to send update request.
+ try:
+ r = update_kodi(
+ instance['host'],
+ instance['port'],
+ instance['user'],
+ instance['pwd']
+ )
+ r.raise_for_status()
+
+ json = r.json()
+ if json.get('result') != 'OK':
+ self._log.warning(
+ 'Kodi update failed: JSON response was {0!r}', json
+ )
+ continue
+
+ self._log.info(
+ 'Kodi update triggered for {0}:{1}',
+ instance['host'], instance['port']
+ )
+ except requests.exceptions.RequestException as e:
+ self._log.warning('Kodi update failed: {0}', str(e))
+ continue
| {"golden_diff": "diff --git a/beetsplug/kodiupdate.py b/beetsplug/kodiupdate.py\n--- a/beetsplug/kodiupdate.py\n+++ b/beetsplug/kodiupdate.py\n@@ -54,11 +54,12 @@\n super().__init__()\n \n # Adding defaults.\n- config['kodi'].add({\n+ config['kodi'].add([{\n 'host': 'localhost',\n 'port': 8080,\n 'user': 'kodi',\n- 'pwd': 'kodi'})\n+ 'pwd': 'kodi'\n+ }])\n \n config['kodi']['pwd'].redact = True\n self.register_listener('database_change', self.listen_for_db_change)\n@@ -72,24 +73,34 @@\n \"\"\"\n self._log.info('Requesting a Kodi library update...')\n \n- # Try to send update request.\n- try:\n- r = update_kodi(\n- config['kodi']['host'].get(),\n- config['kodi']['port'].get(),\n- config['kodi']['user'].get(),\n- config['kodi']['pwd'].get())\n- r.raise_for_status()\n-\n- except requests.exceptions.RequestException as e:\n- self._log.warning('Kodi update failed: {0}',\n- str(e))\n- return\n-\n- json = r.json()\n- if json.get('result') != 'OK':\n- self._log.warning('Kodi update failed: JSON response was {0!r}',\n- json)\n- return\n-\n- self._log.info('Kodi update triggered')\n+ kodi = config['kodi'].get()\n+\n+ # Backwards compatibility in case not configured as an array\n+ if not isinstance(kodi, list):\n+ kodi = [kodi]\n+\n+ for instance in kodi:\n+ # Try to send update request.\n+ try:\n+ r = update_kodi(\n+ instance['host'],\n+ instance['port'],\n+ instance['user'],\n+ instance['pwd']\n+ )\n+ r.raise_for_status()\n+\n+ json = r.json()\n+ if json.get('result') != 'OK':\n+ self._log.warning(\n+ 'Kodi update failed: JSON response was {0!r}', json\n+ )\n+ continue\n+\n+ self._log.info(\n+ 'Kodi update triggered for {0}:{1}',\n+ instance['host'], instance['port']\n+ )\n+ except requests.exceptions.RequestException as e:\n+ self._log.warning('Kodi update failed: {0}', str(e))\n+ continue\n", "issue": "kodiupdate: support updating multiple Kodi instances\nPretty straitforward ask, it would be nice if when running beets using the Kodi update client that you could enter multiple clients\r\n\r\nsyntax would be something to the tune of ...\r\n\r\nkodi:\r\n host: x.x.x.x\r\n port: 8080\r\n user: user\r\n pwd: password\r\nkodi:\r\n host: x.x.x.x\r\n port: 8080\r\n user: user\r\n pwd: password\r\nkodi:\r\n host: x.x.x.x\r\n port: 8080\r\n user: user\r\n pwd: password\r\ncan this please be implemented\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2017, Pauli Kettunen.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Updates a Kodi library whenever the beets library is changed.\nThis is based on the Plex Update plugin.\n\nPut something like the following in your config.yaml to configure:\n kodi:\n host: localhost\n port: 8080\n user: user\n pwd: secret\n\"\"\"\n\nimport requests\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef update_kodi(host, port, user, password):\n \"\"\"Sends request to the Kodi api to start a library refresh.\n \"\"\"\n url = f\"http://{host}:{port}/jsonrpc\"\n\n 
\"\"\"Content-Type: application/json is mandatory\n according to the kodi jsonrpc documentation\"\"\"\n\n headers = {'Content-Type': 'application/json'}\n\n # Create the payload. Id seems to be mandatory.\n payload = {'jsonrpc': '2.0', 'method': 'AudioLibrary.Scan', 'id': 1}\n r = requests.post(\n url,\n auth=(user, password),\n json=payload,\n headers=headers)\n\n return r\n\n\nclass KodiUpdate(BeetsPlugin):\n def __init__(self):\n super().__init__()\n\n # Adding defaults.\n config['kodi'].add({\n 'host': 'localhost',\n 'port': 8080,\n 'user': 'kodi',\n 'pwd': 'kodi'})\n\n config['kodi']['pwd'].redact = True\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Kodi server.\n \"\"\"\n self._log.info('Requesting a Kodi library update...')\n\n # Try to send update request.\n try:\n r = update_kodi(\n config['kodi']['host'].get(),\n config['kodi']['port'].get(),\n config['kodi']['user'].get(),\n config['kodi']['pwd'].get())\n r.raise_for_status()\n\n except requests.exceptions.RequestException as e:\n self._log.warning('Kodi update failed: {0}',\n str(e))\n return\n\n json = r.json()\n if json.get('result') != 'OK':\n self._log.warning('Kodi update failed: JSON response was {0!r}',\n json)\n return\n\n self._log.info('Kodi update triggered')\n", "path": "beetsplug/kodiupdate.py"}], "after_files": [{"content": "# This file is part of beets.\n# Copyright 2017, Pauli Kettunen.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Updates a Kodi library whenever the beets library is changed.\nThis is based on the Plex Update plugin.\n\nPut something like the following in your config.yaml to configure:\n kodi:\n host: localhost\n port: 8080\n user: user\n pwd: secret\n\"\"\"\n\nimport requests\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef update_kodi(host, port, user, password):\n \"\"\"Sends request to the Kodi api to start a library refresh.\n \"\"\"\n url = f\"http://{host}:{port}/jsonrpc\"\n\n \"\"\"Content-Type: application/json is mandatory\n according to the kodi jsonrpc documentation\"\"\"\n\n headers = {'Content-Type': 'application/json'}\n\n # Create the payload. 
Id seems to be mandatory.\n payload = {'jsonrpc': '2.0', 'method': 'AudioLibrary.Scan', 'id': 1}\n r = requests.post(\n url,\n auth=(user, password),\n json=payload,\n headers=headers)\n\n return r\n\n\nclass KodiUpdate(BeetsPlugin):\n def __init__(self):\n super().__init__()\n\n # Adding defaults.\n config['kodi'].add([{\n 'host': 'localhost',\n 'port': 8080,\n 'user': 'kodi',\n 'pwd': 'kodi'\n }])\n\n config['kodi']['pwd'].redact = True\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Kodi server.\n \"\"\"\n self._log.info('Requesting a Kodi library update...')\n\n kodi = config['kodi'].get()\n\n # Backwards compatibility in case not configured as an array\n if not isinstance(kodi, list):\n kodi = [kodi]\n\n for instance in kodi:\n # Try to send update request.\n try:\n r = update_kodi(\n instance['host'],\n instance['port'],\n instance['user'],\n instance['pwd']\n )\n r.raise_for_status()\n\n json = r.json()\n if json.get('result') != 'OK':\n self._log.warning(\n 'Kodi update failed: JSON response was {0!r}', json\n )\n continue\n\n self._log.info(\n 'Kodi update triggered for {0}:{1}',\n instance['host'], instance['port']\n )\n except requests.exceptions.RequestException as e:\n self._log.warning('Kodi update failed: {0}', str(e))\n continue\n", "path": "beetsplug/kodiupdate.py"}]} | 1,285 | 584 |
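
Editorial note on the beets patch above: the duplicated top-level `kodi:` keys requested in the issue are not valid YAML, so the accepted change turns the `kodi` option into a list of instances while still tolerating the old single-mapping form. The snippet below is a plain-Python sketch of that normalisation; the YAML shape in the comments is an assumption based on the list default added in the diff, and the host values are placeholders:

```python
# Hypothetical config.yaml shape once `kodi` accepts a list of instances:
#   kodi:
#     - host: x.x.x.x
#       port: 8080
#       user: user
#       pwd: password
#     - host: x.x.x.x
#       port: 8080
#       user: user
#       pwd: password
#
# Backwards-compatibility shim used by the patch, shown without beets itself:
kodi = {"host": "x.x.x.x", "port": 8080, "user": "user", "pwd": "password"}  # old single-instance form
if not isinstance(kodi, list):
    kodi = [kodi]                  # normalise a single mapping to a one-element list
for instance in kodi:
    print(f"would refresh Kodi at {instance['host']}:{instance['port']}")
```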
gh_patches_debug_65407 | rasdani/github-patches | git_diff | scikit-hep__awkward-1822 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add statistics tracker to the new website
### Which documentation?
Tutorials site
### What needs to be documented?
Stefan gave us a `data-domain` for Plausible; we should add this text
```html
<script defer data-domain="awkward-array.org" src="https://views.scientific-python.org/js/plausible.js"></script>
```
to every HTML page. The HTML is generated by Sphinx, so it has to go into a template somewhere. I think we don't actually have the default HTML templates; I think that all of the HTML in [docs-sphinx/_templates](https://github.com/scikit-hep/awkward/tree/main/docs-sphinx/_templates) is an override. For instance, the [breadcrumbs.html](https://github.com/scikit-hep/awkward/blob/main/docs-sphinx/_templates/breadcrumbs.html) is just to eliminate something that would be on the default page (the "fork me on GitHub" ribbon?).
@agoose77, you've recently added [funding.html](https://github.com/scikit-hep/awkward/blob/main/docs-sphinx/_templates/funding.html), which appears at the bottom of every page. (I just checked.) If the `<script>` is added there, I think it would reach every page.
Except redirects. The [redirect.html](https://github.com/scikit-hep/awkward/blob/main/docs-sphinx/_templates/redirect.html) describes an entire page. It's debatable whether we'd want to count redirects. It would tell us how many people are using old URLs versus new URLs, but I don't see how useful that information is, and it would have to be disaggregated from the totals, since redirected links shouldn't be counted twice if we're interested in how many people went to a given _topic_ (not _page_). I vote for no statistics on redirects, and I'm not even sure if the script will work if the redirect happens through the meta mechanism (because such an access has `<noscript>`.
Arguably, a `<script>` element ought to go in the HTML `<head>`, rather than the `<body>` (`footer.html`). That's where I usually see them. Actually, I stand corrected: they're legal anywhere, and there are reasons to put them in the `<body>`. [This StackOverflow post](https://stackoverflow.com/a/24070373/1623645) presents the pros and cons: page rendering will pause while a script is being downloaded an executed, and that's no good. We could follow that page's "antiquated recommendation" by putting the `<script>` at the end of the page (`footer.html`); the reason against it doesn't apply: we don't need the statistics-counter script to run to render the page—that can happen late. The "modern approach" is to use `async` or `defer`, which I just noticed is in our snippet, so there are no constraints on when this snippet can be placed. (And it could be `async`, rather than `defer`, because we don't care whether it runs before or after other scripts on the page.)
The only argument I can see for putting it in the `<head>`, then, is that if the statistics-counter starts too late, we could undercount our bounce rate: users click the back button before rendering gets to the `<script>` and the page view gets counted. There will always be some cut-off in the bounce rate, much like $p_T$ in a tracking distribution, since very small values are hard to measure. Having a lower implicit cut on bounce rate, rather than a higher implicit cut, doesn't sound very important to me.
Trying to get it into the `<head>` would mean overloading more templates, and I don't like to overload Sphinx templates because it means we no longer get version updates for that part of the page, and who knows if the template designer intends some relationship between two parts of a page, starting with a particular version number? So I'm in favor of adding the `<script>` to `footer.html`, and `defer` may be replaced by `async` just to loosen an unnecessary constraint for the browser.
Sorry for the long-winded write-up; just thinking through the issues while I type!
Oh, one last thing: let's add the statistics-counter to the new documentation _only_. In other words, _not_ the one with the `latest` tag. The v1 documentation (`latest`) is split between two sites, I don't want to add it to the Netlify site, and the statistics will be easier to interpret if we have it on only one site: we'll know what page is meant by a given URL. We should also see the turn-on curve when the new documentation goes public. If we include the old documentation, we might not be able to filter it out of the statistics, since some of the URLs are the same.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs-sphinx/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 import os
14 import json
15 import datetime
16 import runpy
17 import sys
18 import subprocess
19 import pathlib
20
21 # -- Project information -----------------------------------------------------
22
23 project = "Awkward Array"
24 copyright = f"{datetime.datetime.now().year}, Awkward Array development team"
25 author = "Jim Pivarski"
26
27 # -- General configuration ---------------------------------------------------
28
29 # Add any Sphinx extension module names here, as strings. They can be
30 # extensions coming with Sphinx (named "sphinx.ext.*") or your custom
31 # ones.
32 extensions = [
33 "sphinx_copybutton",
34 "sphinx_design",
35 "sphinx_external_toc",
36 "sphinx.ext.intersphinx",
37 "myst_nb",
38 # Preserve old links
39 "sphinx_reredirects",
40 "jupyterlite_sphinx",
41 ]
42
43 # Add any paths that contain templates here, relative to this directory.
44 templates_path = ["_templates"]
45
46 # List of patterns, relative to source directory, that match files and
47 # directories to ignore when looking for source files.
48 # This pattern also affects html_static_path and html_extra_path.
49 exclude_patterns = ["_build", "_templates", "Thumbs.db", "jupyter_execute", ".*"]
50
51 # -- Options for HTML output -------------------------------------------------
52
53 # The theme to use for HTML and HTML Help pages. See the documentation for
54 # a list of builtin themes.
55 #
56 html_context = {
57 "github_user": "scikit-hep",
58 "github_repo": "awkward",
59 # TODO: set this
60 "github_version": os.environ.get("READTHEDOCS_VERSION", "main"),
61 "doc_path": "docs-sphinx",
62 }
63 html_theme = "pydata_sphinx_theme"
64 html_show_sourcelink = True
65 html_theme_options = {
66 "logo": {
67 "image_light": "image/logo-300px.png",
68 "image_dark": "image/logo-300px-white.png",
69 },
70 "github_url": "https://github.com/scikit-hep/awkward",
71 # Add light/dark mode and documentation version switcher:
72 "navbar_end": ["theme-switcher", "navbar-icon-links"],
73 "footer_items": ["copyright", "sphinx-version", "funding"],
74 "icon_links": [
75 {
76 "name": "PyPI",
77 "url": "https://pypi.org/project/awkward",
78 "icon": "fab fa-python",
79 }
80 ],
81 "use_edit_page_button": True,
82 "external_links": [
83 {
84 "name": "Contributor guide",
85 "url": "https://github.com/scikit-hep/awkward/blob/main/CONTRIBUTING.md",
86 },
87 {
88 "name": "Release history",
89 "url": "https://github.com/scikit-hep/awkward/releases",
90 },
91 ],
92 }
93
94 # Add any paths that contain custom static files (such as style sheets) here,
95 # relative to this directory. They are copied after the builtin static files,
96 # so a file named "default.css" will overwrite the builtin "default.css".
97 html_static_path = ["_static"]
98 html_css_files = ["css/awkward.css"]
99
100 # MyST settings
101 myst_enable_extensions = [
102 "colon_fence",
103 ]
104
105 nb_execution_mode = "cache"
106 nb_execution_raise_on_error = True
107 # unpkg is currently _very_ slow
108 nb_ipywidgets_js = {
109 # Load RequireJS, used by the IPywidgets for dependency management
110 "https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js": {
111 "integrity": "sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=",
112 "crossorigin": "anonymous",
113 },
114 # Load IPywidgets bundle for embedding.
115 "https://cdn.jsdelivr.net/npm/@jupyter-widgets/[email protected]/dist/embed-amd.js": {
116 "data-jupyter-widgets-cdn": "https://cdn.jsdelivr.net/npm/",
117 "crossorigin": "anonymous",
118 },
119 }
120 # Additional stuff
121 master_doc = "index"
122
123 # Cross-reference existing Python objects
124 intersphinx_mapping = {
125 "python": ("https://docs.python.org/3/", None),
126 "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
127 "numpy": ("https://numpy.org/doc/stable", None),
128 "scipy": ("https://docs.scipy.org/doc/scipy", None),
129 "numba": ("https://numba.pydata.org/numba-doc/latest", None),
130 "arrow": ("https://arrow.apache.org/docs/", None),
131 "jax": ("https://jax.readthedocs.io/en/latest", None),
132 }
133
134 # Preserve legacy routes
135 with open("redirects.json") as f:
136 redirects = json.load(f)
137
138 redirect_html_template_file = "_templates/redirect.html"
139
140 # JupyterLite configuration
141 jupyterlite_dir = "./lite"
142 # Don't override ipynb format
143 jupyterlite_bind_ipynb_suffix = False
144 # We've disabled localstorage, so we must provide the contents explicitly
145 jupyterlite_contents = ["getting-started/demo/*"]
146
147 HERE = pathlib.Path(__file__).parent
148
149 # Generate C++ bindings
150 subprocess.check_call(
151 ["doxygen", str(HERE.parent / "docs-doxygen" / "Doxyfile")], cwd=HERE.parent
152 )
153
154 # Generate Python docstrings
155 runpy.run_path(HERE / "prepare_docstrings.py", run_name="__main__")
156
157 # Generate kernel docs
158 runpy.run_path(HERE.parent / "dev" / "generate-kerneldocs.py", run_name="__main__")
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs-sphinx/conf.py b/docs-sphinx/conf.py
--- a/docs-sphinx/conf.py
+++ b/docs-sphinx/conf.py
@@ -89,6 +89,10 @@
"url": "https://github.com/scikit-hep/awkward/releases",
},
],
+ "analytics": {
+ "plausible_analytics_domain": "awkward-array.org",
+ "plausible_analytics_url": "https://views.scientific-python.org/js/plausible.js"
+ }
}
# Add any paths that contain custom static files (such as style sheets) here,
| {"golden_diff": "diff --git a/docs-sphinx/conf.py b/docs-sphinx/conf.py\n--- a/docs-sphinx/conf.py\n+++ b/docs-sphinx/conf.py\n@@ -89,6 +89,10 @@\n \"url\": \"https://github.com/scikit-hep/awkward/releases\",\n },\n ],\n+ \"analytics\": {\n+ \"plausible_analytics_domain\": \"awkward-array.org\",\n+ \"plausible_analytics_url\": \"https://views.scientific-python.org/js/plausible.js\"\n+ }\n }\n \n # Add any paths that contain custom static files (such as style sheets) here,\n", "issue": "Add statistics tracker to the new website\n### Which documentation?\n\nTutorials site\n\n### What needs to be documented?\n\nStefan gave us a `data-domain` for Plausible; we should add this text\r\n\r\n```html\r\n<script defer data-domain=\"awkward-array.org\" src=\"https://views.scientific-python.org/js/plausible.js\"></script>\r\n```\r\n\r\nto every HTML page. The HTML is generated by Sphinx, so it has to go into a template somewhere. I think we don't actually have the default HTML templates; I think that all of the HTML in [docs-sphinx/_templates](https://github.com/scikit-hep/awkward/tree/main/docs-sphinx/_templates) is an override. For instance, the [breadcrumbs.html](https://github.com/scikit-hep/awkward/blob/main/docs-sphinx/_templates/breadcrumbs.html) is just to eliminate something that would be on the default page (the \"fork me on GitHub\" ribbon?).\r\n\r\n@agoose77, you've recently added [funding.html](https://github.com/scikit-hep/awkward/blob/main/docs-sphinx/_templates/funding.html), which appears at the bottom of every page. (I just checked.) If the `<script>` is added there, I think it would reach every page.\r\n\r\nExcept redirects. The [redirect.html](https://github.com/scikit-hep/awkward/blob/main/docs-sphinx/_templates/redirect.html) describes an entire page. It's debatable whether we'd want to count redirects. It would tell us how many people are using old URLs versus new URLs, but I don't see how useful that information is, and it would have to be disaggregated from the totals, since redirected links shouldn't be counted twice if we're interested in how many people went to a given _topic_ (not _page_). I vote for no statistics on redirects, and I'm not even sure if the script will work if the redirect happens through the meta mechanism (because such an access has `<noscript>`.\r\n\r\nArguably, a `<script>` element ought to go in the HTML `<head>`, rather than the `<body>` (`footer.html`). That's where I usually see them. Actually, I stand corrected: they're legal anywhere, and there are reasons to put them in the `<body>`. [This StackOverflow post](https://stackoverflow.com/a/24070373/1623645) presents the pros and cons: page rendering will pause while a script is being downloaded an executed, and that's no good. We could follow that page's \"antiquated recommendation\" by putting the `<script>` at the end of the page (`footer.html`); the reason against it doesn't apply: we don't need the statistics-counter script to run to render the page\u2014that can happen late. The \"modern approach\" is to use `async` or `defer`, which I just noticed is in our snippet, so there are no constraints on when this snippet can be placed. 
(And it could be `async`, rather than `defer`, because we don't care whether it runs before or after other scripts on the page.)\r\n\r\nThe only argument I can see for putting it in the `<head>`, then, is that if the statistics-counter starts too late, we could undercount our bounce rate: users click the back button before rendering gets to the `<script>` and the page view gets counted. There will always be some cut-off in the bounce rate, much like $p_T$ in a tracking distribution, since very small values are hard to measure. Having a lower implicit cut on bounce rate, rather than a higher implicit cut, doesn't sound very important to me.\r\n\r\nTrying to get it into the `<head>` would mean overloading more templates, and I don't like to overload Sphinx templates because it means we no longer get version updates for that part of the page, and who knows if the template designer intends some relationship between two parts of a page, starting with a particular version number? So I'm in favor of adding the `<script>` to `footer.html`, and `defer` may be replaced by `async` just to loosen an unnecessary constraint for the browser.\r\n\r\nSorry for the long-winded write-up; just thinking through the issues while I type!\r\n\r\nOh, one last thing: let's add the statistics-counter to the new documentation _only_. In other words, _not_ the one with the `latest` tag. The v1 documentation (`latest`) is split between two sites, I don't want to add it to the Netlify site, and the statistics will be easier to interpret if we have it on only one site: we'll know what page is meant by a given URL. We should also see the turn-on curve when the new documentation goes public. If we include the old documentation, we might not be able to filter it out of the statistics, since some of the URLs are the same.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport json\nimport datetime\nimport runpy\nimport sys\nimport subprocess\nimport pathlib\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Awkward Array\"\ncopyright = f\"{datetime.datetime.now().year}, Awkward Array development team\"\nauthor = \"Jim Pivarski\"\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named \"sphinx.ext.*\") or your custom\n# ones.\nextensions = [\n \"sphinx_copybutton\",\n \"sphinx_design\",\n \"sphinx_external_toc\",\n \"sphinx.ext.intersphinx\",\n \"myst_nb\",\n # Preserve old links\n \"sphinx_reredirects\",\n \"jupyterlite_sphinx\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"_templates\", \"Thumbs.db\", \"jupyter_execute\", \".*\"]\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_context = {\n \"github_user\": \"scikit-hep\",\n \"github_repo\": \"awkward\",\n # TODO: set this\n \"github_version\": os.environ.get(\"READTHEDOCS_VERSION\", \"main\"),\n \"doc_path\": \"docs-sphinx\",\n}\nhtml_theme = \"pydata_sphinx_theme\"\nhtml_show_sourcelink = True\nhtml_theme_options = {\n \"logo\": {\n \"image_light\": \"image/logo-300px.png\",\n \"image_dark\": \"image/logo-300px-white.png\",\n },\n \"github_url\": \"https://github.com/scikit-hep/awkward\",\n # Add light/dark mode and documentation version switcher:\n \"navbar_end\": [\"theme-switcher\", \"navbar-icon-links\"],\n \"footer_items\": [\"copyright\", \"sphinx-version\", \"funding\"],\n \"icon_links\": [\n {\n \"name\": \"PyPI\",\n \"url\": \"https://pypi.org/project/awkward\",\n \"icon\": \"fab fa-python\",\n }\n ],\n \"use_edit_page_button\": True,\n \"external_links\": [\n {\n \"name\": \"Contributor guide\",\n \"url\": \"https://github.com/scikit-hep/awkward/blob/main/CONTRIBUTING.md\",\n },\n {\n \"name\": \"Release history\",\n \"url\": \"https://github.com/scikit-hep/awkward/releases\",\n },\n ],\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"css/awkward.css\"]\n\n# MyST settings\nmyst_enable_extensions = [\n \"colon_fence\",\n]\n\nnb_execution_mode = \"cache\"\nnb_execution_raise_on_error = True\n# unpkg is currently _very_ slow\nnb_ipywidgets_js = {\n # Load RequireJS, used by the IPywidgets for dependency management\n \"https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js\": {\n \"integrity\": \"sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=\",\n \"crossorigin\": \"anonymous\",\n },\n # Load IPywidgets bundle for embedding.\n \"https://cdn.jsdelivr.net/npm/@jupyter-widgets/[email protected]/dist/embed-amd.js\": {\n \"data-jupyter-widgets-cdn\": \"https://cdn.jsdelivr.net/npm/\",\n \"crossorigin\": \"anonymous\",\n },\n}\n# Additional stuff\nmaster_doc = \"index\"\n\n# Cross-reference existing Python objects\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable\", None),\n \"numpy\": (\"https://numpy.org/doc/stable\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy\", None),\n \"numba\": (\"https://numba.pydata.org/numba-doc/latest\", None),\n \"arrow\": (\"https://arrow.apache.org/docs/\", None),\n \"jax\": (\"https://jax.readthedocs.io/en/latest\", None),\n}\n\n# Preserve legacy routes\nwith open(\"redirects.json\") as f:\n redirects = json.load(f)\n\nredirect_html_template_file = \"_templates/redirect.html\"\n\n# JupyterLite configuration\njupyterlite_dir = \"./lite\"\n# Don't override ipynb format\njupyterlite_bind_ipynb_suffix = False\n# We've disabled localstorage, so we must provide the contents explicitly\njupyterlite_contents = [\"getting-started/demo/*\"]\n\nHERE = pathlib.Path(__file__).parent\n\n# Generate C++ bindings\nsubprocess.check_call(\n [\"doxygen\", str(HERE.parent / \"docs-doxygen\" / \"Doxyfile\")], cwd=HERE.parent\n)\n\n# Generate Python docstrings\nrunpy.run_path(HERE / \"prepare_docstrings.py\", run_name=\"__main__\")\n\n# Generate kernel docs\nrunpy.run_path(HERE.parent / \"dev\" / \"generate-kerneldocs.py\", run_name=\"__main__\")\n", "path": "docs-sphinx/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport json\nimport datetime\nimport runpy\nimport sys\nimport subprocess\nimport pathlib\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Awkward Array\"\ncopyright = f\"{datetime.datetime.now().year}, Awkward Array development team\"\nauthor = \"Jim Pivarski\"\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named \"sphinx.ext.*\") or your custom\n# ones.\nextensions = [\n \"sphinx_copybutton\",\n \"sphinx_design\",\n \"sphinx_external_toc\",\n \"sphinx.ext.intersphinx\",\n \"myst_nb\",\n # Preserve old links\n \"sphinx_reredirects\",\n \"jupyterlite_sphinx\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"_templates\", \"Thumbs.db\", \"jupyter_execute\", \".*\"]\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_context = {\n \"github_user\": \"scikit-hep\",\n \"github_repo\": \"awkward\",\n # TODO: set this\n \"github_version\": os.environ.get(\"READTHEDOCS_VERSION\", \"main\"),\n \"doc_path\": \"docs-sphinx\",\n}\nhtml_theme = \"pydata_sphinx_theme\"\nhtml_show_sourcelink = True\nhtml_theme_options = {\n \"logo\": {\n \"image_light\": \"image/logo-300px.png\",\n \"image_dark\": \"image/logo-300px-white.png\",\n },\n \"github_url\": \"https://github.com/scikit-hep/awkward\",\n # Add light/dark mode and documentation version switcher:\n \"navbar_end\": [\"theme-switcher\", \"navbar-icon-links\"],\n \"footer_items\": [\"copyright\", \"sphinx-version\", \"funding\"],\n \"icon_links\": [\n {\n \"name\": \"PyPI\",\n \"url\": \"https://pypi.org/project/awkward\",\n \"icon\": \"fab fa-python\",\n }\n ],\n \"use_edit_page_button\": True,\n \"external_links\": [\n {\n \"name\": \"Contributor guide\",\n \"url\": \"https://github.com/scikit-hep/awkward/blob/main/CONTRIBUTING.md\",\n },\n {\n \"name\": \"Release history\",\n \"url\": \"https://github.com/scikit-hep/awkward/releases\",\n },\n ],\n \"analytics\": {\n \"plausible_analytics_domain\": \"awkward-array.org\",\n \"plausible_analytics_url\": \"https://views.scientific-python.org/js/plausible.js\"\n }\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"css/awkward.css\"]\n\n# MyST settings\nmyst_enable_extensions = [\n \"colon_fence\",\n]\n\nnb_execution_mode = \"cache\"\nnb_execution_raise_on_error = True\n# unpkg is currently _very_ slow\nnb_ipywidgets_js = {\n # Load RequireJS, used by the IPywidgets for dependency management\n \"https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js\": {\n \"integrity\": \"sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=\",\n \"crossorigin\": \"anonymous\",\n },\n # Load IPywidgets bundle for embedding.\n \"https://cdn.jsdelivr.net/npm/@jupyter-widgets/[email protected]/dist/embed-amd.js\": {\n \"data-jupyter-widgets-cdn\": \"https://cdn.jsdelivr.net/npm/\",\n \"crossorigin\": \"anonymous\",\n },\n}\n# Additional stuff\nmaster_doc = \"index\"\n\n# Cross-reference existing Python objects\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable\", None),\n \"numpy\": (\"https://numpy.org/doc/stable\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy\", None),\n \"numba\": (\"https://numba.pydata.org/numba-doc/latest\", None),\n \"arrow\": (\"https://arrow.apache.org/docs/\", None),\n \"jax\": (\"https://jax.readthedocs.io/en/latest\", None),\n}\n\n# Preserve legacy routes\nwith open(\"redirects.json\") as f:\n redirects = json.load(f)\n\nredirect_html_template_file = \"_templates/redirect.html\"\n\n# JupyterLite configuration\njupyterlite_dir = \"./lite\"\n# Don't override ipynb format\njupyterlite_bind_ipynb_suffix = False\n# We've disabled localstorage, so we must provide the contents explicitly\njupyterlite_contents = [\"getting-started/demo/*\"]\n\nHERE = pathlib.Path(__file__).parent\n\n# Generate C++ bindings\nsubprocess.check_call(\n [\"doxygen\", str(HERE.parent / \"docs-doxygen\" / \"Doxyfile\")], cwd=HERE.parent\n)\n\n# Generate Python docstrings\nrunpy.run_path(HERE / \"prepare_docstrings.py\")\n\n# Generate kernel docs\nrunpy.run_path(HERE.parent / \"dev\" / \"generate-kerneldocs.py\")\n", "path": "docs-sphinx/conf.py"}]} | 3,001 | 133 |
gh_patches_debug_65 | rasdani/github-patches | git_diff | optuna__optuna-3342 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mention each tutorial page in API pages at least once
<!-- Please write a clear and concise description of what content in https://optuna.readthedocs.io/ is an issue. -->
[Optuna's tutorial](https://optuna.readthedocs.io/en/stable/tutorial/index.html) is a good source to understand Optuna's functionality with concrete examples. However, some tutorial pages might not be mentioned in the corresponding Optuna API pages.
## Description
Please add a `note section` or `see also section` to mention the following tutorial pages.
- ~[ ] [Lightweight, versatile, and platform agnostic architecture](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/001_first.html)~
  - ~this is a somewhat conceptual page, so it might not be necessary to be linked.~
- [x] [Pythonic Search Space](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html)
- [x] [Efficient Optimization Algorithms](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/003_efficient_optimization_algorithms.html)
- [x] [Easy Parallelization](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/004_distributed.html)
- [x] [Quick Visualization for Hyperparameter Optimization Analysis](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/005_visualization.html)
- linked from https://optuna.readthedocs.io/en/stable/reference/visualization/index.html
- [x] [Saving/Resuming Study with RDB Backend](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/001_rdb.html)
- linked from https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html#optuna.study.create_study
- [x] [Multi-objective Optimization with Optuna](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/002_multi_objective.html) https://github.com/optuna/optuna/pull/3339
- [x] [User Attributes](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/003_attributes.html)
- [x] [Command-Line Interface](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/004_cli.html)
- linked from https://optuna.readthedocs.io/en/stable/reference/index.html
- [x] [User-Defined Sampler](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/005_user_defined_sampler.html)
- linked from https://optuna.readthedocs.io/en/stable/reference/samplers.html
- [x] [User-Defined Pruner](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/006_user_defined_pruner.html)
- linked from https://optuna.readthedocs.io/en/stable/reference/pruners.html
- [x] [Callback for Study.optimize](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/007_optuna_callback.html)
- linked from `callbacks`'s section in https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize
- [ ] [Specify Hyperparameters Manually](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/008_specify_params.html)
- [x] [the first section](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize) can be linked from `Study.enqueue_trial`
- [ ] [the second section](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/008_specify_params.html#second-scenario-have-optuna-utilize-already-evaluated-hyperparameters) can be linked from `Study.add_trial` (see https://github.com/optuna/optuna/pull/3346)
- [x] [Ask-and-Tell Interface](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/009_ask_and_tell.html)
- [x] [Re-use the best values](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/010_reuse_best_trial.html) https://github.com/optuna/optuna/pull/3396
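For illustration, a cross-reference of the kind requested above could be added to an API docstring as an RST ``seealso`` directive. The sketch below is hypothetical: the method body is elided and the target label ``configurations`` is only an assumed anchor name for the Pythonic Search Space tutorial page, not verified against the real docs.

```python
# Hypothetical sketch of a "see also" note in an API docstring.
# The label ``configurations`` is an assumed tutorial anchor, not verified.
def suggest_float(self, name, low, high):
    """Suggest a value for the floating point parameter.

    .. seealso::

        :ref:`configurations` describes more details and flexible usages
        of the Pythonic search space.
    """
    ...
```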
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tutorial/20_recipes/005_user_defined_sampler.py`
Content:
```
1 """
2 .. _sampler:
3
4 User-Defined Sampler
5 ====================
6
7 Thanks to user-defined samplers, you can:
8
9 - experiment your own sampling algorithms,
10 - implement task-specific algorithms to refine the optimization performance, or
11 - wrap other optimization libraries to integrate them into Optuna pipelines (e.g., :class:`~optuna.integration.SkoptSampler`).
12
13 This section describes the internal behavior of sampler classes and shows an example of implementing a user-defined sampler.
14
15
16 Overview of Sampler
17 -------------------
18
19 A sampler has the responsibility to determine the parameter values to be evaluated in a trial.
20 When a `suggest` API (e.g., :func:`~optuna.trial.Trial.suggest_float`) is called inside an objective function, the corresponding distribution object (e.g., :class:`~optuna.distributions.UniformDistribution`) is created internally. A sampler samples a parameter value from the distribution. The sampled value is returned to the caller of the `suggest` API and evaluated in the objective function.
21
22 To create a new sampler, you need to define a class that inherits :class:`~optuna.samplers.BaseSampler`.
23 The base class has three abstract methods;
24 :meth:`~optuna.samplers.BaseSampler.infer_relative_search_space`,
25 :meth:`~optuna.samplers.BaseSampler.sample_relative`, and
26 :meth:`~optuna.samplers.BaseSampler.sample_independent`.
27
28 As the method names imply, Optuna supports two types of sampling: one is **relative sampling** that can consider the correlation of the parameters in a trial, and the other is **independent sampling** that samples each parameter independently.
29
30 At the beginning of a trial, :meth:`~optuna.samplers.BaseSampler.infer_relative_search_space` is called to provide the relative search space for the trial. Then, :meth:`~optuna.samplers.BaseSampler.sample_relative` is invoked to sample relative parameters from the search space. During the execution of the objective function, :meth:`~optuna.samplers.BaseSampler.sample_independent` is used to sample parameters that don't belong to the relative search space.
31
32 .. note::
33 Please refer to the document of :class:`~optuna.samplers.BaseSampler` for further details.
34
35
36 An Example: Implementing SimulatedAnnealingSampler
37 --------------------------------------------------
38
39 For example, the following code defines a sampler based on
40 `Simulated Annealing (SA) <https://en.wikipedia.org/wiki/Simulated_annealing>`_:
41 """
42
43 import numpy as np
44 import optuna
45
46
47 class SimulatedAnnealingSampler(optuna.samplers.BaseSampler):
48 def __init__(self, temperature=100):
49 self._rng = np.random.RandomState()
50 self._temperature = temperature # Current temperature.
51 self._current_trial = None # Current state.
52
53 def sample_relative(self, study, trial, search_space):
54 if search_space == {}:
55 return {}
56
57 # Simulated Annealing algorithm.
58 # 1. Calculate transition probability.
59 prev_trial = study.trials[-2]
60 if self._current_trial is None or prev_trial.value <= self._current_trial.value:
61 probability = 1.0
62 else:
63 probability = np.exp(
64 (self._current_trial.value - prev_trial.value) / self._temperature
65 )
66 self._temperature *= 0.9 # Decrease temperature.
67
68 # 2. Transit the current state if the previous result is accepted.
69 if self._rng.uniform(0, 1) < probability:
70 self._current_trial = prev_trial
71
72 # 3. Sample parameters from the neighborhood of the current point.
73 # The sampled parameters will be used during the next execution of
74 # the objective function passed to the study.
75 params = {}
76 for param_name, param_distribution in search_space.items():
77 if not isinstance(param_distribution, optuna.distributions.UniformDistribution):
78 raise NotImplementedError("Only suggest_float() is supported")
79
80 current_value = self._current_trial.params[param_name]
81 width = (param_distribution.high - param_distribution.low) * 0.1
82 neighbor_low = max(current_value - width, param_distribution.low)
83 neighbor_high = min(current_value + width, param_distribution.high)
84 params[param_name] = self._rng.uniform(neighbor_low, neighbor_high)
85
86 return params
87
88 # The rest are unrelated to SA algorithm: boilerplate
89 def infer_relative_search_space(self, study, trial):
90 return optuna.samplers.intersection_search_space(study)
91
92 def sample_independent(self, study, trial, param_name, param_distribution):
93 independent_sampler = optuna.samplers.RandomSampler()
94 return independent_sampler.sample_independent(study, trial, param_name, param_distribution)
95
96
97 ###################################################################################################
98 # .. note::
99 # In favor of code simplicity, the above implementation doesn't support some features (e.g., maximization).
100 # If you're interested in how to support those features, please see
101 # `examples/samplers/simulated_annealing.py
102 # <https://github.com/optuna/optuna-examples/blob/main/samplers/simulated_annealing_sampler.py>`_.
103 #
104 #
105 # You can use ``SimulatedAnnealingSampler`` in the same way as built-in samplers as follows:
106
107
108 def objective(trial):
109 x = trial.suggest_float("x", -10, 10)
110 y = trial.suggest_float("y", -5, 5)
111 return x**2 + y
112
113
114 sampler = SimulatedAnnealingSampler()
115 study = optuna.create_study(sampler=sampler)
116 study.optimize(objective, n_trials=100)
117
118 best_trial = study.best_trial
119 print("Best value: ", best_trial.value)
120 print("Parameters that achieve the best value: ", best_trial.params)
121
122
123 ###################################################################################################
124 # In this optimization, the values of ``x`` and ``y`` parameters are sampled by using
125 # ``SimulatedAnnealingSampler.sample_relative`` method.
126 #
127 # .. note::
128 # Strictly speaking, in the first trial,
129 # ``SimulatedAnnealingSampler.sample_independent`` method is used to sample parameter values.
130 # Because :func:`~optuna.samplers.intersection_search_space` used in
131 # ``SimulatedAnnealingSampler.infer_relative_search_space`` cannot infer the search space
132 # if there are no complete trials.
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tutorial/20_recipes/005_user_defined_sampler.py b/tutorial/20_recipes/005_user_defined_sampler.py
--- a/tutorial/20_recipes/005_user_defined_sampler.py
+++ b/tutorial/20_recipes/005_user_defined_sampler.py
@@ -1,5 +1,5 @@
"""
-.. _sampler:
+.. _user_defined_sampler:
User-Defined Sampler
====================
| {"golden_diff": "diff --git a/tutorial/20_recipes/005_user_defined_sampler.py b/tutorial/20_recipes/005_user_defined_sampler.py\n--- a/tutorial/20_recipes/005_user_defined_sampler.py\n+++ b/tutorial/20_recipes/005_user_defined_sampler.py\n@@ -1,5 +1,5 @@\n \"\"\"\n-.. _sampler:\n+.. _user_defined_sampler:\n \n User-Defined Sampler\n ====================\n", "issue": "Mention each tutorial page in API pages at least once\n<!-- Please write a clear and concise description of what content in https://optuna.readthedocs.io/ is an issue. -->\r\n\r\n[Optuna's tutorial](https://optuna.readthedocs.io/en/stable/tutorial/index.html) is a good source to understand Optuna's functionality with concrete examples. However, some tutorial pages might not be mentioned in corresponding Optuna's API pages. \r\n\r\n\r\n## Description\r\n\r\nPlease add a `note section` or `see also section` to mention the following tutorial pages.\r\n\r\n- ~[ ] [Lightweight, versatile, and platform agnostic architecture](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/001_first.html)~\r\n - ~this is is a little bit conceptual page, so it might not be necessary to be linked.~ \r\n- [x] [Pythonic Search Space](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html)\r\n- [x] [Efficient Optimization Algorithms](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/003_efficient_optimization_algorithms.html)\r\n- [x] [Easy Parallelization](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/004_distributed.html)\r\n- [x] [Quick Visualization for Hyperparameter Optimization Analysis](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/005_visualization.html) \r\n - linked from https://optuna.readthedocs.io/en/stable/reference/visualization/index.html\r\n- [x] [Saving/Resuming Study with RDB Backend](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/001_rdb.html)\r\n - linked from https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html#optuna.study.create_study\r\n- [x] [Multi-objective Optimization with Optuna] (https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/002_multi_objective.html) https://github.com/optuna/optuna/pull/3339\r\n- [x] [User Attributes](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/003_attributes.html)\r\n- [x] [Command-Line Interface](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/004_cli.html)\r\n - linked from https://optuna.readthedocs.io/en/stable/reference/index.html\r\n- [x] [User-Defined Sampler](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/005_user_defined_sampler.html)\r\n - linked from https://optuna.readthedocs.io/en/stable/reference/samplers.html\r\n- [x] [User-Defined Pruner](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/006_user_defined_pruner.html)\r\n - linked from https://optuna.readthedocs.io/en/stable/reference/pruners.html\r\n- [x] [Callback for Study.optimize](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/007_optuna_callback.html)\r\n - linked from `callbacks`'s section in https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize\r\n- [ ] [Specify Hyperparameters Manually](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/008_specify_params.html)\r\n - [x] [the first section](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize) can be linked from 
`Study.enqueue_trial` \r\n - [ ] [the second section](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/008_specify_params.html#second-scenario-have-optuna-utilize-already-evaluated-hyperparameters) can be linked from `Study.add_trial`\u3000(see https://github.com/optuna/optuna/pull/3346)\r\n- [x] [Ask-and-Tell Interface](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/009_ask_and_tell.html)\r\n- [x] [Re-use the best values](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/010_reuse_best_trial.html) https://github.com/optuna/optuna/pull/3396\n", "before_files": [{"content": "\"\"\"\n.. _sampler:\n\nUser-Defined Sampler\n====================\n\nThanks to user-defined samplers, you can:\n\n- experiment your own sampling algorithms,\n- implement task-specific algorithms to refine the optimization performance, or\n- wrap other optimization libraries to integrate them into Optuna pipelines (e.g., :class:`~optuna.integration.SkoptSampler`).\n\nThis section describes the internal behavior of sampler classes and shows an example of implementing a user-defined sampler.\n\n\nOverview of Sampler\n-------------------\n\nA sampler has the responsibility to determine the parameter values to be evaluated in a trial.\nWhen a `suggest` API (e.g., :func:`~optuna.trial.Trial.suggest_float`) is called inside an objective function, the corresponding distribution object (e.g., :class:`~optuna.distributions.UniformDistribution`) is created internally. A sampler samples a parameter value from the distribution. The sampled value is returned to the caller of the `suggest` API and evaluated in the objective function.\n\nTo create a new sampler, you need to define a class that inherits :class:`~optuna.samplers.BaseSampler`.\nThe base class has three abstract methods;\n:meth:`~optuna.samplers.BaseSampler.infer_relative_search_space`,\n:meth:`~optuna.samplers.BaseSampler.sample_relative`, and\n:meth:`~optuna.samplers.BaseSampler.sample_independent`.\n\nAs the method names imply, Optuna supports two types of sampling: one is **relative sampling** that can consider the correlation of the parameters in a trial, and the other is **independent sampling** that samples each parameter independently.\n\nAt the beginning of a trial, :meth:`~optuna.samplers.BaseSampler.infer_relative_search_space` is called to provide the relative search space for the trial. Then, :meth:`~optuna.samplers.BaseSampler.sample_relative` is invoked to sample relative parameters from the search space. During the execution of the objective function, :meth:`~optuna.samplers.BaseSampler.sample_independent` is used to sample parameters that don't belong to the relative search space.\n\n.. note::\n Please refer to the document of :class:`~optuna.samplers.BaseSampler` for further details.\n\n\nAn Example: Implementing SimulatedAnnealingSampler\n--------------------------------------------------\n\nFor example, the following code defines a sampler based on\n`Simulated Annealing (SA) <https://en.wikipedia.org/wiki/Simulated_annealing>`_:\n\"\"\"\n\nimport numpy as np\nimport optuna\n\n\nclass SimulatedAnnealingSampler(optuna.samplers.BaseSampler):\n def __init__(self, temperature=100):\n self._rng = np.random.RandomState()\n self._temperature = temperature # Current temperature.\n self._current_trial = None # Current state.\n\n def sample_relative(self, study, trial, search_space):\n if search_space == {}:\n return {}\n\n # Simulated Annealing algorithm.\n # 1. 
Calculate transition probability.\n prev_trial = study.trials[-2]\n if self._current_trial is None or prev_trial.value <= self._current_trial.value:\n probability = 1.0\n else:\n probability = np.exp(\n (self._current_trial.value - prev_trial.value) / self._temperature\n )\n self._temperature *= 0.9 # Decrease temperature.\n\n # 2. Transit the current state if the previous result is accepted.\n if self._rng.uniform(0, 1) < probability:\n self._current_trial = prev_trial\n\n # 3. Sample parameters from the neighborhood of the current point.\n # The sampled parameters will be used during the next execution of\n # the objective function passed to the study.\n params = {}\n for param_name, param_distribution in search_space.items():\n if not isinstance(param_distribution, optuna.distributions.UniformDistribution):\n raise NotImplementedError(\"Only suggest_float() is supported\")\n\n current_value = self._current_trial.params[param_name]\n width = (param_distribution.high - param_distribution.low) * 0.1\n neighbor_low = max(current_value - width, param_distribution.low)\n neighbor_high = min(current_value + width, param_distribution.high)\n params[param_name] = self._rng.uniform(neighbor_low, neighbor_high)\n\n return params\n\n # The rest are unrelated to SA algorithm: boilerplate\n def infer_relative_search_space(self, study, trial):\n return optuna.samplers.intersection_search_space(study)\n\n def sample_independent(self, study, trial, param_name, param_distribution):\n independent_sampler = optuna.samplers.RandomSampler()\n return independent_sampler.sample_independent(study, trial, param_name, param_distribution)\n\n\n###################################################################################################\n# .. note::\n# In favor of code simplicity, the above implementation doesn't support some features (e.g., maximization).\n# If you're interested in how to support those features, please see\n# `examples/samplers/simulated_annealing.py\n# <https://github.com/optuna/optuna-examples/blob/main/samplers/simulated_annealing_sampler.py>`_.\n#\n#\n# You can use ``SimulatedAnnealingSampler`` in the same way as built-in samplers as follows:\n\n\ndef objective(trial):\n x = trial.suggest_float(\"x\", -10, 10)\n y = trial.suggest_float(\"y\", -5, 5)\n return x**2 + y\n\n\nsampler = SimulatedAnnealingSampler()\nstudy = optuna.create_study(sampler=sampler)\nstudy.optimize(objective, n_trials=100)\n\nbest_trial = study.best_trial\nprint(\"Best value: \", best_trial.value)\nprint(\"Parameters that achieve the best value: \", best_trial.params)\n\n\n###################################################################################################\n# In this optimization, the values of ``x`` and ``y`` parameters are sampled by using\n# ``SimulatedAnnealingSampler.sample_relative`` method.\n#\n# .. note::\n# Strictly speaking, in the first trial,\n# ``SimulatedAnnealingSampler.sample_independent`` method is used to sample parameter values.\n# Because :func:`~optuna.samplers.intersection_search_space` used in\n# ``SimulatedAnnealingSampler.infer_relative_search_space`` cannot infer the search space\n# if there are no complete trials.\n", "path": "tutorial/20_recipes/005_user_defined_sampler.py"}], "after_files": [{"content": "\"\"\"\n.. 
_user_defined_sampler:\n\nUser-Defined Sampler\n====================\n\nThanks to user-defined samplers, you can:\n\n- experiment your own sampling algorithms,\n- implement task-specific algorithms to refine the optimization performance, or\n- wrap other optimization libraries to integrate them into Optuna pipelines (e.g., :class:`~optuna.integration.SkoptSampler`).\n\nThis section describes the internal behavior of sampler classes and shows an example of implementing a user-defined sampler.\n\n\nOverview of Sampler\n-------------------\n\nA sampler has the responsibility to determine the parameter values to be evaluated in a trial.\nWhen a `suggest` API (e.g., :func:`~optuna.trial.Trial.suggest_float`) is called inside an objective function, the corresponding distribution object (e.g., :class:`~optuna.distributions.UniformDistribution`) is created internally. A sampler samples a parameter value from the distribution. The sampled value is returned to the caller of the `suggest` API and evaluated in the objective function.\n\nTo create a new sampler, you need to define a class that inherits :class:`~optuna.samplers.BaseSampler`.\nThe base class has three abstract methods;\n:meth:`~optuna.samplers.BaseSampler.infer_relative_search_space`,\n:meth:`~optuna.samplers.BaseSampler.sample_relative`, and\n:meth:`~optuna.samplers.BaseSampler.sample_independent`.\n\nAs the method names imply, Optuna supports two types of sampling: one is **relative sampling** that can consider the correlation of the parameters in a trial, and the other is **independent sampling** that samples each parameter independently.\n\nAt the beginning of a trial, :meth:`~optuna.samplers.BaseSampler.infer_relative_search_space` is called to provide the relative search space for the trial. Then, :meth:`~optuna.samplers.BaseSampler.sample_relative` is invoked to sample relative parameters from the search space. During the execution of the objective function, :meth:`~optuna.samplers.BaseSampler.sample_independent` is used to sample parameters that don't belong to the relative search space.\n\n.. note::\n Please refer to the document of :class:`~optuna.samplers.BaseSampler` for further details.\n\n\nAn Example: Implementing SimulatedAnnealingSampler\n--------------------------------------------------\n\nFor example, the following code defines a sampler based on\n`Simulated Annealing (SA) <https://en.wikipedia.org/wiki/Simulated_annealing>`_:\n\"\"\"\n\nimport numpy as np\nimport optuna\n\n\nclass SimulatedAnnealingSampler(optuna.samplers.BaseSampler):\n def __init__(self, temperature=100):\n self._rng = np.random.RandomState()\n self._temperature = temperature # Current temperature.\n self._current_trial = None # Current state.\n\n def sample_relative(self, study, trial, search_space):\n if search_space == {}:\n return {}\n\n # Simulated Annealing algorithm.\n # 1. Calculate transition probability.\n prev_trial = study.trials[-2]\n if self._current_trial is None or prev_trial.value <= self._current_trial.value:\n probability = 1.0\n else:\n probability = np.exp(\n (self._current_trial.value - prev_trial.value) / self._temperature\n )\n self._temperature *= 0.9 # Decrease temperature.\n\n # 2. Transit the current state if the previous result is accepted.\n if self._rng.uniform(0, 1) < probability:\n self._current_trial = prev_trial\n\n # 3. 
Sample parameters from the neighborhood of the current point.\n # The sampled parameters will be used during the next execution of\n # the objective function passed to the study.\n params = {}\n for param_name, param_distribution in search_space.items():\n if not isinstance(param_distribution, optuna.distributions.UniformDistribution):\n raise NotImplementedError(\"Only suggest_float() is supported\")\n\n current_value = self._current_trial.params[param_name]\n width = (param_distribution.high - param_distribution.low) * 0.1\n neighbor_low = max(current_value - width, param_distribution.low)\n neighbor_high = min(current_value + width, param_distribution.high)\n params[param_name] = self._rng.uniform(neighbor_low, neighbor_high)\n\n return params\n\n # The rest are unrelated to SA algorithm: boilerplate\n def infer_relative_search_space(self, study, trial):\n return optuna.samplers.intersection_search_space(study)\n\n def sample_independent(self, study, trial, param_name, param_distribution):\n independent_sampler = optuna.samplers.RandomSampler()\n return independent_sampler.sample_independent(study, trial, param_name, param_distribution)\n\n\n###################################################################################################\n# .. note::\n# In favor of code simplicity, the above implementation doesn't support some features (e.g., maximization).\n# If you're interested in how to support those features, please see\n# `examples/samplers/simulated_annealing.py\n# <https://github.com/optuna/optuna-examples/blob/main/samplers/simulated_annealing_sampler.py>`_.\n#\n#\n# You can use ``SimulatedAnnealingSampler`` in the same way as built-in samplers as follows:\n\n\ndef objective(trial):\n x = trial.suggest_float(\"x\", -10, 10)\n y = trial.suggest_float(\"y\", -5, 5)\n return x**2 + y\n\n\nsampler = SimulatedAnnealingSampler()\nstudy = optuna.create_study(sampler=sampler)\nstudy.optimize(objective, n_trials=100)\n\nbest_trial = study.best_trial\nprint(\"Best value: \", best_trial.value)\nprint(\"Parameters that achieve the best value: \", best_trial.params)\n\n\n###################################################################################################\n# In this optimization, the values of ``x`` and ``y`` parameters are sampled by using\n# ``SimulatedAnnealingSampler.sample_relative`` method.\n#\n# .. note::\n# Strictly speaking, in the first trial,\n# ``SimulatedAnnealingSampler.sample_independent`` method is used to sample parameter values.\n# Because :func:`~optuna.samplers.intersection_search_space` used in\n# ``SimulatedAnnealingSampler.infer_relative_search_space`` cannot infer the search space\n# if there are no complete trials.\n", "path": "tutorial/20_recipes/005_user_defined_sampler.py"}]} | 2,898 | 100 |
gh_patches_debug_61631 | rasdani/github-patches | git_diff | pytorch__ignite-773 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
progress bar output seems to be off
So I am using the ProgressBar from the contrib module for indicating progress in the training loop. Currently the batch size is 1 for an exemplary dataset of 16 samples.
But for some reason the progress bar seems to skip the first step, and I am not sure whether this is due to an issue in my data loading or simply a display problem.
iter counter 0
Epoch [1/500]: [0/8] 0%|
iter counter 1
Epoch [1/500]: [2/8] 25%|
iter counter 2
Epoch [1/500]: [3/8] 38%|
iter counter 3
Epoch [1/500]: [4/8] 50%|
iter counter 4
Epoch [1/500]: [5/8] 62%|
iter counter 5
Epoch [1/500]: [6/8] 75%|
iter counter 6
Epoch [1/500]: [7/8] 88%|
iter counter 7
Epoch [1/500]: [8/8] 100%|
Is this a known problem, or am I missing something here? The iter counter is taken from the engine's `_run_once_on_dataset` method.
--- END ISSUE ---
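As an aside, a minimal reproduction of the behaviour reported above might look like the sketch below. It assumes only the public `Engine`/`ProgressBar` API; the data list is a stand-in for a real DataLoader with 8 batches, and the update function is a placeholder.

```python
# Minimal sketch (not from the issue): exercise ProgressBar with 8 dummy "batches".
from ignite.engine import Engine
from ignite.contrib.handlers import ProgressBar

data = list(range(8))  # stand-in for a DataLoader yielding 8 batches

def update(engine, batch):
    # mimics the 0-based "iter counter" print in the reported log
    print("iter counter", engine.state.iteration - 1)
    return 0.0

trainer = Engine(update)
ProgressBar(persist=True).attach(trainer)
trainer.run(data, max_epochs=1)
# With the affected version, the bar reportedly jumps from [0/8] to [2/8].
```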
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/contrib/handlers/tqdm_logger.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import warnings
3
4 import torch
5
6 from ignite.engine import Events
7 from ignite.engine.engine import EventWithFilter
8 from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler
9
10
11 class ProgressBar(BaseLogger):
12 """
13 TQDM progress bar handler to log training progress and computed metrics.
14
15 Args:
16 persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)
17 bar_format (str, optional): Specify a custom bar string formatting. May impact performance.
18 [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].
19 Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where
20 l_bar='{desc}: {percentage:3.0f}%|' and
21 r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the
22 formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.
23 **tqdm_kwargs: kwargs passed to tqdm progress bar.
24 By default, progress bar description displays "Epoch [5/10]" where 5 is the current epoch and 10 is the
25 number of epochs. If tqdm_kwargs defines `desc`, e.g. "Predictions", than the description is
26 "Predictions [5/10]" if number of epochs is more than one otherwise it is simply "Predictions".
27
28 Examples:
29
30 Simple progress bar
31
32 .. code-block:: python
33
34 trainer = create_supervised_trainer(model, optimizer, loss)
35
36 pbar = ProgressBar()
37 pbar.attach(trainer)
38
39 # Progress bar will looks like
40 # Epoch [2/50]: [64/128] 50%|█████ [06:17<12:34]
41
42 Log output to a file instead of stderr (tqdm's default output)
43
44 .. code-block:: python
45
46 trainer = create_supervised_trainer(model, optimizer, loss)
47
48 log_file = open("output.log", "w")
49 pbar = ProgressBar(file=log_file)
50 pbar.attach(trainer)
51
52 Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`
53 (such as :class:`~ignite.metrics.RunningAverage`)
54
55 .. code-block:: python
56
57 trainer = create_supervised_trainer(model, optimizer, loss)
58
59 RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')
60
61 pbar = ProgressBar()
62 pbar.attach(trainer, ['loss'])
63
64 # Progress bar will looks like
65 # Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34]
66
67 Directly attach the engine's output
68
69 .. code-block:: python
70
71 trainer = create_supervised_trainer(model, optimizer, loss)
72
73 pbar = ProgressBar()
74 pbar.attach(trainer, output_transform=lambda x: {'loss': x})
75
76 # Progress bar will looks like
77 # Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34]
78
79 Note:
80 When adding attaching the progress bar to an engine, it is recommend that you replace
81 every print operation in the engine's handlers triggered every iteration with
82 ``pbar.log_message`` to guarantee the correct format of the stdout.
83
84 Note:
85 When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,
86 please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.
87 Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set
88 to an empty string value.
89
90 """
91
92 _events_order = [
93 Events.STARTED,
94 Events.EPOCH_STARTED,
95 Events.ITERATION_STARTED,
96 Events.ITERATION_COMPLETED,
97 Events.EPOCH_COMPLETED,
98 Events.COMPLETED
99 ]
100
101 def __init__(self, persist=False,
102 bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',
103 **tqdm_kwargs):
104
105 try:
106 from tqdm.autonotebook import tqdm
107 except ImportError:
108 raise RuntimeError("This contrib module requires tqdm to be installed. "
109 "Please install it with command: \n pip install tqdm")
110
111 self.pbar_cls = tqdm
112 self.pbar = None
113 self.persist = persist
114 self.bar_format = bar_format
115 self.tqdm_kwargs = tqdm_kwargs
116
117 def _reset(self, pbar_total):
118 self.pbar = self.pbar_cls(
119 total=pbar_total,
120 leave=self.persist,
121 bar_format=self.bar_format,
122 **self.tqdm_kwargs
123 )
124
125 def _close(self, engine):
126 if self.pbar:
127 self.pbar.close()
128 self.pbar = None
129
130 @staticmethod
131 def _compare_lt(event1, event2):
132 if isinstance(event1, EventWithFilter):
133 event1 = event1.event
134 if isinstance(event2, EventWithFilter):
135 event2 = event2.event
136 i1 = ProgressBar._events_order.index(event1)
137 i2 = ProgressBar._events_order.index(event2)
138 return i1 < i2
139
140 def log_message(self, message):
141 """
142 Logs a message, preserving the progress bar correct output format.
143
144 Args:
145 message (str): string you wish to log.
146 """
147 from tqdm import tqdm
148 tqdm.write(message, **self.tqdm_kwargs)
149
150 def attach(self, engine, metric_names=None, output_transform=None,
151 event_name=Events.ITERATION_COMPLETED,
152 closing_event_name=Events.EPOCH_COMPLETED):
153 """
154 Attaches the progress bar to an engine object.
155
156 Args:
157 engine (Engine): engine object.
158 metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
159 metrics.
160 output_transform (callable, optional): a function to select what you want to print from the engine's
161 output. This function may return either a dictionary with entries in the format of ``{name: value}``,
162 or a single scalar, which will be displayed with the default name `output`.
163 event_name: event's name on which the progress bar advances. Valid events are from
164 :class:`~ignite.engine.Events`.
165 closing_event_name: event's name on which the progress bar is closed. Valid events are from
166 :class:`~ignite.engine.Events`.
167
168 Note: accepted output value types are numbers, 0d and 1d torch tensors and strings
169
170 """
171 desc = self.tqdm_kwargs.get("desc", "Epoch")
172
173 if not isinstance(event_name, (Events, EventWithFilter)):
174 raise ValueError("Logging event should be only `ignite.engine.Events`")
175
176 if isinstance(closing_event_name, EventWithFilter):
177 raise ValueError("Closing event should not use any event filter")
178
179 if not self._compare_lt(event_name, closing_event_name):
180 raise ValueError("Logging event {} should be called before closing event {}"
181 .format(event_name, closing_event_name))
182
183 log_handler = _OutputHandler(desc, metric_names, output_transform,
184 closing_event_name=closing_event_name)
185 # if event_name is EventWithFilter, filter is passed here
186 super(ProgressBar, self).attach(engine, log_handler, event_name)
187 engine.add_event_handler(closing_event_name, self._close)
188
189
190 class _OutputHandler(BaseOutputHandler):
191 """Helper handler to log engine's output and/or metrics
192
193 Args:
194 description (str): progress bar description.
195 metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
196 metrics.
197 output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
198 For example, `output_transform = lambda output: output`
199 This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot
200 with corresponding keys.
201 closing_event_name: event's name on which the progress bar is closed. Valid events are from
202 :class:`~ignite.engine.Events` or any `event_name` added by
203 :meth:`~ignite.engine.Engine.register_events`.
204
205 """
206
207 def __init__(self, description, metric_names=None, output_transform=None,
208 closing_event_name=Events.EPOCH_COMPLETED):
209 if metric_names is None and output_transform is None:
210 # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler
211 metric_names = []
212 super(_OutputHandler, self).__init__(description, metric_names, output_transform,
213 another_engine=None, global_step_transform=None)
214 self.closing_event_name = closing_event_name
215
216 @staticmethod
217 def get_max_number_events(event_name, engine):
218 if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):
219 return len(engine.state.dataloader)
220 if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):
221 return engine.state.max_epochs
222 return 1
223
224 def __call__(self, engine, logger, event_name):
225
226 pbar_total = self.get_max_number_events(event_name, engine)
227 if logger.pbar is None:
228 logger._reset(pbar_total=pbar_total)
229
230 desc = self.tag
231 max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)
232 if max_num_of_closing_events > 1:
233 global_step = engine.state.get_event_attrib_value(self.closing_event_name)
234 desc += " [{}/{}]".format(global_step, max_num_of_closing_events)
235 logger.pbar.set_description(desc)
236
237 metrics = self._setup_output_metrics(engine)
238
239 rendered_metrics = {}
240 for key, value in metrics.items():
241 if isinstance(value, torch.Tensor):
242 if value.ndimension() == 0:
243 rendered_metrics[key] = value.item()
244 elif value.ndimension() == 1:
245 for i, v in enumerate(value):
246 k = "{}_{}".format(key, i)
247 rendered_metrics[k] = v.item()
248 else:
249 warnings.warn("ProgressBar can not log "
250 "tensor with {} dimensions".format(value.ndimension()))
251 else:
252 rendered_metrics[key] = value
253
254 if rendered_metrics:
255 logger.pbar.set_postfix(**rendered_metrics)
256
257 global_step = engine.state.get_event_attrib_value(event_name)
258 global_step = (global_step - 1) % pbar_total + 1
259 logger.pbar.update(global_step - logger.pbar.n)
260
```
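For reference, the update arithmetic at the end of `_OutputHandler.__call__` (listing lines 257-259) can be exercised with a bare tqdm bar. The sketch below is illustrative only; it simply shows that a bar created lazily inside the first logging event starts rendering from `n == 0`, one step behind the iteration that has already completed.

```python
# Standalone sketch of the handler's update arithmetic (not library code).
from tqdm import tqdm

pbar_total = 8
pbar = None

for global_step in range(1, pbar_total + 1):  # ITERATION_COMPLETED fires with step 1, 2, ...
    if pbar is None:
        # Created inside the first event, so the first rendered state is 0/8.
        pbar = tqdm(total=pbar_total, desc="Epoch [1/1]")
    step = (global_step - 1) % pbar_total + 1
    pbar.update(step - pbar.n)  # mirrors logger.pbar.update(global_step - logger.pbar.n)
pbar.close()
```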
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py
--- a/ignite/contrib/handlers/tqdm_logger.py
+++ b/ignite/contrib/handlers/tqdm_logger.py
@@ -119,6 +119,7 @@
total=pbar_total,
leave=self.persist,
bar_format=self.bar_format,
+ initial=1,
**self.tqdm_kwargs
)
| {"golden_diff": "diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py\n--- a/ignite/contrib/handlers/tqdm_logger.py\n+++ b/ignite/contrib/handlers/tqdm_logger.py\n@@ -119,6 +119,7 @@\n total=pbar_total,\n leave=self.persist,\n bar_format=self.bar_format,\n+ initial=1,\n **self.tqdm_kwargs\n )\n", "issue": "progress bar output seems to be off\nSoi I am using the Progressbar from the contrib module for indicating progress in the training loop. Currently the batch size is 1 for an exemplary dataset of 16 samples. \r\n\r\nBut for some reason the progressbar seems to skip the first step and I am not sure if this is due to an issue related to my dataloading or simply a displaying problem. \r\n\r\niter counter 0\r\nEpoch [1/500]: [0/8] 0%| iter counter 1\r\nEpoch [1/500]: [2/8] 25%| iter counter 2\r\nEpoch [1/500]: [3/8] 38%| iter counter 3\r\nEpoch [1/500]: [4/8] 50%| iter counter 4\r\nEpoch [1/500]: [5/8] 62%| iter counter 5\r\nEpoch [1/500]: [6/8] 75%| \r\niter counter 6\r\nEpoch [1/500]: [7/8] 88%| \r\niter counter 7\r\nEpoch [1/500]: [8/8] 100%|\r\n\r\nis this a known problem or am I missing sth here? the iter counter is taken from the engine _run_once_on_dataset method\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\nfrom ignite.engine.engine import EventWithFilter\nfrom ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler\n\n\nclass ProgressBar(BaseLogger):\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Args:\n persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)\n bar_format (str, optional): Specify a custom bar string formatting. May impact performance.\n [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].\n Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the\n formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.\n **tqdm_kwargs: kwargs passed to tqdm progress bar.\n By default, progress bar description displays \"Epoch [5/10]\" where 5 is the current epoch and 10 is the\n number of epochs. If tqdm_kwargs defines `desc`, e.g. \"Predictions\", than the description is\n \"Predictions [5/10]\" if number of epochs is more than one otherwise it is simply \"Predictions\".\n\n Examples:\n\n Simple progress bar\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer)\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 [06:17<12:34]\n\n Log output to a file instead of stderr (tqdm's default output)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n log_file = open(\"output.log\", \"w\")\n pbar = ProgressBar(file=log_file)\n pbar.attach(trainer)\n\n Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`\n (such as :class:`~ignite.metrics.RunningAverage`)\n\n .. 
code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Directly attach the engine's output\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer, output_transform=lambda x: {'loss': x})\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n\n Note:\n When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,\n please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.\n Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set\n to an empty string value.\n\n \"\"\"\n\n _events_order = [\n Events.STARTED,\n Events.EPOCH_STARTED,\n Events.ITERATION_STARTED,\n Events.ITERATION_COMPLETED,\n Events.EPOCH_COMPLETED,\n Events.COMPLETED\n ]\n\n def __init__(self, persist=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',\n **tqdm_kwargs):\n\n try:\n from tqdm.autonotebook import tqdm\n except ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed. \"\n \"Please install it with command: \\n pip install tqdm\")\n\n self.pbar_cls = tqdm\n self.pbar = None\n self.persist = persist\n self.bar_format = bar_format\n self.tqdm_kwargs = tqdm_kwargs\n\n def _reset(self, pbar_total):\n self.pbar = self.pbar_cls(\n total=pbar_total,\n leave=self.persist,\n bar_format=self.bar_format,\n **self.tqdm_kwargs\n )\n\n def _close(self, engine):\n if self.pbar:\n self.pbar.close()\n self.pbar = None\n\n @staticmethod\n def _compare_lt(event1, event2):\n if isinstance(event1, EventWithFilter):\n event1 = event1.event\n if isinstance(event2, EventWithFilter):\n event2 = event2.event\n i1 = ProgressBar._events_order.index(event1)\n i2 = ProgressBar._events_order.index(event2)\n return i1 < i2\n\n def log_message(self, message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format.\n\n Args:\n message (str): string you wish to log.\n \"\"\"\n from tqdm import tqdm\n tqdm.write(message, **self.tqdm_kwargs)\n\n def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n \"\"\"\n Attaches the progress bar to an engine object.\n\n Args:\n engine (Engine): engine object.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): a function to select what you want to print from the engine's\n output. This function may return either a dictionary with entries in the format of ``{name: value}``,\n or a single scalar, which will be displayed with the default name `output`.\n event_name: event's name on which the progress bar advances. 
Valid events are from\n :class:`~ignite.engine.Events`.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events`.\n\n Note: accepted output value types are numbers, 0d and 1d torch tensors and strings\n\n \"\"\"\n desc = self.tqdm_kwargs.get(\"desc\", \"Epoch\")\n\n if not isinstance(event_name, (Events, EventWithFilter)):\n raise ValueError(\"Logging event should be only `ignite.engine.Events`\")\n\n if isinstance(closing_event_name, EventWithFilter):\n raise ValueError(\"Closing event should not use any event filter\")\n\n if not self._compare_lt(event_name, closing_event_name):\n raise ValueError(\"Logging event {} should be called before closing event {}\"\n .format(event_name, closing_event_name))\n\n log_handler = _OutputHandler(desc, metric_names, output_transform,\n closing_event_name=closing_event_name)\n # if event_name is EventWithFilter, filter is passed here\n super(ProgressBar, self).attach(engine, log_handler, event_name)\n engine.add_event_handler(closing_event_name, self._close)\n\n\nclass _OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output and/or metrics\n\n Args:\n description (str): progress bar description.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot\n with corresponding keys.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n\n \"\"\"\n\n def __init__(self, description, metric_names=None, output_transform=None,\n closing_event_name=Events.EPOCH_COMPLETED):\n if metric_names is None and output_transform is None:\n # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler\n metric_names = []\n super(_OutputHandler, self).__init__(description, metric_names, output_transform,\n another_engine=None, global_step_transform=None)\n self.closing_event_name = closing_event_name\n\n @staticmethod\n def get_max_number_events(event_name, engine):\n if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):\n return len(engine.state.dataloader)\n if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):\n return engine.state.max_epochs\n return 1\n\n def __call__(self, engine, logger, event_name):\n\n pbar_total = self.get_max_number_events(event_name, engine)\n if logger.pbar is None:\n logger._reset(pbar_total=pbar_total)\n\n desc = self.tag\n max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)\n if max_num_of_closing_events > 1:\n global_step = engine.state.get_event_attrib_value(self.closing_event_name)\n desc += \" [{}/{}]\".format(global_step, max_num_of_closing_events)\n logger.pbar.set_description(desc)\n\n metrics = self._setup_output_metrics(engine)\n\n rendered_metrics = {}\n for key, value in metrics.items():\n if isinstance(value, torch.Tensor):\n if value.ndimension() == 0:\n rendered_metrics[key] = value.item()\n elif value.ndimension() == 1:\n for i, v in enumerate(value):\n k = \"{}_{}\".format(key, i)\n rendered_metrics[k] = v.item()\n else:\n 
warnings.warn(\"ProgressBar can not log \"\n \"tensor with {} dimensions\".format(value.ndimension()))\n else:\n rendered_metrics[key] = value\n\n if rendered_metrics:\n logger.pbar.set_postfix(**rendered_metrics)\n\n global_step = engine.state.get_event_attrib_value(event_name)\n global_step = (global_step - 1) % pbar_total + 1\n logger.pbar.update(global_step - logger.pbar.n)\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\nfrom ignite.engine.engine import EventWithFilter\nfrom ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler\n\n\nclass ProgressBar(BaseLogger):\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Args:\n persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)\n bar_format (str, optional): Specify a custom bar string formatting. May impact performance.\n [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].\n Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the\n formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.\n **tqdm_kwargs: kwargs passed to tqdm progress bar.\n By default, progress bar description displays \"Epoch [5/10]\" where 5 is the current epoch and 10 is the\n number of epochs. If tqdm_kwargs defines `desc`, e.g. \"Predictions\", than the description is\n \"Predictions [5/10]\" if number of epochs is more than one otherwise it is simply \"Predictions\".\n\n Examples:\n\n Simple progress bar\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer)\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 [06:17<12:34]\n\n Log output to a file instead of stderr (tqdm's default output)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n log_file = open(\"output.log\", \"w\")\n pbar = ProgressBar(file=log_file)\n pbar.attach(trainer)\n\n Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`\n (such as :class:`~ignite.metrics.RunningAverage`)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Directly attach the engine's output\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer, output_transform=lambda x: {'loss': x})\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n\n Note:\n When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. 
For correct rendering,\n please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.\n Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set\n to an empty string value.\n\n \"\"\"\n\n _events_order = [\n Events.STARTED,\n Events.EPOCH_STARTED,\n Events.ITERATION_STARTED,\n Events.ITERATION_COMPLETED,\n Events.EPOCH_COMPLETED,\n Events.COMPLETED\n ]\n\n def __init__(self, persist=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',\n **tqdm_kwargs):\n\n try:\n from tqdm.autonotebook import tqdm\n except ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed. \"\n \"Please install it with command: \\n pip install tqdm\")\n\n self.pbar_cls = tqdm\n self.pbar = None\n self.persist = persist\n self.bar_format = bar_format\n self.tqdm_kwargs = tqdm_kwargs\n\n def _reset(self, pbar_total):\n self.pbar = self.pbar_cls(\n total=pbar_total,\n leave=self.persist,\n bar_format=self.bar_format,\n initial=1,\n **self.tqdm_kwargs\n )\n\n def _close(self, engine):\n if self.pbar:\n self.pbar.close()\n self.pbar = None\n\n @staticmethod\n def _compare_lt(event1, event2):\n if isinstance(event1, EventWithFilter):\n event1 = event1.event\n if isinstance(event2, EventWithFilter):\n event2 = event2.event\n i1 = ProgressBar._events_order.index(event1)\n i2 = ProgressBar._events_order.index(event2)\n return i1 < i2\n\n def log_message(self, message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format.\n\n Args:\n message (str): string you wish to log.\n \"\"\"\n from tqdm import tqdm\n tqdm.write(message, **self.tqdm_kwargs)\n\n def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n \"\"\"\n Attaches the progress bar to an engine object.\n\n Args:\n engine (Engine): engine object.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): a function to select what you want to print from the engine's\n output. This function may return either a dictionary with entries in the format of ``{name: value}``,\n or a single scalar, which will be displayed with the default name `output`.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events`.\n closing_event_name: event's name on which the progress bar is closed. 
Valid events are from\n :class:`~ignite.engine.Events`.\n\n Note: accepted output value types are numbers, 0d and 1d torch tensors and strings\n\n \"\"\"\n desc = self.tqdm_kwargs.get(\"desc\", \"Epoch\")\n\n if not isinstance(event_name, (Events, EventWithFilter)):\n raise ValueError(\"Logging event should be only `ignite.engine.Events`\")\n\n if isinstance(closing_event_name, EventWithFilter):\n raise ValueError(\"Closing event should not use any event filter\")\n\n if not self._compare_lt(event_name, closing_event_name):\n raise ValueError(\"Logging event {} should be called before closing event {}\"\n .format(event_name, closing_event_name))\n\n log_handler = _OutputHandler(desc, metric_names, output_transform,\n closing_event_name=closing_event_name)\n # if event_name is EventWithFilter, filter is passed here\n super(ProgressBar, self).attach(engine, log_handler, event_name)\n engine.add_event_handler(closing_event_name, self._close)\n\n\nclass _OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output and/or metrics\n\n Args:\n description (str): progress bar description.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot\n with corresponding keys.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n\n \"\"\"\n\n def __init__(self, description, metric_names=None, output_transform=None,\n closing_event_name=Events.EPOCH_COMPLETED):\n if metric_names is None and output_transform is None:\n # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler\n metric_names = []\n super(_OutputHandler, self).__init__(description, metric_names, output_transform,\n another_engine=None, global_step_transform=None)\n self.closing_event_name = closing_event_name\n\n @staticmethod\n def get_max_number_events(event_name, engine):\n if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):\n return len(engine.state.dataloader)\n if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):\n return engine.state.max_epochs\n return 1\n\n def __call__(self, engine, logger, event_name):\n\n pbar_total = self.get_max_number_events(event_name, engine)\n if logger.pbar is None:\n logger._reset(pbar_total=pbar_total)\n\n desc = self.tag\n max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)\n if max_num_of_closing_events > 1:\n global_step = engine.state.get_event_attrib_value(self.closing_event_name)\n desc += \" [{}/{}]\".format(global_step, max_num_of_closing_events)\n logger.pbar.set_description(desc)\n\n metrics = self._setup_output_metrics(engine)\n\n rendered_metrics = {}\n for key, value in metrics.items():\n if isinstance(value, torch.Tensor):\n if value.ndimension() == 0:\n rendered_metrics[key] = value.item()\n elif value.ndimension() == 1:\n for i, v in enumerate(value):\n k = \"{}_{}\".format(key, i)\n rendered_metrics[k] = v.item()\n else:\n warnings.warn(\"ProgressBar can not log \"\n \"tensor with {} dimensions\".format(value.ndimension()))\n else:\n rendered_metrics[key] = value\n\n 
if rendered_metrics:\n logger.pbar.set_postfix(**rendered_metrics)\n\n global_step = engine.state.get_event_attrib_value(event_name)\n global_step = (global_step - 1) % pbar_total + 1\n logger.pbar.update(global_step - logger.pbar.n)\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}]} | 3,698 | 108 |
gh_patches_debug_12381 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3101 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unnecessary css file tries to load while editing a page
While editing a page an unnecessary css file tries to load:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/patterns/tinymce.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from lxml import html
3 from plone.app.layout.navigation.root import getNavigationRootObject
4 from plone.app.theming.utils import theming_policy
5 from plone.registry.interfaces import IRegistry
6 from Products.CMFCore.utils import getToolByName
7 from Products.CMFPlone.interfaces import IFilterSchema
8 from Products.CMFPlone.interfaces import ITinyMCESchema
9 from Products.CMFPlone.utils import get_portal
10 from Products.CMFPlone.utils import safe_unicode
11 from zope.component import getUtility
12
13 import json
14
15
16 class TinyMCESettingsGenerator(object):
17
18 def __init__(self, context, request):
19 self.context = context
20 self.request = request
21 self.settings = getUtility(IRegistry).forInterface(
22 ITinyMCESchema,
23 prefix="plone",
24 check=False
25 )
26 self.filter_settings = getUtility(IRegistry).forInterface(
27 IFilterSchema,
28 prefix="plone",
29 check=False
30 )
31 self.nav_root = getNavigationRootObject(
32 self.context,
33 get_portal(),
34 )
35 self.nav_root_url = self.nav_root.absolute_url()
36
37 def get_theme(self):
38 return theming_policy().get_theme()
39
40 def get_content_css(self, style_css=''):
41 files = [
42 '{0}/++plone++static/plone-compiled.css'.format(self.nav_root_url)
43 ]
44 if style_css:
45 files.extend(style_css.split(','))
46 content_css = self.settings.content_css or []
47 for url in content_css:
48 if url and url.strip():
49 files.append('/'.join([self.nav_root_url, url.strip()]))
50 theme = self.get_theme()
51 tinymce_content_css = getattr(theme, 'tinymce_content_css', None)
52 if tinymce_content_css is not None:
53 for path in theme.tinymce_content_css.split(','):
54 if path.startswith('http://') or path.startswith('https://'):
55 files.append(path)
56 else:
57 files.append(self.nav_root_url + path)
58
59 return ','.join(files)
60
61 def get_style_format(self, txt, _type='format', base=None):
62 parts = txt.strip().split('|')
63 if len(parts) < 2:
64 return
65 if base is None:
66 val = {}
67 else:
68 val = base.copy()
69 val.update({
70 'title': parts[0],
71 _type: parts[1]
72 })
73 if len(parts) > 2:
74 val['icon'] = parts[2]
75 return val
76
77 def get_styles(self, styles, _type='format', base=None):
78 result = []
79 for style in styles:
80 style = self.get_style_format(style, _type, base)
81 if not style:
82 continue
83 result.append(style)
84 return result
85
86 def get_all_style_formats(self):
87 header_styles = self.settings.header_styles or []
88 block_styles = self.settings.block_styles or []
89 inline_styles = self.settings.inline_styles or []
90 alignment_styles = self.settings.alignment_styles or []
91 table_styles = self.settings.table_styles or []
92 style_formats = [{
93 'title': 'Headers',
94 'items': self.get_styles(header_styles)
95 }, {
96 'title': 'Block',
97 'items': self.get_styles(block_styles)
98 }, {
99 'title': 'Inline',
100 'items': self.get_styles(inline_styles)
101 }, {
102 'title': 'Alignment',
103 'items': self.get_styles(alignment_styles)
104 }, {
105 'title': 'Tables',
106 'items': self.get_styles(
107 table_styles, 'classes', {'selector': 'table'})
108 }]
109 return [sf for sf in style_formats if sf['items']]
110
111 def get_tiny_config(self):
112 settings = self.settings
113 importcss_file_filter = '%s/++plone++static/tinymce-styles.css' % (
114 self.nav_root_url
115 )
116
117 theme = self.get_theme()
118 if theme and getattr(theme, 'tinymce_styles_css', None):
119 importcss_file_filter += ',%s/%s' % (
120 self.nav_root_url,
121 theme.tinymce_styles_css.lstrip('/'))
122
123 tiny_config = {
124 'resize': 'both' if settings.resizing else False,
125 'content_css': self.get_content_css(importcss_file_filter),
126 'plugins': [
127 'plonelink',
128 'ploneimage',
129 'importcss'
130 ] + settings.plugins,
131 'external_plugins': {},
132 'toolbar': settings.toolbar,
133 'entity_encoding': settings.entity_encoding,
134 'importcss_append': True,
135 'importcss_file_filter': importcss_file_filter,
136 'browser_spellcheck': True
137 }
138 toolbar_additions = settings.custom_buttons or []
139
140 if settings.editor_height:
141 tiny_config['height'] = settings.editor_height
142 if settings.autoresize:
143 tiny_config['plugins'].append('autoresize')
144 tiny_config['autoresize_max_height'] = 1000 # hard coded?
145 if settings.editor_width:
146 tiny_config['width'] = settings.editor_width
147
148 # specific plugin options
149 if 'contextmenu' in settings.plugins:
150 tiny_config['contextmenu'] = "plonelink ploneimage inserttable |"\
151 " cell row column deletetable"
152
153 if settings.libraries_spellchecker_choice == 'AtD':
154 mtool = getToolByName(self.context, 'portal_membership')
155 member = mtool.getAuthenticatedMember()
156 member_id = member.getId()
157 if member_id:
158 if 'compat3x' not in tiny_config['plugins']:
159 tiny_config['plugins'].append('compat3x')
160 tiny_config['external_plugins']['AtD'] = (
161 '{0}/++plone++static/tinymce-AtD-plugin/'
162 'editor_plugin.js'.format(self.nav_root_url)
163 )
164 # None when Anonymous User
165 tiny_config['atd_rpc_id'] = 'plone-' + member_id
166 tiny_config['atd_rpc_url'] = self.nav_root_url
167 tiny_config['atd_show_types'] = ','.join(
168 settings.libraries_atd_show_types
169 )
170 tiny_config['atd_ignore_strings'] = ','.join(
171 settings.libraries_atd_ignore_strings
172 )
173 toolbar_additions.append('AtD')
174 elif settings.libraries_spellchecker_choice == 'AtD':
175 tiny_config['browser_spellcheck'] = True
176
177 if toolbar_additions:
178 tiny_config['toolbar'] += ' | {0}'.format(
179 ' '.join(toolbar_additions)
180 )
181
182 for plugin in settings.custom_plugins or []:
183 parts = plugin.split('|')
184 if len(parts) != 2:
185 continue
186 tiny_config['external_plugins'][parts[0]] = parts[1]
187
188 tiny_config['style_formats'] = self.get_all_style_formats()
189 if settings.formats:
190 try:
191 tiny_config['formats'] = json.loads(settings.formats)
192 except ValueError:
193 pass
194
195 if settings.menubar:
196 tiny_config['menubar'] = settings.menubar
197 if settings.menu:
198 try:
199 tiny_config['menu'] = json.loads(settings.menu)
200 except ValueError:
201 pass
202
203 if hasattr(settings, 'templates') and settings.templates:
204 try:
205 tiny_config['templates'] = json.loads(settings.templates)
206 except ValueError:
207 pass
208
209 # add safe_html settings, which are useed in backend for filtering:
210 if not self.filter_settings.disable_filtering:
211 valid_tags = self.filter_settings.valid_tags
212 nasty_tags = self.filter_settings.nasty_tags
213 custom_attributes = self.filter_settings.custom_attributes
214 safe_attributes = [
215 safe_unicode(attr) for attr in html.defs.safe_attrs]
216 valid_attributes = safe_attributes + custom_attributes
217 # valid_elements : 'a[href|target=_blank],strong/b,div[align],br'
218 tiny_valid_elements = []
219 for tag in valid_tags:
220 tag_str = "%s[%s]" % (tag, "|".join(valid_attributes))
221 tiny_valid_elements.append(tag_str)
222 # We want to remove the nasty tag including the content in the
223 # backend, so TinyMCE should allow them here.
224 for tag in nasty_tags:
225 tag_str = "%s[%s]" % (tag, "|".join(valid_attributes))
226 tiny_valid_elements.append(tag_str)
227 tiny_config['valid_elements'] = ",".join(tiny_valid_elements)
228
229 if settings.other_settings:
230 try:
231 tiny_config.update(json.loads(settings.other_settings))
232 except ValueError:
233 pass
234
235 return tiny_config
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/patterns/tinymce.py b/Products/CMFPlone/patterns/tinymce.py
--- a/Products/CMFPlone/patterns/tinymce.py
+++ b/Products/CMFPlone/patterns/tinymce.py
@@ -49,7 +49,7 @@
files.append('/'.join([self.nav_root_url, url.strip()]))
theme = self.get_theme()
tinymce_content_css = getattr(theme, 'tinymce_content_css', None)
- if tinymce_content_css is not None:
+ if tinymce_content_css is not None and tinymce_content_css != '':
for path in theme.tinymce_content_css.split(','):
if path.startswith('http://') or path.startswith('https://'):
files.append(path)
| {"golden_diff": "diff --git a/Products/CMFPlone/patterns/tinymce.py b/Products/CMFPlone/patterns/tinymce.py\n--- a/Products/CMFPlone/patterns/tinymce.py\n+++ b/Products/CMFPlone/patterns/tinymce.py\n@@ -49,7 +49,7 @@\n files.append('/'.join([self.nav_root_url, url.strip()]))\n theme = self.get_theme()\n tinymce_content_css = getattr(theme, 'tinymce_content_css', None)\n- if tinymce_content_css is not None:\n+ if tinymce_content_css is not None and tinymce_content_css != '':\n for path in theme.tinymce_content_css.split(','):\n if path.startswith('http://') or path.startswith('https://'):\n files.append(path)\n", "issue": "Unnecessary css file tries to load while editing a page\nWhile editing a page an unnecessary css file tries to load:\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom lxml import html\nfrom plone.app.layout.navigation.root import getNavigationRootObject\nfrom plone.app.theming.utils import theming_policy\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.interfaces import IFilterSchema\nfrom Products.CMFPlone.interfaces import ITinyMCESchema\nfrom Products.CMFPlone.utils import get_portal\nfrom Products.CMFPlone.utils import safe_unicode\nfrom zope.component import getUtility\n\nimport json\n\n\nclass TinyMCESettingsGenerator(object):\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n self.settings = getUtility(IRegistry).forInterface(\n ITinyMCESchema,\n prefix=\"plone\",\n check=False\n )\n self.filter_settings = getUtility(IRegistry).forInterface(\n IFilterSchema,\n prefix=\"plone\",\n check=False\n )\n self.nav_root = getNavigationRootObject(\n self.context,\n get_portal(),\n )\n self.nav_root_url = self.nav_root.absolute_url()\n\n def get_theme(self):\n return theming_policy().get_theme()\n\n def get_content_css(self, style_css=''):\n files = [\n '{0}/++plone++static/plone-compiled.css'.format(self.nav_root_url)\n ]\n if style_css:\n files.extend(style_css.split(','))\n content_css = self.settings.content_css or []\n for url in content_css:\n if url and url.strip():\n files.append('/'.join([self.nav_root_url, url.strip()]))\n theme = self.get_theme()\n tinymce_content_css = getattr(theme, 'tinymce_content_css', None)\n if tinymce_content_css is not None:\n for path in theme.tinymce_content_css.split(','):\n if path.startswith('http://') or path.startswith('https://'):\n files.append(path)\n else:\n files.append(self.nav_root_url + path)\n\n return ','.join(files)\n\n def get_style_format(self, txt, _type='format', base=None):\n parts = txt.strip().split('|')\n if len(parts) < 2:\n return\n if base is None:\n val = {}\n else:\n val = base.copy()\n val.update({\n 'title': parts[0],\n _type: parts[1]\n })\n if len(parts) > 2:\n val['icon'] = parts[2]\n return val\n\n def get_styles(self, styles, _type='format', base=None):\n result = []\n for style in styles:\n style = self.get_style_format(style, _type, base)\n if not style:\n continue\n result.append(style)\n return result\n\n def get_all_style_formats(self):\n header_styles = self.settings.header_styles or []\n block_styles = self.settings.block_styles or []\n inline_styles = self.settings.inline_styles or []\n alignment_styles = self.settings.alignment_styles or []\n table_styles = self.settings.table_styles or []\n style_formats = [{\n 'title': 'Headers',\n 'items': self.get_styles(header_styles)\n }, {\n 'title': 'Block',\n 'items': self.get_styles(block_styles)\n }, {\n 
'title': 'Inline',\n 'items': self.get_styles(inline_styles)\n }, {\n 'title': 'Alignment',\n 'items': self.get_styles(alignment_styles)\n }, {\n 'title': 'Tables',\n 'items': self.get_styles(\n table_styles, 'classes', {'selector': 'table'})\n }]\n return [sf for sf in style_formats if sf['items']]\n\n def get_tiny_config(self):\n settings = self.settings\n importcss_file_filter = '%s/++plone++static/tinymce-styles.css' % (\n self.nav_root_url\n )\n\n theme = self.get_theme()\n if theme and getattr(theme, 'tinymce_styles_css', None):\n importcss_file_filter += ',%s/%s' % (\n self.nav_root_url,\n theme.tinymce_styles_css.lstrip('/'))\n\n tiny_config = {\n 'resize': 'both' if settings.resizing else False,\n 'content_css': self.get_content_css(importcss_file_filter),\n 'plugins': [\n 'plonelink',\n 'ploneimage',\n 'importcss'\n ] + settings.plugins,\n 'external_plugins': {},\n 'toolbar': settings.toolbar,\n 'entity_encoding': settings.entity_encoding,\n 'importcss_append': True,\n 'importcss_file_filter': importcss_file_filter,\n 'browser_spellcheck': True\n }\n toolbar_additions = settings.custom_buttons or []\n\n if settings.editor_height:\n tiny_config['height'] = settings.editor_height\n if settings.autoresize:\n tiny_config['plugins'].append('autoresize')\n tiny_config['autoresize_max_height'] = 1000 # hard coded?\n if settings.editor_width:\n tiny_config['width'] = settings.editor_width\n\n # specific plugin options\n if 'contextmenu' in settings.plugins:\n tiny_config['contextmenu'] = \"plonelink ploneimage inserttable |\"\\\n \" cell row column deletetable\"\n\n if settings.libraries_spellchecker_choice == 'AtD':\n mtool = getToolByName(self.context, 'portal_membership')\n member = mtool.getAuthenticatedMember()\n member_id = member.getId()\n if member_id:\n if 'compat3x' not in tiny_config['plugins']:\n tiny_config['plugins'].append('compat3x')\n tiny_config['external_plugins']['AtD'] = (\n '{0}/++plone++static/tinymce-AtD-plugin/'\n 'editor_plugin.js'.format(self.nav_root_url)\n )\n # None when Anonymous User\n tiny_config['atd_rpc_id'] = 'plone-' + member_id\n tiny_config['atd_rpc_url'] = self.nav_root_url\n tiny_config['atd_show_types'] = ','.join(\n settings.libraries_atd_show_types\n )\n tiny_config['atd_ignore_strings'] = ','.join(\n settings.libraries_atd_ignore_strings\n )\n toolbar_additions.append('AtD')\n elif settings.libraries_spellchecker_choice == 'AtD':\n tiny_config['browser_spellcheck'] = True\n\n if toolbar_additions:\n tiny_config['toolbar'] += ' | {0}'.format(\n ' '.join(toolbar_additions)\n )\n\n for plugin in settings.custom_plugins or []:\n parts = plugin.split('|')\n if len(parts) != 2:\n continue\n tiny_config['external_plugins'][parts[0]] = parts[1]\n\n tiny_config['style_formats'] = self.get_all_style_formats()\n if settings.formats:\n try:\n tiny_config['formats'] = json.loads(settings.formats)\n except ValueError:\n pass\n\n if settings.menubar:\n tiny_config['menubar'] = settings.menubar\n if settings.menu:\n try:\n tiny_config['menu'] = json.loads(settings.menu)\n except ValueError:\n pass\n\n if hasattr(settings, 'templates') and settings.templates:\n try:\n tiny_config['templates'] = json.loads(settings.templates)\n except ValueError:\n pass\n\n # add safe_html settings, which are useed in backend for filtering:\n if not self.filter_settings.disable_filtering:\n valid_tags = self.filter_settings.valid_tags\n nasty_tags = self.filter_settings.nasty_tags\n custom_attributes = self.filter_settings.custom_attributes\n safe_attributes = [\n 
safe_unicode(attr) for attr in html.defs.safe_attrs]\n valid_attributes = safe_attributes + custom_attributes\n # valid_elements : 'a[href|target=_blank],strong/b,div[align],br'\n tiny_valid_elements = []\n for tag in valid_tags:\n tag_str = \"%s[%s]\" % (tag, \"|\".join(valid_attributes))\n tiny_valid_elements.append(tag_str)\n # We want to remove the nasty tag including the content in the\n # backend, so TinyMCE should allow them here.\n for tag in nasty_tags:\n tag_str = \"%s[%s]\" % (tag, \"|\".join(valid_attributes))\n tiny_valid_elements.append(tag_str)\n tiny_config['valid_elements'] = \",\".join(tiny_valid_elements)\n\n if settings.other_settings:\n try:\n tiny_config.update(json.loads(settings.other_settings))\n except ValueError:\n pass\n\n return tiny_config\n", "path": "Products/CMFPlone/patterns/tinymce.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom lxml import html\nfrom plone.app.layout.navigation.root import getNavigationRootObject\nfrom plone.app.theming.utils import theming_policy\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.interfaces import IFilterSchema\nfrom Products.CMFPlone.interfaces import ITinyMCESchema\nfrom Products.CMFPlone.utils import get_portal\nfrom Products.CMFPlone.utils import safe_unicode\nfrom zope.component import getUtility\n\nimport json\n\n\nclass TinyMCESettingsGenerator(object):\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n self.settings = getUtility(IRegistry).forInterface(\n ITinyMCESchema,\n prefix=\"plone\",\n check=False\n )\n self.filter_settings = getUtility(IRegistry).forInterface(\n IFilterSchema,\n prefix=\"plone\",\n check=False\n )\n self.nav_root = getNavigationRootObject(\n self.context,\n get_portal(),\n )\n self.nav_root_url = self.nav_root.absolute_url()\n\n def get_theme(self):\n return theming_policy().get_theme()\n\n def get_content_css(self, style_css=''):\n files = [\n '{0}/++plone++static/plone-compiled.css'.format(self.nav_root_url)\n ]\n if style_css:\n files.extend(style_css.split(','))\n content_css = self.settings.content_css or []\n for url in content_css:\n if url and url.strip():\n files.append('/'.join([self.nav_root_url, url.strip()]))\n theme = self.get_theme()\n tinymce_content_css = getattr(theme, 'tinymce_content_css', None)\n if tinymce_content_css is not None and tinymce_content_css != '':\n for path in theme.tinymce_content_css.split(','):\n if path.startswith('http://') or path.startswith('https://'):\n files.append(path)\n else:\n files.append(self.nav_root_url + path)\n\n return ','.join(files)\n\n def get_style_format(self, txt, _type='format', base=None):\n parts = txt.strip().split('|')\n if len(parts) < 2:\n return\n if base is None:\n val = {}\n else:\n val = base.copy()\n val.update({\n 'title': parts[0],\n _type: parts[1]\n })\n if len(parts) > 2:\n val['icon'] = parts[2]\n return val\n\n def get_styles(self, styles, _type='format', base=None):\n result = []\n for style in styles:\n style = self.get_style_format(style, _type, base)\n if not style:\n continue\n result.append(style)\n return result\n\n def get_all_style_formats(self):\n header_styles = self.settings.header_styles or []\n block_styles = self.settings.block_styles or []\n inline_styles = self.settings.inline_styles or []\n alignment_styles = self.settings.alignment_styles or []\n table_styles = self.settings.table_styles or []\n style_formats = [{\n 'title': 'Headers',\n 'items': 
self.get_styles(header_styles)\n }, {\n 'title': 'Block',\n 'items': self.get_styles(block_styles)\n }, {\n 'title': 'Inline',\n 'items': self.get_styles(inline_styles)\n }, {\n 'title': 'Alignment',\n 'items': self.get_styles(alignment_styles)\n }, {\n 'title': 'Tables',\n 'items': self.get_styles(\n table_styles, 'classes', {'selector': 'table'})\n }]\n return [sf for sf in style_formats if sf['items']]\n\n def get_tiny_config(self):\n settings = self.settings\n importcss_file_filter = '%s/++plone++static/tinymce-styles.css' % (\n self.nav_root_url\n )\n\n theme = self.get_theme()\n if theme and getattr(theme, 'tinymce_styles_css', None):\n importcss_file_filter += ',%s/%s' % (\n self.nav_root_url,\n theme.tinymce_styles_css.lstrip('/'))\n\n tiny_config = {\n 'resize': 'both' if settings.resizing else False,\n 'content_css': self.get_content_css(importcss_file_filter),\n 'plugins': [\n 'plonelink',\n 'ploneimage',\n 'importcss'\n ] + settings.plugins,\n 'external_plugins': {},\n 'toolbar': settings.toolbar,\n 'entity_encoding': settings.entity_encoding,\n 'importcss_append': True,\n 'importcss_file_filter': importcss_file_filter,\n 'browser_spellcheck': True\n }\n toolbar_additions = settings.custom_buttons or []\n\n if settings.editor_height:\n tiny_config['height'] = settings.editor_height\n if settings.autoresize:\n tiny_config['plugins'].append('autoresize')\n tiny_config['autoresize_max_height'] = 1000 # hard coded?\n if settings.editor_width:\n tiny_config['width'] = settings.editor_width\n\n # specific plugin options\n if 'contextmenu' in settings.plugins:\n tiny_config['contextmenu'] = \"plonelink ploneimage inserttable |\"\\\n \" cell row column deletetable\"\n\n if settings.libraries_spellchecker_choice == 'AtD':\n mtool = getToolByName(self.context, 'portal_membership')\n member = mtool.getAuthenticatedMember()\n member_id = member.getId()\n if member_id:\n if 'compat3x' not in tiny_config['plugins']:\n tiny_config['plugins'].append('compat3x')\n tiny_config['external_plugins']['AtD'] = (\n '{0}/++plone++static/tinymce-AtD-plugin/'\n 'editor_plugin.js'.format(self.nav_root_url)\n )\n # None when Anonymous User\n tiny_config['atd_rpc_id'] = 'plone-' + member_id\n tiny_config['atd_rpc_url'] = self.nav_root_url\n tiny_config['atd_show_types'] = ','.join(\n settings.libraries_atd_show_types\n )\n tiny_config['atd_ignore_strings'] = ','.join(\n settings.libraries_atd_ignore_strings\n )\n toolbar_additions.append('AtD')\n elif settings.libraries_spellchecker_choice == 'AtD':\n tiny_config['browser_spellcheck'] = True\n\n if toolbar_additions:\n tiny_config['toolbar'] += ' | {0}'.format(\n ' '.join(toolbar_additions)\n )\n\n for plugin in settings.custom_plugins or []:\n parts = plugin.split('|')\n if len(parts) != 2:\n continue\n tiny_config['external_plugins'][parts[0]] = parts[1]\n\n tiny_config['style_formats'] = self.get_all_style_formats()\n if settings.formats:\n try:\n tiny_config['formats'] = json.loads(settings.formats)\n except ValueError:\n pass\n\n if settings.menubar:\n tiny_config['menubar'] = settings.menubar\n if settings.menu:\n try:\n tiny_config['menu'] = json.loads(settings.menu)\n except ValueError:\n pass\n\n if hasattr(settings, 'templates') and settings.templates:\n try:\n tiny_config['templates'] = json.loads(settings.templates)\n except ValueError:\n pass\n\n # add safe_html settings, which are useed in backend for filtering:\n if not self.filter_settings.disable_filtering:\n valid_tags = self.filter_settings.valid_tags\n nasty_tags = 
self.filter_settings.nasty_tags\n custom_attributes = self.filter_settings.custom_attributes\n safe_attributes = [\n safe_unicode(attr) for attr in html.defs.safe_attrs]\n valid_attributes = safe_attributes + custom_attributes\n # valid_elements : 'a[href|target=_blank],strong/b,div[align],br'\n tiny_valid_elements = []\n for tag in valid_tags:\n tag_str = \"%s[%s]\" % (tag, \"|\".join(valid_attributes))\n tiny_valid_elements.append(tag_str)\n # We want to remove the nasty tag including the content in the\n # backend, so TinyMCE should allow them here.\n for tag in nasty_tags:\n tag_str = \"%s[%s]\" % (tag, \"|\".join(valid_attributes))\n tiny_valid_elements.append(tag_str)\n tiny_config['valid_elements'] = \",\".join(tiny_valid_elements)\n\n if settings.other_settings:\n try:\n tiny_config.update(json.loads(settings.other_settings))\n except ValueError:\n pass\n\n return tiny_config\n", "path": "Products/CMFPlone/patterns/tinymce.py"}]} | 2,792 | 181 |
gh_patches_debug_430 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-1630 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow async magic __call__
# Thesis
`__call__` method should be asyncable.
```python
import asyncio
import os
class RequestHandler(object):
async def __call__(self, reader, writer):
ipc_input = (await reader.readline()).decode('utf8').strip()
writer.write('You sent: "{0}"\n'.format(ipc_input).encode('utf8'))
async def bootstrap():
handler = RequestHandler()
server = await asyncio.start_unix_server(handler, path='/var/tmp/my_sock')
os.chmod('/var/tmp/my_sock', 0o666)
while True:
try:
await asyncio.sleep(120)
except asyncio.CancelledError:
server.close()
break
asyncio.run(bootstrap())
```
## Reasoning
Much like how `__call__` can be a generator there is no reason it can't be async.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/constants.py`
Content:
```
1 """
2 This module contains list of white- and black-listed ``python`` members.
3
4 We add values here when we want to make them public.
5 Or when a value is reused in several places.
6 Then, we automatically have to add it here and document it.
7
8 Other constants that are not used across modules
9 and does not require to be documented can be defined where they are used.
10
11 All values here must be documented with ``#:`` comments.
12 """
13
14 import math
15 import re
16
17 from typing_extensions import Final
18
19 #: List of functions we forbid to use.
20 FUNCTIONS_BLACKLIST: Final = frozenset((
21 # Code generation:
22 'eval',
23 'exec',
24 'compile',
25
26 # Termination:
27 'exit',
28 'quit',
29
30 # Magic:
31 'globals',
32 'locals',
33 'vars',
34 'dir',
35
36 # IO:
37 'print',
38 'pprint',
39 'input',
40 'breakpoint',
41
42 # Attribute access:
43 'hasattr',
44 'delattr',
45
46 # Gratis:
47 'copyright',
48 'help',
49 'credits',
50
51 # Dynamic imports:
52 '__import__',
53
54 # OOP:
55 'staticmethod',
56
57 # Mypy:
58 'reveal_type',
59 ))
60
61 #: List of module metadata we forbid to use.
62 MODULE_METADATA_VARIABLES_BLACKLIST: Final = frozenset((
63 '__author__',
64 '__all__',
65 '__version__',
66 '__about__',
67 ))
68
69 #: List of variable names we forbid to use.
70 VARIABLE_NAMES_BLACKLIST: Final = frozenset((
71 # Meaningless words:
72 'data',
73 'result',
74 'results',
75 'item',
76 'items',
77 'value',
78 'values',
79 'val',
80 'vals',
81 'var',
82 'vars',
83 'variable',
84 'content',
85 'contents',
86 'info',
87 'handle',
88 'handler',
89 'file',
90 'obj',
91 'objects',
92 'objs',
93 'some',
94 'do',
95 'param',
96 'params',
97 'parameters',
98
99 # Confuseables:
100 'no',
101 'true',
102 'false',
103
104 # Names from examples:
105 'foo',
106 'bar',
107 'baz',
108 ))
109
110 #: List of characters sequences that are hard to read.
111 UNREADABLE_CHARACTER_COMBINATIONS: Final = frozenset((
112 '1l',
113 '1I',
114 '0O',
115 'O0',
116 # Not included: 'lI', 'l1', 'Il'
117 # Because these names are quite common in real words.
118 ))
119
120 #: List of special names that are used only as first argument in methods.
121 SPECIAL_ARGUMENT_NAMES_WHITELIST: Final = frozenset((
122 'self',
123 'cls',
124 'mcs',
125 ))
126
127 #: List of all magic methods from the python docs.
128 ALL_MAGIC_METHODS: Final = frozenset((
129 '__new__',
130 '__init__',
131 '__del__',
132
133 '__repr__',
134 '__str__',
135 '__bytes__',
136 '__format__',
137
138 '__lt__',
139 '__le__',
140 '__eq__',
141 '__ne__',
142 '__gt__',
143 '__ge__',
144
145 '__hash__',
146 '__bool__',
147
148 '__getattr__',
149 '__getattribute__',
150 '__setattr__',
151 '__delattr__',
152 '__dir__',
153
154 '__get__',
155 '__set__',
156 '__delete__',
157 '__set_name__',
158
159 '__init_subclass__',
160 '__instancecheck__',
161 '__subclasscheck__',
162 '__class_getitem__',
163
164 '__call__',
165 '__len__',
166 '__length_hint__',
167 '__getitem__',
168 '__setitem__',
169 '__delitem__',
170 '__missing__',
171 '__iter__',
172 '__reversed__',
173 '__contains__',
174
175 '__add__',
176 '__sub__',
177 '__mul__',
178 '__matmul__',
179 '__truediv__',
180 '__floordiv__',
181 '__mod__',
182 '__divmod__',
183 '__pow__',
184 '__lshift__',
185 '__rshift__',
186 '__and__',
187 '__xor__',
188 '__or__',
189 '__radd__',
190 '__rsub__',
191 '__rmul__',
192 '__rmatmul__',
193 '__rtruediv__',
194 '__rfloordiv__',
195 '__rmod__',
196 '__rdivmod__',
197 '__rpow__',
198 '__rlshift__',
199 '__rrshift__',
200 '__rand__',
201 '__rxor__',
202 '__ror__',
203 '__iadd__',
204 '__isub__',
205 '__imul__',
206 '__imatmul__',
207 '__itruediv__',
208 '__ifloordiv__',
209 '__imod__',
210 '__ipow__',
211 '__ilshift__',
212 '__irshift__',
213 '__iand__',
214 '__ixor__',
215 '__ior__',
216 '__neg__',
217 '__pos__',
218 '__abs__',
219 '__invert__',
220 '__complex__',
221 '__int__',
222 '__float__',
223 '__index__',
224 '__round__',
225 '__trunc__',
226 '__floor__',
227 '__ceil__',
228
229 '__enter__',
230 '__exit__',
231
232 '__await__',
233 '__aiter__',
234 '__anext__',
235 '__aenter__',
236 '__aexit__',
237 ))
238
239 #: List of magic methods that are forbidden to use.
240 MAGIC_METHODS_BLACKLIST: Final = frozenset((
241 # Since we don't use `del`:
242 '__del__',
243 '__delitem__',
244 '__delete__',
245
246 # Since we don't use `pickle`:
247 '__reduce__',
248 '__reduce_ex__',
249
250 '__dir__', # since we don't use `dir()`
251 '__delattr__', # since we don't use `delattr()`
252 ))
253
254 #: List of magic methods that are not allowed to be generators.
255 YIELD_MAGIC_METHODS_BLACKLIST: Final = ALL_MAGIC_METHODS.difference({
256 # Allowed to be used with ``yield`` keyword:
257 '__call__', # Fixes Issue:146
258 '__iter__',
259 })
260
261 #: List of magic methods that are not allowed to be async.
262 ASYNC_MAGIC_METHODS_BLACKLIST: Final = ALL_MAGIC_METHODS.difference({
263 # In order of appearance on
264 # https://docs.python.org/3/reference/datamodel.html#basic-customization
265 # Allowed magic methods are:
266 '__anext__',
267 '__aenter__',
268 '__aexit__',
269 })
270
271 #: List of builtin classes that are allowed to subclass.
272 ALLOWED_BUILTIN_CLASSES: Final = frozenset((
273 'type',
274 'object',
275 ))
276
277 #: List of nested functions' names we allow to use.
278 NESTED_FUNCTIONS_WHITELIST: Final = frozenset((
279 'decorator',
280 'factory',
281 'wrapper',
282 ))
283
284 #: List of allowed ``__future__`` imports.
285 FUTURE_IMPORTS_WHITELIST: Final = frozenset((
286 'annotations',
287 'generator_stop',
288 ))
289
290 #: List of blacklisted module names.
291 MODULE_NAMES_BLACKLIST: Final = frozenset((
292 'util',
293 'utils',
294 'utilities',
295 'helpers',
296 ))
297
298 #: List of allowed module magic names.
299 MAGIC_MODULE_NAMES_WHITELIST: Final = frozenset((
300 '__init__',
301 '__main__',
302 ))
303
304 #: List of bad magic module functions.
305 MAGIC_MODULE_NAMES_BLACKLIST: Final = frozenset((
306 '__getattr__',
307 '__dir__',
308 ))
309
310 #: Regex pattern to name modules.
311 MODULE_NAME_PATTERN: Final = re.compile(r'^_?_?[a-z][a-z\d_]*[a-z\d](__)?$')
312
313 #: Common numbers that are allowed to be used without being called "magic".
314 MAGIC_NUMBERS_WHITELIST: Final = frozenset((
315 0, # both int and float
316 0.1,
317 0.5,
318 1.0,
319 100,
320 1000,
321 1024, # bytes
322 24, # hours
323 60, # seconds, minutes
324
325 1j, # imaginary part of a complex number
326 ))
327
328 #: Maximum amount of ``pragma`` no-cover comments per module.
329 MAX_NO_COVER_COMMENTS: Final = 5
330
331 #: Maximum length of ``yield`` ``tuple`` expressions.
332 MAX_LEN_YIELD_TUPLE: Final = 5
333
334 #: Maximum number of compare nodes in a single expression.
335 MAX_COMPARES: Final = 2
336
337 #: Maximum number of conditions in a single ``if`` or ``while`` statement.
338 MAX_CONDITIONS: Final = 4
339
340 #: Maximum number of `elif` blocks in a single `if` condition:
341 MAX_ELIFS: Final = 3
342
343 #: Maximum number of ``except`` cases in a single ``try`` clause.
344 MAX_EXCEPT_CASES: Final = 3
345
346 #: Approximate constants which real values should be imported from math module.
347 MATH_APPROXIMATE_CONSTANTS: Final = frozenset((
348 math.pi,
349 math.e,
350 math.tau,
351 ))
352
353 #: List of vague method names that may cause confusion if imported as is:
354 VAGUE_IMPORTS_BLACKLIST: Final = frozenset((
355 'read',
356 'write',
357 'load',
358 'loads',
359 'dump',
360 'dumps',
361 'parse',
362 'safe_load',
363 'safe_dump',
364 'load_all',
365 'dump_all',
366 'safe_load_all',
367 'safe_dump_all',
368 ))
369
370 #: List of literals without arguments we forbid to use.
371 LITERALS_BLACKLIST: Final = frozenset((
372 'int',
373 'float',
374 'str',
375 'bytes',
376 'bool',
377 'complex',
378 ))
379
380 #: List of functions in which arguments must be tuples.
381 TUPLE_ARGUMENTS_METHODS: Final = frozenset((
382 'frozenset',
383 ))
384
385 #: Conditions that can appear in the ``if`` statement to allow nested imports.
386 ALLOWED_NESTED_IMPORTS_CONDITIONS: Final = frozenset((
387 'TYPE_CHECKING',
388 ))
389
390 #: List of commonly used aliases
391 ALIAS_NAMES_WHITELIST: Final = frozenset((
392 'np',
393 'pd',
394 'df',
395 'plt',
396 'sns',
397 'tf',
398 'cv',
399 ))
400
401 # Internal variables
402 # ==================
403
404 # Please, do not touch values beyond this line!
405 # ---------------------------------------------
406
407 # They are not publicly documented since they are not used by the end user.
408 # But, we still need them to be defined here.
409
410 # Used as a default filename, when it is not passed by flake8:
411 STDIN: Final = 'stdin'
412
413 # Used to specify as a placeholder for `__init__`:
414 INIT: Final = '__init__'
415
416 # Used to determine when we are running on Windows:
417 WINDOWS_OS: Final = 'nt'
418
419 # Used as a placeholder for special `_` variable:
420 UNUSED_PLACEHOLDER: Final = '_'
421
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wemake_python_styleguide/constants.py b/wemake_python_styleguide/constants.py
--- a/wemake_python_styleguide/constants.py
+++ b/wemake_python_styleguide/constants.py
@@ -266,6 +266,7 @@
'__anext__',
'__aenter__',
'__aexit__',
+ '__call__',
})
#: List of builtin classes that are allowed to subclass.
| {"golden_diff": "diff --git a/wemake_python_styleguide/constants.py b/wemake_python_styleguide/constants.py\n--- a/wemake_python_styleguide/constants.py\n+++ b/wemake_python_styleguide/constants.py\n@@ -266,6 +266,7 @@\n '__anext__',\n '__aenter__',\n '__aexit__',\n+ '__call__',\n })\n \n #: List of builtin classes that are allowed to subclass.\n", "issue": "Allow async magic __call__\n# Thesis\r\n\r\n`__call__` method should be asyncable.\r\n\r\n```python\r\nimport asyncio\r\nimport os\r\n\r\nclass RequestHandler(object):\r\n async def __call__(self, reader, writer):\r\n ipc_input = (await reader.readline()).decode('utf8').strip()\r\n writer.write('You sent: \"{0}\"\\n'.format(ipc_input).encode('utf8'))\r\n\r\nasync def bootstrap():\r\n handler = RequestHandler()\r\n server = await asyncio.start_unix_server(handler, path='/var/tmp/my_sock')\r\n os.chmod('/var/tmp/my_sock', 0o666)\r\n while True:\r\n try:\r\n await asyncio.sleep(120)\r\n except asyncio.CancelledError:\r\n server.close()\r\n break\r\n\r\nasyncio.run(bootstrap())\r\n```\r\n\r\n## Reasoning\r\n\r\nMuch like how `__call__` can be a generator there is no reason it can't be async.\r\n\n", "before_files": [{"content": "\"\"\"\nThis module contains list of white- and black-listed ``python`` members.\n\nWe add values here when we want to make them public.\nOr when a value is reused in several places.\nThen, we automatically have to add it here and document it.\n\nOther constants that are not used across modules\nand does not require to be documented can be defined where they are used.\n\nAll values here must be documented with ``#:`` comments.\n\"\"\"\n\nimport math\nimport re\n\nfrom typing_extensions import Final\n\n#: List of functions we forbid to use.\nFUNCTIONS_BLACKLIST: Final = frozenset((\n # Code generation:\n 'eval',\n 'exec',\n 'compile',\n\n # Termination:\n 'exit',\n 'quit',\n\n # Magic:\n 'globals',\n 'locals',\n 'vars',\n 'dir',\n\n # IO:\n 'print',\n 'pprint',\n 'input',\n 'breakpoint',\n\n # Attribute access:\n 'hasattr',\n 'delattr',\n\n # Gratis:\n 'copyright',\n 'help',\n 'credits',\n\n # Dynamic imports:\n '__import__',\n\n # OOP:\n 'staticmethod',\n\n # Mypy:\n 'reveal_type',\n))\n\n#: List of module metadata we forbid to use.\nMODULE_METADATA_VARIABLES_BLACKLIST: Final = frozenset((\n '__author__',\n '__all__',\n '__version__',\n '__about__',\n))\n\n#: List of variable names we forbid to use.\nVARIABLE_NAMES_BLACKLIST: Final = frozenset((\n # Meaningless words:\n 'data',\n 'result',\n 'results',\n 'item',\n 'items',\n 'value',\n 'values',\n 'val',\n 'vals',\n 'var',\n 'vars',\n 'variable',\n 'content',\n 'contents',\n 'info',\n 'handle',\n 'handler',\n 'file',\n 'obj',\n 'objects',\n 'objs',\n 'some',\n 'do',\n 'param',\n 'params',\n 'parameters',\n\n # Confuseables:\n 'no',\n 'true',\n 'false',\n\n # Names from examples:\n 'foo',\n 'bar',\n 'baz',\n))\n\n#: List of characters sequences that are hard to read.\nUNREADABLE_CHARACTER_COMBINATIONS: Final = frozenset((\n '1l',\n '1I',\n '0O',\n 'O0',\n # Not included: 'lI', 'l1', 'Il'\n # Because these names are quite common in real words.\n))\n\n#: List of special names that are used only as first argument in methods.\nSPECIAL_ARGUMENT_NAMES_WHITELIST: Final = frozenset((\n 'self',\n 'cls',\n 'mcs',\n))\n\n#: List of all magic methods from the python docs.\nALL_MAGIC_METHODS: Final = frozenset((\n '__new__',\n '__init__',\n '__del__',\n\n '__repr__',\n '__str__',\n '__bytes__',\n '__format__',\n\n '__lt__',\n '__le__',\n '__eq__',\n '__ne__',\n '__gt__',\n 
'__ge__',\n\n '__hash__',\n '__bool__',\n\n '__getattr__',\n '__getattribute__',\n '__setattr__',\n '__delattr__',\n '__dir__',\n\n '__get__',\n '__set__',\n '__delete__',\n '__set_name__',\n\n '__init_subclass__',\n '__instancecheck__',\n '__subclasscheck__',\n '__class_getitem__',\n\n '__call__',\n '__len__',\n '__length_hint__',\n '__getitem__',\n '__setitem__',\n '__delitem__',\n '__missing__',\n '__iter__',\n '__reversed__',\n '__contains__',\n\n '__add__',\n '__sub__',\n '__mul__',\n '__matmul__',\n '__truediv__',\n '__floordiv__',\n '__mod__',\n '__divmod__',\n '__pow__',\n '__lshift__',\n '__rshift__',\n '__and__',\n '__xor__',\n '__or__',\n '__radd__',\n '__rsub__',\n '__rmul__',\n '__rmatmul__',\n '__rtruediv__',\n '__rfloordiv__',\n '__rmod__',\n '__rdivmod__',\n '__rpow__',\n '__rlshift__',\n '__rrshift__',\n '__rand__',\n '__rxor__',\n '__ror__',\n '__iadd__',\n '__isub__',\n '__imul__',\n '__imatmul__',\n '__itruediv__',\n '__ifloordiv__',\n '__imod__',\n '__ipow__',\n '__ilshift__',\n '__irshift__',\n '__iand__',\n '__ixor__',\n '__ior__',\n '__neg__',\n '__pos__',\n '__abs__',\n '__invert__',\n '__complex__',\n '__int__',\n '__float__',\n '__index__',\n '__round__',\n '__trunc__',\n '__floor__',\n '__ceil__',\n\n '__enter__',\n '__exit__',\n\n '__await__',\n '__aiter__',\n '__anext__',\n '__aenter__',\n '__aexit__',\n))\n\n#: List of magic methods that are forbidden to use.\nMAGIC_METHODS_BLACKLIST: Final = frozenset((\n # Since we don't use `del`:\n '__del__',\n '__delitem__',\n '__delete__',\n\n # Since we don't use `pickle`:\n '__reduce__',\n '__reduce_ex__',\n\n '__dir__', # since we don't use `dir()`\n '__delattr__', # since we don't use `delattr()`\n))\n\n#: List of magic methods that are not allowed to be generators.\nYIELD_MAGIC_METHODS_BLACKLIST: Final = ALL_MAGIC_METHODS.difference({\n # Allowed to be used with ``yield`` keyword:\n '__call__', # Fixes Issue:146\n '__iter__',\n})\n\n#: List of magic methods that are not allowed to be async.\nASYNC_MAGIC_METHODS_BLACKLIST: Final = ALL_MAGIC_METHODS.difference({\n # In order of appearance on\n # https://docs.python.org/3/reference/datamodel.html#basic-customization\n # Allowed magic methods are:\n '__anext__',\n '__aenter__',\n '__aexit__',\n})\n\n#: List of builtin classes that are allowed to subclass.\nALLOWED_BUILTIN_CLASSES: Final = frozenset((\n 'type',\n 'object',\n))\n\n#: List of nested functions' names we allow to use.\nNESTED_FUNCTIONS_WHITELIST: Final = frozenset((\n 'decorator',\n 'factory',\n 'wrapper',\n))\n\n#: List of allowed ``__future__`` imports.\nFUTURE_IMPORTS_WHITELIST: Final = frozenset((\n 'annotations',\n 'generator_stop',\n))\n\n#: List of blacklisted module names.\nMODULE_NAMES_BLACKLIST: Final = frozenset((\n 'util',\n 'utils',\n 'utilities',\n 'helpers',\n))\n\n#: List of allowed module magic names.\nMAGIC_MODULE_NAMES_WHITELIST: Final = frozenset((\n '__init__',\n '__main__',\n))\n\n#: List of bad magic module functions.\nMAGIC_MODULE_NAMES_BLACKLIST: Final = frozenset((\n '__getattr__',\n '__dir__',\n))\n\n#: Regex pattern to name modules.\nMODULE_NAME_PATTERN: Final = re.compile(r'^_?_?[a-z][a-z\\d_]*[a-z\\d](__)?$')\n\n#: Common numbers that are allowed to be used without being called \"magic\".\nMAGIC_NUMBERS_WHITELIST: Final = frozenset((\n 0, # both int and float\n 0.1,\n 0.5,\n 1.0,\n 100,\n 1000,\n 1024, # bytes\n 24, # hours\n 60, # seconds, minutes\n\n 1j, # imaginary part of a complex number\n))\n\n#: Maximum amount of ``pragma`` no-cover comments per 
module.\nMAX_NO_COVER_COMMENTS: Final = 5\n\n#: Maximum length of ``yield`` ``tuple`` expressions.\nMAX_LEN_YIELD_TUPLE: Final = 5\n\n#: Maximum number of compare nodes in a single expression.\nMAX_COMPARES: Final = 2\n\n#: Maximum number of conditions in a single ``if`` or ``while`` statement.\nMAX_CONDITIONS: Final = 4\n\n#: Maximum number of `elif` blocks in a single `if` condition:\nMAX_ELIFS: Final = 3\n\n#: Maximum number of ``except`` cases in a single ``try`` clause.\nMAX_EXCEPT_CASES: Final = 3\n\n#: Approximate constants which real values should be imported from math module.\nMATH_APPROXIMATE_CONSTANTS: Final = frozenset((\n math.pi,\n math.e,\n math.tau,\n))\n\n#: List of vague method names that may cause confusion if imported as is:\nVAGUE_IMPORTS_BLACKLIST: Final = frozenset((\n 'read',\n 'write',\n 'load',\n 'loads',\n 'dump',\n 'dumps',\n 'parse',\n 'safe_load',\n 'safe_dump',\n 'load_all',\n 'dump_all',\n 'safe_load_all',\n 'safe_dump_all',\n))\n\n#: List of literals without arguments we forbid to use.\nLITERALS_BLACKLIST: Final = frozenset((\n 'int',\n 'float',\n 'str',\n 'bytes',\n 'bool',\n 'complex',\n))\n\n#: List of functions in which arguments must be tuples.\nTUPLE_ARGUMENTS_METHODS: Final = frozenset((\n 'frozenset',\n))\n\n#: Conditions that can appear in the ``if`` statement to allow nested imports.\nALLOWED_NESTED_IMPORTS_CONDITIONS: Final = frozenset((\n 'TYPE_CHECKING',\n))\n\n#: List of commonly used aliases\nALIAS_NAMES_WHITELIST: Final = frozenset((\n 'np',\n 'pd',\n 'df',\n 'plt',\n 'sns',\n 'tf',\n 'cv',\n))\n\n# Internal variables\n# ==================\n\n# Please, do not touch values beyond this line!\n# ---------------------------------------------\n\n# They are not publicly documented since they are not used by the end user.\n# But, we still need them to be defined here.\n\n# Used as a default filename, when it is not passed by flake8:\nSTDIN: Final = 'stdin'\n\n# Used to specify as a placeholder for `__init__`:\nINIT: Final = '__init__'\n\n# Used to determine when we are running on Windows:\nWINDOWS_OS: Final = 'nt'\n\n# Used as a placeholder for special `_` variable:\nUNUSED_PLACEHOLDER: Final = '_'\n", "path": "wemake_python_styleguide/constants.py"}], "after_files": [{"content": "\"\"\"\nThis module contains list of white- and black-listed ``python`` members.\n\nWe add values here when we want to make them public.\nOr when a value is reused in several places.\nThen, we automatically have to add it here and document it.\n\nOther constants that are not used across modules\nand does not require to be documented can be defined where they are used.\n\nAll values here must be documented with ``#:`` comments.\n\"\"\"\n\nimport math\nimport re\n\nfrom typing_extensions import Final\n\n#: List of functions we forbid to use.\nFUNCTIONS_BLACKLIST: Final = frozenset((\n # Code generation:\n 'eval',\n 'exec',\n 'compile',\n\n # Termination:\n 'exit',\n 'quit',\n\n # Magic:\n 'globals',\n 'locals',\n 'vars',\n 'dir',\n\n # IO:\n 'print',\n 'pprint',\n 'input',\n 'breakpoint',\n\n # Attribute access:\n 'hasattr',\n 'delattr',\n\n # Gratis:\n 'copyright',\n 'help',\n 'credits',\n\n # Dynamic imports:\n '__import__',\n\n # OOP:\n 'staticmethod',\n\n # Mypy:\n 'reveal_type',\n))\n\n#: List of module metadata we forbid to use.\nMODULE_METADATA_VARIABLES_BLACKLIST: Final = frozenset((\n '__author__',\n '__all__',\n '__version__',\n '__about__',\n))\n\n#: List of variable names we forbid to use.\nVARIABLE_NAMES_BLACKLIST: Final = frozenset((\n # Meaningless words:\n 
'data',\n 'result',\n 'results',\n 'item',\n 'items',\n 'value',\n 'values',\n 'val',\n 'vals',\n 'var',\n 'vars',\n 'variable',\n 'content',\n 'contents',\n 'info',\n 'handle',\n 'handler',\n 'file',\n 'obj',\n 'objects',\n 'objs',\n 'some',\n 'do',\n 'param',\n 'params',\n 'parameters',\n\n # Confuseables:\n 'no',\n 'true',\n 'false',\n\n # Names from examples:\n 'foo',\n 'bar',\n 'baz',\n))\n\n#: List of characters sequences that are hard to read.\nUNREADABLE_CHARACTER_COMBINATIONS: Final = frozenset((\n '1l',\n '1I',\n '0O',\n 'O0',\n # Not included: 'lI', 'l1', 'Il'\n # Because these names are quite common in real words.\n))\n\n#: List of special names that are used only as first argument in methods.\nSPECIAL_ARGUMENT_NAMES_WHITELIST: Final = frozenset((\n 'self',\n 'cls',\n 'mcs',\n))\n\n#: List of all magic methods from the python docs.\nALL_MAGIC_METHODS: Final = frozenset((\n '__new__',\n '__init__',\n '__del__',\n\n '__repr__',\n '__str__',\n '__bytes__',\n '__format__',\n\n '__lt__',\n '__le__',\n '__eq__',\n '__ne__',\n '__gt__',\n '__ge__',\n\n '__hash__',\n '__bool__',\n\n '__getattr__',\n '__getattribute__',\n '__setattr__',\n '__delattr__',\n '__dir__',\n\n '__get__',\n '__set__',\n '__delete__',\n '__set_name__',\n\n '__init_subclass__',\n '__instancecheck__',\n '__subclasscheck__',\n '__class_getitem__',\n\n '__call__',\n '__len__',\n '__length_hint__',\n '__getitem__',\n '__setitem__',\n '__delitem__',\n '__missing__',\n '__iter__',\n '__reversed__',\n '__contains__',\n\n '__add__',\n '__sub__',\n '__mul__',\n '__matmul__',\n '__truediv__',\n '__floordiv__',\n '__mod__',\n '__divmod__',\n '__pow__',\n '__lshift__',\n '__rshift__',\n '__and__',\n '__xor__',\n '__or__',\n '__radd__',\n '__rsub__',\n '__rmul__',\n '__rmatmul__',\n '__rtruediv__',\n '__rfloordiv__',\n '__rmod__',\n '__rdivmod__',\n '__rpow__',\n '__rlshift__',\n '__rrshift__',\n '__rand__',\n '__rxor__',\n '__ror__',\n '__iadd__',\n '__isub__',\n '__imul__',\n '__imatmul__',\n '__itruediv__',\n '__ifloordiv__',\n '__imod__',\n '__ipow__',\n '__ilshift__',\n '__irshift__',\n '__iand__',\n '__ixor__',\n '__ior__',\n '__neg__',\n '__pos__',\n '__abs__',\n '__invert__',\n '__complex__',\n '__int__',\n '__float__',\n '__index__',\n '__round__',\n '__trunc__',\n '__floor__',\n '__ceil__',\n\n '__enter__',\n '__exit__',\n\n '__await__',\n '__aiter__',\n '__anext__',\n '__aenter__',\n '__aexit__',\n))\n\n#: List of magic methods that are forbidden to use.\nMAGIC_METHODS_BLACKLIST: Final = frozenset((\n # Since we don't use `del`:\n '__del__',\n '__delitem__',\n '__delete__',\n\n # Since we don't use `pickle`:\n '__reduce__',\n '__reduce_ex__',\n\n '__dir__', # since we don't use `dir()`\n '__delattr__', # since we don't use `delattr()`\n))\n\n#: List of magic methods that are not allowed to be generators.\nYIELD_MAGIC_METHODS_BLACKLIST: Final = ALL_MAGIC_METHODS.difference({\n # Allowed to be used with ``yield`` keyword:\n '__call__', # Fixes Issue:146\n '__iter__',\n})\n\n#: List of magic methods that are not allowed to be async.\nASYNC_MAGIC_METHODS_BLACKLIST: Final = ALL_MAGIC_METHODS.difference({\n # In order of appearance on\n # https://docs.python.org/3/reference/datamodel.html#basic-customization\n # Allowed magic methods are:\n '__anext__',\n '__aenter__',\n '__aexit__',\n '__call__',\n})\n\n#: List of builtin classes that are allowed to subclass.\nALLOWED_BUILTIN_CLASSES: Final = frozenset((\n 'type',\n 'object',\n))\n\n#: List of nested functions' names we allow to use.\nNESTED_FUNCTIONS_WHITELIST: Final = 
frozenset((\n 'decorator',\n 'factory',\n 'wrapper',\n))\n\n#: List of allowed ``__future__`` imports.\nFUTURE_IMPORTS_WHITELIST: Final = frozenset((\n 'annotations',\n 'generator_stop',\n))\n\n#: List of blacklisted module names.\nMODULE_NAMES_BLACKLIST: Final = frozenset((\n 'util',\n 'utils',\n 'utilities',\n 'helpers',\n))\n\n#: List of allowed module magic names.\nMAGIC_MODULE_NAMES_WHITELIST: Final = frozenset((\n '__init__',\n '__main__',\n))\n\n#: List of bad magic module functions.\nMAGIC_MODULE_NAMES_BLACKLIST: Final = frozenset((\n '__getattr__',\n '__dir__',\n))\n\n#: Regex pattern to name modules.\nMODULE_NAME_PATTERN: Final = re.compile(r'^_?_?[a-z][a-z\\d_]*[a-z\\d](__)?$')\n\n#: Common numbers that are allowed to be used without being called \"magic\".\nMAGIC_NUMBERS_WHITELIST: Final = frozenset((\n 0, # both int and float\n 0.1,\n 0.5,\n 1.0,\n 100,\n 1000,\n 1024, # bytes\n 24, # hours\n 60, # seconds, minutes\n\n 1j, # imaginary part of a complex number\n))\n\n#: Maximum amount of ``pragma`` no-cover comments per module.\nMAX_NO_COVER_COMMENTS: Final = 5\n\n#: Maximum length of ``yield`` ``tuple`` expressions.\nMAX_LEN_YIELD_TUPLE: Final = 5\n\n#: Maximum number of compare nodes in a single expression.\nMAX_COMPARES: Final = 2\n\n#: Maximum number of conditions in a single ``if`` or ``while`` statement.\nMAX_CONDITIONS: Final = 4\n\n#: Maximum number of `elif` blocks in a single `if` condition:\nMAX_ELIFS: Final = 3\n\n#: Maximum number of ``except`` cases in a single ``try`` clause.\nMAX_EXCEPT_CASES: Final = 3\n\n#: Approximate constants which real values should be imported from math module.\nMATH_APPROXIMATE_CONSTANTS: Final = frozenset((\n math.pi,\n math.e,\n math.tau,\n))\n\n#: List of vague method names that may cause confusion if imported as is:\nVAGUE_IMPORTS_BLACKLIST: Final = frozenset((\n 'read',\n 'write',\n 'load',\n 'loads',\n 'dump',\n 'dumps',\n 'parse',\n 'safe_load',\n 'safe_dump',\n 'load_all',\n 'dump_all',\n 'safe_load_all',\n 'safe_dump_all',\n))\n\n#: List of literals without arguments we forbid to use.\nLITERALS_BLACKLIST: Final = frozenset((\n 'int',\n 'float',\n 'str',\n 'bytes',\n 'bool',\n 'complex',\n))\n\n#: List of functions in which arguments must be tuples.\nTUPLE_ARGUMENTS_METHODS: Final = frozenset((\n 'frozenset',\n))\n\n#: Conditions that can appear in the ``if`` statement to allow nested imports.\nALLOWED_NESTED_IMPORTS_CONDITIONS: Final = frozenset((\n 'TYPE_CHECKING',\n))\n\n#: List of commonly used aliases\nALIAS_NAMES_WHITELIST: Final = frozenset((\n 'np',\n 'pd',\n 'df',\n 'plt',\n 'sns',\n 'tf',\n 'cv',\n))\n\n# Internal variables\n# ==================\n\n# Please, do not touch values beyond this line!\n# ---------------------------------------------\n\n# They are not publicly documented since they are not used by the end user.\n# But, we still need them to be defined here.\n\n# Used as a default filename, when it is not passed by flake8:\nSTDIN: Final = 'stdin'\n\n# Used to specify as a placeholder for `__init__`:\nINIT: Final = '__init__'\n\n# Used to determine when we are running on Windows:\nWINDOWS_OS: Final = 'nt'\n\n# Used as a placeholder for special `_` variable:\nUNUSED_PLACEHOLDER: Final = '_'\n", "path": "wemake_python_styleguide/constants.py"}]} | 4,027 | 95 |
gh_patches_debug_27570 | rasdani/github-patches | git_diff | rotki__rotki-62 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Etherscan balance query for more than 20 accounts won't work
Etherscan has a 20 account limit per query.
--- END ISSUE ---
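Because the `balancemulti` endpoint rejects requests carrying more than 20 addresses, any fix has to split the account list into batches before querying. A minimal standalone sketch of that batching (dummy addresses, no real HTTP request is made, and the merge step is only indicated in a comment):

```python
# Sketch only: slice accounts into batches of at most 20 for Etherscan's
# balancemulti endpoint. The addresses are dummies and nothing is fetched.

def chunks(seq, size=20):
    """Yield consecutive slices of at most `size` items from `seq`."""
    for start in range(0, len(seq), size):
        yield seq[start:start + size]

accounts = ['0x{:040x}'.format(i) for i in range(45)]  # 45 fake addresses

for batch in chunks(accounts):
    url = (
        'https://api.etherscan.io/api?module=account&action=balancemulti&address='
        + ','.join(batch)
    )
    # A real implementation would call request_get(url) here and merge each
    # response's 'result' entries into a single balances dict.
    print(len(batch), 'addresses in this request')
```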
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rotkehlchen/ethchain.py`
Content:
```
1 import os
2 from web3 import Web3, HTTPProvider
3 from requests import ConnectionError
4
5 from rotkehlchen.utils import from_wei, rlk_jsonloads, request_get
6 from rotkehlchen.fval import FVal
7
8 import logging
9 logger = logging.getLogger(__name__)
10
11
12 class Ethchain(object):
13 def __init__(self, ethrpc_port, attempt_connect=True):
14 self.web3 = None
15 self.rpc_port = ethrpc_port
16 self.connected = False
17 if attempt_connect:
18 self.attempt_connect(ethrpc_port)
19
20 def attempt_connect(self, ethrpc_port, mainnet_check=True):
21 if self.rpc_port == ethrpc_port and self.connected:
22 # We are already connected
23 return True, 'Already connected to an ethereum node'
24
25 if self.web3:
26 del self.web3
27
28 try:
29 self.web3 = Web3(HTTPProvider('http://localhost:{}'.format(ethrpc_port)))
30 except ConnectionError:
31 logger.warn('Could not connect to a local ethereum node. Will use etherscan only')
32 self.connected = False
33 return False, 'Failed to connect to ethereum node at port {}'.format(ethrpc_port)
34
35 if self.web3.isConnected():
36 dir_path = os.path.dirname(os.path.realpath(__file__))
37 with open(os.path.join(dir_path, 'data', 'token_abi.json'), 'r') as f:
38 self.token_abi = rlk_jsonloads(f.read())
39
40 # Also make sure we are actually connected to the Ethereum mainnet
41 if mainnet_check:
42 genesis_hash = self.web3.eth.getBlock(0)['hash'].hex()
43 target = '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3'
44 if genesis_hash != target:
45 logger.warn(
46 'Connected to a local ethereum node but it is not on the ethereum mainnet'
47 )
48 self.connected = False
49 message = (
50 'Connected to ethereum node at port {} but it is not on '
51 'the ethereum mainnet'.format(ethrpc_port)
52 )
53 return False, message
54
55 self.connected = True
56 return True, ''
57 else:
58 logger.warn('Could not connect to a local ethereum node. Will use etherscan only')
59 self.connected = False
60 message = 'Failed to connect to ethereum node at port {}'.format(ethrpc_port)
61
62 # If we get here we did not connnect
63 return False, message
64
65 def set_rpc_port(self, port):
66 """ Attempts to set the RPC port for the ethereum client.
67
68 Returns a tuple (result, message)
69 - result: Boolean for success or failure of changing the rpc port
70 - message: A message containing information on what happened. Can
71 be populated both in case of success or failure"""
72 result, message = self.attempt_connect(port)
73 if result:
74 self.ethrpc_port = port
75 return result, message
76
77 def get_eth_balance(self, account):
78 if not self.connected:
79 eth_resp = request_get(
80 'https://api.etherscan.io/api?module=account&action=balance&address=%s'
81 % account
82 )
83 if eth_resp['status'] != 1:
84 raise ValueError('Failed to query etherscan for accounts balance')
85 amount = FVal(eth_resp['result'])
86 return from_wei(amount)
87 else:
88 return from_wei(self.web3.eth.getBalance(account))
89
90 def get_multieth_balance(self, accounts):
91 """Returns a dict with keys being accounts and balances in ETH"""
92 balances = {}
93 if not self.connected:
94 # TODO: accounts.length should be less than 20. If more we gotta do
95 # multiple calls
96 eth_resp = request_get(
97 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' %
98 ','.join(accounts)
99 )
100 if eth_resp['status'] != 1:
101 raise ValueError('Failed to query etherscan for accounts balance')
102 eth_accounts = eth_resp['result']
103 for account_entry in eth_accounts:
104 amount = FVal(account_entry['balance'])
105 balances[account_entry['account']] = from_wei(amount)
106
107 else:
108 for account in accounts:
109 amount = FVal(self.web3.eth.getBalance(account))
110 balances[account] = from_wei(amount)
111
112 return balances
113
114 def get_multitoken_balance(self, token_symbol, token_address, token_decimals, accounts):
115 """Return a dictionary with keys being accounts and value balances of token
116 Balance value is normalized through the token decimals.
117 """
118 balances = {}
119 if self.connected:
120 token_contract = self.web3.eth.contract(
121 address=token_address,
122 abi=self.token_abi
123 )
124 for account in accounts:
125 token_amount = FVal(token_contract.functions.balanceOf(account).call())
126 if token_amount != 0:
127 balances[account] = token_amount / (FVal(10) ** FVal(token_decimals))
128 else:
129 for account in accounts:
130 print('Checking token {} for account {}'.format(token_symbol, account))
131 resp = request_get(
132 'https://api.etherscan.io/api?module=account&action='
133 'tokenbalance&contractaddress={}&address={}'.format(
134 token_address,
135 account,
136 ))
137 if resp['status'] != 1:
138 raise ValueError(
139 'Failed to query etherscan for {} token balance of {}'.format(
140 token_symbol,
141 account,
142 ))
143 token_amount = FVal(resp['result'])
144 if token_amount != 0:
145 balances[account] = token_amount / (FVal(10) ** FVal(token_decimals))
146
147 return balances
148
149 def get_token_balance(self, token_symbol, token_address, token_decimals, account):
150 res = self.get_multitoken_balance(token_symbol, token_address, token_decimals, [account])
151 return res.get(account, 0)
152
153 def get_block_by_number(self, num):
154 if not self.connected:
155 return None
156
157 return self.web3.eth.getBlock(num)
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rotkehlchen/ethchain.py b/rotkehlchen/ethchain.py
--- a/rotkehlchen/ethchain.py
+++ b/rotkehlchen/ethchain.py
@@ -90,19 +90,25 @@
def get_multieth_balance(self, accounts):
"""Returns a dict with keys being accounts and balances in ETH"""
balances = {}
+
if not self.connected:
- # TODO: accounts.length should be less than 20. If more we gotta do
- # multiple calls
- eth_resp = request_get(
- 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' %
- ','.join(accounts)
- )
- if eth_resp['status'] != 1:
- raise ValueError('Failed to query etherscan for accounts balance')
- eth_accounts = eth_resp['result']
- for account_entry in eth_accounts:
- amount = FVal(account_entry['balance'])
- balances[account_entry['account']] = from_wei(amount)
+ if len(accounts) > 20:
+ new_accounts = [accounts[x:x+2] for x in range(0, len(accounts), 2)]
+ else:
+ new_accounts = [accounts]
+
+ for account_slice in new_accounts:
+ eth_resp = request_get(
+ 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' %
+ ','.join(account_slice)
+ )
+ if eth_resp['status'] != 1:
+ raise ValueError('Failed to query etherscan for accounts balance')
+ eth_accounts = eth_resp['result']
+
+ for account_entry in eth_accounts:
+ amount = FVal(account_entry['balance'])
+ balances[account_entry['account']] = from_wei(amount)
else:
for account in accounts:
| {"golden_diff": "diff --git a/rotkehlchen/ethchain.py b/rotkehlchen/ethchain.py\n--- a/rotkehlchen/ethchain.py\n+++ b/rotkehlchen/ethchain.py\n@@ -90,19 +90,25 @@\n def get_multieth_balance(self, accounts):\n \"\"\"Returns a dict with keys being accounts and balances in ETH\"\"\"\n balances = {}\n+\n if not self.connected:\n- # TODO: accounts.length should be less than 20. If more we gotta do\n- # multiple calls\n- eth_resp = request_get(\n- 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' %\n- ','.join(accounts)\n- )\n- if eth_resp['status'] != 1:\n- raise ValueError('Failed to query etherscan for accounts balance')\n- eth_accounts = eth_resp['result']\n- for account_entry in eth_accounts:\n- amount = FVal(account_entry['balance'])\n- balances[account_entry['account']] = from_wei(amount)\n+ if len(accounts) > 20:\n+ new_accounts = [accounts[x:x+2] for x in range(0, len(accounts), 2)]\n+ else:\n+ new_accounts = [accounts]\n+\n+ for account_slice in new_accounts:\n+ eth_resp = request_get(\n+ 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' %\n+ ','.join(account_slice)\n+ )\n+ if eth_resp['status'] != 1:\n+ raise ValueError('Failed to query etherscan for accounts balance')\n+ eth_accounts = eth_resp['result']\n+\n+ for account_entry in eth_accounts:\n+ amount = FVal(account_entry['balance'])\n+ balances[account_entry['account']] = from_wei(amount)\n \n else:\n for account in accounts:\n", "issue": "Etherscan balance query for more than 20 accounts won't work\nEtherscan has a 20 account limit per query.\n", "before_files": [{"content": "import os\nfrom web3 import Web3, HTTPProvider\nfrom requests import ConnectionError\n\nfrom rotkehlchen.utils import from_wei, rlk_jsonloads, request_get\nfrom rotkehlchen.fval import FVal\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass Ethchain(object):\n def __init__(self, ethrpc_port, attempt_connect=True):\n self.web3 = None\n self.rpc_port = ethrpc_port\n self.connected = False\n if attempt_connect:\n self.attempt_connect(ethrpc_port)\n\n def attempt_connect(self, ethrpc_port, mainnet_check=True):\n if self.rpc_port == ethrpc_port and self.connected:\n # We are already connected\n return True, 'Already connected to an ethereum node'\n\n if self.web3:\n del self.web3\n\n try:\n self.web3 = Web3(HTTPProvider('http://localhost:{}'.format(ethrpc_port)))\n except ConnectionError:\n logger.warn('Could not connect to a local ethereum node. Will use etherscan only')\n self.connected = False\n return False, 'Failed to connect to ethereum node at port {}'.format(ethrpc_port)\n\n if self.web3.isConnected():\n dir_path = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(dir_path, 'data', 'token_abi.json'), 'r') as f:\n self.token_abi = rlk_jsonloads(f.read())\n\n # Also make sure we are actually connected to the Ethereum mainnet\n if mainnet_check:\n genesis_hash = self.web3.eth.getBlock(0)['hash'].hex()\n target = '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3'\n if genesis_hash != target:\n logger.warn(\n 'Connected to a local ethereum node but it is not on the ethereum mainnet'\n )\n self.connected = False\n message = (\n 'Connected to ethereum node at port {} but it is not on '\n 'the ethereum mainnet'.format(ethrpc_port)\n )\n return False, message\n\n self.connected = True\n return True, ''\n else:\n logger.warn('Could not connect to a local ethereum node. 
Will use etherscan only')\n self.connected = False\n message = 'Failed to connect to ethereum node at port {}'.format(ethrpc_port)\n\n # If we get here we did not connnect\n return False, message\n\n def set_rpc_port(self, port):\n \"\"\" Attempts to set the RPC port for the ethereum client.\n\n Returns a tuple (result, message)\n - result: Boolean for success or failure of changing the rpc port\n - message: A message containing information on what happened. Can\n be populated both in case of success or failure\"\"\"\n result, message = self.attempt_connect(port)\n if result:\n self.ethrpc_port = port\n return result, message\n\n def get_eth_balance(self, account):\n if not self.connected:\n eth_resp = request_get(\n 'https://api.etherscan.io/api?module=account&action=balance&address=%s'\n % account\n )\n if eth_resp['status'] != 1:\n raise ValueError('Failed to query etherscan for accounts balance')\n amount = FVal(eth_resp['result'])\n return from_wei(amount)\n else:\n return from_wei(self.web3.eth.getBalance(account))\n\n def get_multieth_balance(self, accounts):\n \"\"\"Returns a dict with keys being accounts and balances in ETH\"\"\"\n balances = {}\n if not self.connected:\n # TODO: accounts.length should be less than 20. If more we gotta do\n # multiple calls\n eth_resp = request_get(\n 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' %\n ','.join(accounts)\n )\n if eth_resp['status'] != 1:\n raise ValueError('Failed to query etherscan for accounts balance')\n eth_accounts = eth_resp['result']\n for account_entry in eth_accounts:\n amount = FVal(account_entry['balance'])\n balances[account_entry['account']] = from_wei(amount)\n\n else:\n for account in accounts:\n amount = FVal(self.web3.eth.getBalance(account))\n balances[account] = from_wei(amount)\n\n return balances\n\n def get_multitoken_balance(self, token_symbol, token_address, token_decimals, accounts):\n \"\"\"Return a dictionary with keys being accounts and value balances of token\n Balance value is normalized through the token decimals.\n \"\"\"\n balances = {}\n if self.connected:\n token_contract = self.web3.eth.contract(\n address=token_address,\n abi=self.token_abi\n )\n for account in accounts:\n token_amount = FVal(token_contract.functions.balanceOf(account).call())\n if token_amount != 0:\n balances[account] = token_amount / (FVal(10) ** FVal(token_decimals))\n else:\n for account in accounts:\n print('Checking token {} for account {}'.format(token_symbol, account))\n resp = request_get(\n 'https://api.etherscan.io/api?module=account&action='\n 'tokenbalance&contractaddress={}&address={}'.format(\n token_address,\n account,\n ))\n if resp['status'] != 1:\n raise ValueError(\n 'Failed to query etherscan for {} token balance of {}'.format(\n token_symbol,\n account,\n ))\n token_amount = FVal(resp['result'])\n if token_amount != 0:\n balances[account] = token_amount / (FVal(10) ** FVal(token_decimals))\n\n return balances\n\n def get_token_balance(self, token_symbol, token_address, token_decimals, account):\n res = self.get_multitoken_balance(token_symbol, token_address, token_decimals, [account])\n return res.get(account, 0)\n\n def get_block_by_number(self, num):\n if not self.connected:\n return None\n\n return self.web3.eth.getBlock(num)\n", "path": "rotkehlchen/ethchain.py"}], "after_files": [{"content": "import os\nfrom web3 import Web3, HTTPProvider\nfrom requests import ConnectionError\n\nfrom rotkehlchen.utils import from_wei, rlk_jsonloads, request_get\nfrom rotkehlchen.fval 
import FVal\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass Ethchain(object):\n def __init__(self, ethrpc_port, attempt_connect=True):\n self.web3 = None\n self.rpc_port = ethrpc_port\n self.connected = False\n if attempt_connect:\n self.attempt_connect(ethrpc_port)\n\n def attempt_connect(self, ethrpc_port, mainnet_check=True):\n if self.rpc_port == ethrpc_port and self.connected:\n # We are already connected\n return True, 'Already connected to an ethereum node'\n\n if self.web3:\n del self.web3\n\n try:\n self.web3 = Web3(HTTPProvider('http://localhost:{}'.format(ethrpc_port)))\n except ConnectionError:\n logger.warn('Could not connect to a local ethereum node. Will use etherscan only')\n self.connected = False\n return False, 'Failed to connect to ethereum node at port {}'.format(ethrpc_port)\n\n if self.web3.isConnected():\n dir_path = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(dir_path, 'data', 'token_abi.json'), 'r') as f:\n self.token_abi = rlk_jsonloads(f.read())\n\n # Also make sure we are actually connected to the Ethereum mainnet\n if mainnet_check:\n genesis_hash = self.web3.eth.getBlock(0)['hash'].hex()\n target = '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3'\n if genesis_hash != target:\n logger.warn(\n 'Connected to a local ethereum node but it is not on the ethereum mainnet'\n )\n self.connected = False\n message = (\n 'Connected to ethereum node at port {} but it is not on '\n 'the ethereum mainnet'.format(ethrpc_port)\n )\n return False, message\n\n self.connected = True\n return True, ''\n else:\n logger.warn('Could not connect to a local ethereum node. Will use etherscan only')\n self.connected = False\n message = 'Failed to connect to ethereum node at port {}'.format(ethrpc_port)\n\n # If we get here we did not connnect\n return False, message\n\n def set_rpc_port(self, port):\n \"\"\" Attempts to set the RPC port for the ethereum client.\n\n Returns a tuple (result, message)\n - result: Boolean for success or failure of changing the rpc port\n - message: A message containing information on what happened. 
Can\n be populated both in case of success or failure\"\"\"\n result, message = self.attempt_connect(port)\n if result:\n self.ethrpc_port = port\n return result, message\n\n def get_eth_balance(self, account):\n if not self.connected:\n eth_resp = request_get(\n 'https://api.etherscan.io/api?module=account&action=balance&address=%s'\n % account\n )\n if eth_resp['status'] != 1:\n raise ValueError('Failed to query etherscan for accounts balance')\n amount = FVal(eth_resp['result'])\n return from_wei(amount)\n else:\n return from_wei(self.web3.eth.getBalance(account))\n\n def get_multieth_balance(self, accounts):\n \"\"\"Returns a dict with keys being accounts and balances in ETH\"\"\"\n balances = {}\n\n if not self.connected:\n if len(accounts) > 20:\n new_accounts = [accounts[x:x+2] for x in range(0, len(accounts), 2)]\n else:\n new_accounts = [accounts]\n\n for account_slice in new_accounts:\n eth_resp = request_get(\n 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' %\n ','.join(account_slice)\n )\n if eth_resp['status'] != 1:\n raise ValueError('Failed to query etherscan for accounts balance')\n eth_accounts = eth_resp['result']\n\n for account_entry in eth_accounts:\n amount = FVal(account_entry['balance'])\n balances[account_entry['account']] = from_wei(amount)\n\n else:\n for account in accounts:\n amount = FVal(self.web3.eth.getBalance(account))\n balances[account] = from_wei(amount)\n\n return balances\n\n def get_multitoken_balance(self, token_symbol, token_address, token_decimals, accounts):\n \"\"\"Return a dictionary with keys being accounts and value balances of token\n Balance value is normalized through the token decimals.\n \"\"\"\n balances = {}\n if self.connected:\n token_contract = self.web3.eth.contract(\n address=token_address,\n abi=self.token_abi\n )\n for account in accounts:\n token_amount = FVal(token_contract.functions.balanceOf(account).call())\n if token_amount != 0:\n balances[account] = token_amount / (FVal(10) ** FVal(token_decimals))\n else:\n for account in accounts:\n print('Checking token {} for account {}'.format(token_symbol, account))\n resp = request_get(\n 'https://api.etherscan.io/api?module=account&action='\n 'tokenbalance&contractaddress={}&address={}'.format(\n token_address,\n account,\n ))\n if resp['status'] != 1:\n raise ValueError(\n 'Failed to query etherscan for {} token balance of {}'.format(\n token_symbol,\n account,\n ))\n token_amount = FVal(resp['result'])\n if token_amount != 0:\n balances[account] = token_amount / (FVal(10) ** FVal(token_decimals))\n\n return balances\n\n def get_token_balance(self, token_symbol, token_address, token_decimals, account):\n res = self.get_multitoken_balance(token_symbol, token_address, token_decimals, [account])\n return res.get(account, 0)\n\n def get_block_by_number(self, num):\n if not self.connected:\n return None\n\n return self.web3.eth.getBlock(num)\n", "path": "rotkehlchen/ethchain.py"}]} | 2,014 | 424 |
gh_patches_debug_340 | rasdani/github-patches | git_diff | facebookresearch__nevergrad-705 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong dates in changelog
All the dates at https://github.com/facebookresearch/nevergrad/blob/master/CHANGELOG.md show 2019, but it seems they should be 2020.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 # Configuration file for the Sphinx documentation builder.
7 #
8 # This file only contains a selection of the most common options. For a full
9 # list see the documentation:
10 # http://www.sphinx-doc.org/en/master/config
11
12 # -- Path setup --------------------------------------------------------------
13
14 # If extensions (or modules to document with autodoc) are in another directory,
15 # add these directories to sys.path here. If the directory is relative to the
16 # documentation root, use os.path.abspath to make it absolute, like shown here.
17
18 import os
19 import sys
20 sys.path.insert(0, os.path.abspath('..'))
21
22
23 # -- Project information -----------------------------------------------------
24
25 project = 'nevergrad'
26 copyright = '2019, Facebook AI Research' # pylint: disable=redefined-builtin
27 author = 'Facebook AI Research'
28
29
30 # -- General configuration ---------------------------------------------------
31
32 # Add any Sphinx extension module names here, as strings. They can be
33 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 # ones.
35 extensions = ['sphinx.ext.autodoc',
36 'sphinx.ext.githubpages',
37 'sphinx.ext.coverage',
38 'sphinx.ext.napoleon',
39 'sphinx.ext.autosummary',
40 'recommonmark',
41 ]
42
43 source_suffix = {
44 '.rst': 'restructuredtext',
45 '.txt': 'markdown',
46 '.md': 'markdown',
47 }
48
49 master_doc = 'index'
50
51 # Add any paths that contain templates here, relative to this directory.
52 templates_path = []
53
54 # List of patterns, relative to source directory, that match files and
55 # directories to ignore when looking for source files.
56 # This pattern also affects html_static_path and html_extra_path.
57 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
58
59
60 # -- Options for HTML output -------------------------------------------------
61
62 # The theme to use for HTML and HTML Help pages. See the documentation for
63 # a list of builtin themes.
64 #
65 html_theme = 'sphinx_rtd_theme'
66
67 # Add any paths that contain custom static files (such as style sheets) here,
68 # relative to this directory. They are copied after the builtin static files,
69 # so a file named "default.css" will overwrite the builtin "default.css".
70 html_static_path = []
71
72 # -- Other --
73 linkcheck_ignore = [r'https://gecco-2020.sigevo.org/*']
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -70,4 +70,5 @@
html_static_path = []
# -- Other --
-linkcheck_ignore = [r'https://gecco-2020.sigevo.org/*']
+linkcheck_ignore = [r'https://gecco-2020.sigevo.org/*',
+ r'https://arxiv.org/abs/*'] # Transient certificate error :(
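For reference, `linkcheck_ignore` entries are regular expressions that Sphinx's linkcheck builder matches against the start of each outgoing URL, so the two patterns above simply exempt GECCO and arXiv links from checking. A small standalone illustration (the URLs are made-up examples):

```python
import re

# The two patterns added in the diff above.
ignore_patterns = [r'https://gecco-2020.sigevo.org/*', r'https://arxiv.org/abs/*']

# Made-up example URLs, purely for illustration.
urls = [
    'https://gecco-2020.sigevo.org/program',
    'https://arxiv.org/abs/1234.56789',
    'https://github.com/facebookresearch/nevergrad',
]

for url in urls:
    skipped = any(re.match(pattern, url) for pattern in ignore_patterns)
    print(url, '->', 'ignored' if skipped else 'checked')
```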
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -70,4 +70,5 @@\n html_static_path = []\n \n # -- Other --\n-linkcheck_ignore = [r'https://gecco-2020.sigevo.org/*']\n+linkcheck_ignore = [r'https://gecco-2020.sigevo.org/*',\n+ r'https://arxiv.org/abs/*'] # Transient certificate error :(\n", "issue": "Wrong dates in changelog\nAll the dates at https://github.com/facebookresearch/nevergrad/blob/master/CHANGELOG.md shows 2019, but seems it should be 2020.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'nevergrad'\ncopyright = '2019, Facebook AI Research' # pylint: disable=redefined-builtin\nauthor = 'Facebook AI Research'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.coverage',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosummary',\n 'recommonmark',\n ]\n\nsource_suffix = {\n '.rst': 'restructuredtext',\n '.txt': 'markdown',\n '.md': 'markdown',\n}\n\nmaster_doc = 'index'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = []\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# -- Other --\nlinkcheck_ignore = [r'https://gecco-2020.sigevo.org/*']\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. 
For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'nevergrad'\ncopyright = '2019, Facebook AI Research' # pylint: disable=redefined-builtin\nauthor = 'Facebook AI Research'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.coverage',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosummary',\n 'recommonmark',\n ]\n\nsource_suffix = {\n '.rst': 'restructuredtext',\n '.txt': 'markdown',\n '.md': 'markdown',\n}\n\nmaster_doc = 'index'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = []\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# -- Other --\nlinkcheck_ignore = [r'https://gecco-2020.sigevo.org/*',\n r'https://arxiv.org/abs/*'] # Transient certificate error :(\n", "path": "docs/conf.py"}]} | 988 | 109 |
gh_patches_debug_27935 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-3516 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
warnaction add throws OSErrors
# Command bugs
#### Command name
warnaction
#### What cog is this command from?
Warnings
#### What were you expecting to happen?
An action to be created when I tried to add automated actions to warnings.
#### What actually happened?
```
Exception in command 'warnaction add'
Traceback (most recent call last):
File "/root/redenv/lib/python3.8/site-packages/discord/ext/commands/core.py", line 83, in wrapped
File "/root/redenv/lib/python3.8/site-packages/redbot/cogs/warnings/warnings.py", line 103, in action_add
File "/root/redenv/lib/python3.8/site-packages/redbot/cogs/warnings/helpers.py", line 111, in get_command_for_exceeded_points
File "/root/redenv/lib/python3.8/site-packages/redbot/cogs/warnings/helpers.py", line 72, in get_command_from_input
File "/usr/lib/python3.8/inspect.py", line 985, in getsource
lines, lnum = getsourcelines(object)
File "/usr/lib/python3.8/inspect.py", line 967, in getsourcelines
lines, lnum = findsource(object)
File "/usr/lib/python3.8/inspect.py", line 798, in findsource
raise OSError('could not get source code')
OSError: could not get source code
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/root/redenv/lib/python3.8/site-packages/discord/ext/commands/bot.py", line 892, in invoke
File "/root/redenv/lib/python3.8/site-packages/redbot/core/commands/commands.py", line 794, in invoke
File "/root/redenv/lib/python3.8/site-packages/discord/ext/commands/core.py", line 1234, in invoke
File "/root/redenv/lib/python3.8/site-packages/discord/ext/commands/core.py", line 797, in invoke
File "/root/redenv/lib/python3.8/site-packages/discord/ext/commands/core.py", line 92, in wrapped
discord.ext.commands.errors.CommandInvokeError: Command raised an exception: OSError: could not get source code
```
#### How can we reproduce this issue?
Install Red-Bot.
Try to add an action to a warning with the `warnaction add` command.
The `OSError` shown above is raised.
--- END ISSUE ---
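The traceback bottoms out in `inspect.getsource`, which needs the function's defining source file to be readable on disk; a check created dynamically (or shipped without `.py` sources) has nothing to read, and the call fails with `OSError: could not get source code`. A tiny standalone reproduction of that failure mode, independent of Red itself:

```python
import inspect

# Define a function with no backing source file, e.g. via exec().
namespace = {}
exec("def dynamically_defined():\n    pass\n", namespace)

try:
    inspect.getsource(namespace["dynamically_defined"])
except OSError as exc:
    print("inspect.getsource failed:", exc)  # "could not get source code"
```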
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redbot/cogs/warnings/helpers.py`
Content:
```
1 from copy import copy
2 import asyncio
3 import inspect
4 import discord
5
6 from redbot.core import Config, checks, commands
7 from redbot.core.i18n import Translator
8 from redbot.core.utils.predicates import MessagePredicate
9
10 _ = Translator("Warnings", __file__)
11
12
13 async def warning_points_add_check(
14 config: Config, ctx: commands.Context, user: discord.Member, points: int
15 ):
16 """Handles any action that needs to be taken or not based on the points"""
17 guild = ctx.guild
18 guild_settings = config.guild(guild)
19 act = {}
20 async with guild_settings.actions() as registered_actions:
21 for a in registered_actions:
22 # Actions are sorted in decreasing order of points.
23 # The first action we find where the user is above the threshold will be the
24 # highest action we can take.
25 if points >= a["points"]:
26 act = a
27 break
28 if act and act["exceed_command"] is not None: # some action needs to be taken
29 await create_and_invoke_context(ctx, act["exceed_command"], user)
30
31
32 async def warning_points_remove_check(
33 config: Config, ctx: commands.Context, user: discord.Member, points: int
34 ):
35 guild = ctx.guild
36 guild_settings = config.guild(guild)
37 act = {}
38 async with guild_settings.actions() as registered_actions:
39 for a in registered_actions:
40 if points >= a["points"]:
41 act = a
42 else:
43 break
44 if act and act["drop_command"] is not None: # some action needs to be taken
45 await create_and_invoke_context(ctx, act["drop_command"], user)
46
47
48 async def create_and_invoke_context(
49 realctx: commands.Context, command_str: str, user: discord.Member
50 ):
51 m = copy(realctx.message)
52 m.content = command_str.format(user=user.mention, prefix=realctx.prefix)
53 fctx = await realctx.bot.get_context(m, cls=commands.Context)
54 try:
55 await realctx.bot.invoke(fctx)
56 except (commands.CheckFailure, commands.CommandOnCooldown):
57 await fctx.reinvoke()
58
59
60 def get_command_from_input(bot, userinput: str):
61 com = None
62 orig = userinput
63 while com is None:
64 com = bot.get_command(userinput)
65 if com is None:
66 userinput = " ".join(userinput.split(" ")[:-1])
67 if len(userinput) == 0:
68 break
69 if com is None:
70 return None, _("I could not find a command from that input!")
71
72 check_str = inspect.getsource(checks.is_owner)
73 if any(inspect.getsource(x) in check_str for x in com.checks):
74 # command the user specified has the is_owner check
75 return (
76 None,
77 _("That command requires bot owner. I can't allow you to use that for an action"),
78 )
79 return "{prefix}" + orig, None
80
81
82 async def get_command_for_exceeded_points(ctx: commands.Context):
83 """Gets the command to be executed when the user is at or exceeding
84 the points threshold for the action"""
85 await ctx.send(
86 _(
87 "Enter the command to be run when the user **exceeds the points for "
88 "this action to occur.**\n**If you do not wish to have a command run, enter** "
89 "`none`.\n\nEnter it exactly as you would if you were "
90 "actually trying to run the command, except don't put a prefix and "
91 "use `{user}` in place of any user/member arguments\n\n"
92 "WARNING: The command entered will be run without regard to checks or cooldowns. "
93 "Commands requiring bot owner are not allowed for security reasons.\n\n"
94 "Please wait 15 seconds before entering your response."
95 )
96 )
97 await asyncio.sleep(15)
98
99 await ctx.send(_("You may enter your response now."))
100
101 try:
102 msg = await ctx.bot.wait_for(
103 "message", check=MessagePredicate.same_context(ctx), timeout=30
104 )
105 except asyncio.TimeoutError:
106 return None
107 else:
108 if msg.content == "none":
109 return None
110
111 command, m = get_command_from_input(ctx.bot, msg.content)
112 if command is None:
113 await ctx.send(m)
114 return None
115
116 return command
117
118
119 async def get_command_for_dropping_points(ctx: commands.Context):
120 """
121 Gets the command to be executed when the user drops below the points
122 threshold
123
124 This is intended to be used for reversal of the action that was executed
125 when the user exceeded the threshold
126 """
127 await ctx.send(
128 _(
129 "Enter the command to be run when the user **returns to a value below "
130 "the points for this action to occur.** Please note that this is "
131 "intended to be used for reversal of the action taken when the user "
132 "exceeded the action's point value.\n**If you do not wish to have a command run "
133 "on dropping points, enter** `none`.\n\nEnter it exactly as you would "
134 "if you were actually trying to run the command, except don't put a prefix "
135 "and use `{user}` in place of any user/member arguments\n\n"
136 "WARNING: The command entered will be run without regard to checks or cooldowns. "
137 "Commands requiring bot owner are not allowed for security reasons.\n\n"
138 "Please wait 15 seconds before entering your response."
139 )
140 )
141 await asyncio.sleep(15)
142
143 await ctx.send(_("You may enter your response now."))
144
145 try:
146 msg = await ctx.bot.wait_for(
147 "message", check=MessagePredicate.same_context(ctx), timeout=30
148 )
149 except asyncio.TimeoutError:
150 return None
151 else:
152 if msg.content == "none":
153 return None
154 command, m = get_command_from_input(ctx.bot, msg.content)
155 if command is None:
156 await ctx.send(m)
157 return None
158
159 return command
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redbot/cogs/warnings/helpers.py b/redbot/cogs/warnings/helpers.py
--- a/redbot/cogs/warnings/helpers.py
+++ b/redbot/cogs/warnings/helpers.py
@@ -1,9 +1,9 @@
from copy import copy
import asyncio
-import inspect
import discord
from redbot.core import Config, checks, commands
+from redbot.core.commands.requires import PrivilegeLevel
from redbot.core.i18n import Translator
from redbot.core.utils.predicates import MessagePredicate
@@ -54,7 +54,9 @@
try:
await realctx.bot.invoke(fctx)
except (commands.CheckFailure, commands.CommandOnCooldown):
- await fctx.reinvoke()
+ # reinvoke bypasses checks and we don't want to run bot owner only commands here
+ if fctx.command.requires.privilege_level < PrivilegeLevel.BOT_OWNER:
+ await fctx.reinvoke()
def get_command_from_input(bot, userinput: str):
@@ -69,9 +71,7 @@
if com is None:
return None, _("I could not find a command from that input!")
- check_str = inspect.getsource(checks.is_owner)
- if any(inspect.getsource(x) in check_str for x in com.checks):
- # command the user specified has the is_owner check
+ if com.requires.privilege_level >= PrivilegeLevel.BOT_OWNER:
return (
None,
_("That command requires bot owner. I can't allow you to use that for an action"),
| {"golden_diff": "diff --git a/redbot/cogs/warnings/helpers.py b/redbot/cogs/warnings/helpers.py\n--- a/redbot/cogs/warnings/helpers.py\n+++ b/redbot/cogs/warnings/helpers.py\n@@ -1,9 +1,9 @@\n from copy import copy\n import asyncio\n-import inspect\n import discord\n \n from redbot.core import Config, checks, commands\n+from redbot.core.commands.requires import PrivilegeLevel\n from redbot.core.i18n import Translator\n from redbot.core.utils.predicates import MessagePredicate\n \n@@ -54,7 +54,9 @@\n try:\n await realctx.bot.invoke(fctx)\n except (commands.CheckFailure, commands.CommandOnCooldown):\n- await fctx.reinvoke()\n+ # reinvoke bypasses checks and we don't want to run bot owner only commands here\n+ if fctx.command.requires.privilege_level < PrivilegeLevel.BOT_OWNER:\n+ await fctx.reinvoke()\n \n \n def get_command_from_input(bot, userinput: str):\n@@ -69,9 +71,7 @@\n if com is None:\n return None, _(\"I could not find a command from that input!\")\n \n- check_str = inspect.getsource(checks.is_owner)\n- if any(inspect.getsource(x) in check_str for x in com.checks):\n- # command the user specified has the is_owner check\n+ if com.requires.privilege_level >= PrivilegeLevel.BOT_OWNER:\n return (\n None,\n _(\"That command requires bot owner. I can't allow you to use that for an action\"),\n", "issue": "warnaction add throws OSErrors\n# Command bugs\r\n\r\n<!-- \r\nDid you find a bug with a command? Fill out the following:\r\n-->\r\n\r\n#### Command name\r\n\r\nwarnaction\r\n\r\n#### What cog is this command from?\r\n\r\nWarnings\r\n\r\n#### What were you expecting to happen?\r\n\r\nan action was created when I try to add automated actions to warnings\r\n\r\n#### What actually happened?\r\n```\r\nException in command 'warnaction add'\r\nTraceback (most recent call last):\r\n File \"/root/redenv/lib/python3.8/site-packages/discord/ext/commands/core.py\", line 83, in wrapped\r\n File \"/root/redenv/lib/python3.8/site-packages/redbot/cogs/warnings/warnings.py\", line 103, in action_add\r\n File \"/root/redenv/lib/python3.8/site-packages/redbot/cogs/warnings/helpers.py\", line 111, in get_command_for_exceeded_points\r\n File \"/root/redenv/lib/python3.8/site-packages/redbot/cogs/warnings/helpers.py\", line 72, in get_command_from_input\r\n File \"/usr/lib/python3.8/inspect.py\", line 985, in getsource\r\n lines, lnum = getsourcelines(object)\r\n File \"/usr/lib/python3.8/inspect.py\", line 967, in getsourcelines\r\n lines, lnum = findsource(object)\r\n File \"/usr/lib/python3.8/inspect.py\", line 798, in findsource\r\n raise OSError('could not get source code')\r\nOSError: could not get source code\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/root/redenv/lib/python3.8/site-packages/discord/ext/commands/bot.py\", line 892, in invoke\r\n File \"/root/redenv/lib/python3.8/site-packages/redbot/core/commands/commands.py\", line 794, in invoke\r\n File \"/root/redenv/lib/python3.8/site-packages/discord/ext/commands/core.py\", line 1234, in invoke\r\n File \"/root/redenv/lib/python3.8/site-packages/discord/ext/commands/core.py\", line 797, in invoke\r\n File \"/root/redenv/lib/python3.8/site-packages/discord/ext/commands/core.py\", line 92, in wrapped\r\ndiscord.ext.commands.errors.CommandInvokeError: Command raised an exception: OSError: could not get source code\r\n```\r\n\r\n#### How can we reproduce this issue?\r\n\r\nInstall Red-Bot\r\ntry to add an action to warnings\r\nprofit?\n", "before_files": 
[{"content": "from copy import copy\nimport asyncio\nimport inspect\nimport discord\n\nfrom redbot.core import Config, checks, commands\nfrom redbot.core.i18n import Translator\nfrom redbot.core.utils.predicates import MessagePredicate\n\n_ = Translator(\"Warnings\", __file__)\n\n\nasync def warning_points_add_check(\n config: Config, ctx: commands.Context, user: discord.Member, points: int\n):\n \"\"\"Handles any action that needs to be taken or not based on the points\"\"\"\n guild = ctx.guild\n guild_settings = config.guild(guild)\n act = {}\n async with guild_settings.actions() as registered_actions:\n for a in registered_actions:\n # Actions are sorted in decreasing order of points.\n # The first action we find where the user is above the threshold will be the\n # highest action we can take.\n if points >= a[\"points\"]:\n act = a\n break\n if act and act[\"exceed_command\"] is not None: # some action needs to be taken\n await create_and_invoke_context(ctx, act[\"exceed_command\"], user)\n\n\nasync def warning_points_remove_check(\n config: Config, ctx: commands.Context, user: discord.Member, points: int\n):\n guild = ctx.guild\n guild_settings = config.guild(guild)\n act = {}\n async with guild_settings.actions() as registered_actions:\n for a in registered_actions:\n if points >= a[\"points\"]:\n act = a\n else:\n break\n if act and act[\"drop_command\"] is not None: # some action needs to be taken\n await create_and_invoke_context(ctx, act[\"drop_command\"], user)\n\n\nasync def create_and_invoke_context(\n realctx: commands.Context, command_str: str, user: discord.Member\n):\n m = copy(realctx.message)\n m.content = command_str.format(user=user.mention, prefix=realctx.prefix)\n fctx = await realctx.bot.get_context(m, cls=commands.Context)\n try:\n await realctx.bot.invoke(fctx)\n except (commands.CheckFailure, commands.CommandOnCooldown):\n await fctx.reinvoke()\n\n\ndef get_command_from_input(bot, userinput: str):\n com = None\n orig = userinput\n while com is None:\n com = bot.get_command(userinput)\n if com is None:\n userinput = \" \".join(userinput.split(\" \")[:-1])\n if len(userinput) == 0:\n break\n if com is None:\n return None, _(\"I could not find a command from that input!\")\n\n check_str = inspect.getsource(checks.is_owner)\n if any(inspect.getsource(x) in check_str for x in com.checks):\n # command the user specified has the is_owner check\n return (\n None,\n _(\"That command requires bot owner. I can't allow you to use that for an action\"),\n )\n return \"{prefix}\" + orig, None\n\n\nasync def get_command_for_exceeded_points(ctx: commands.Context):\n \"\"\"Gets the command to be executed when the user is at or exceeding\n the points threshold for the action\"\"\"\n await ctx.send(\n _(\n \"Enter the command to be run when the user **exceeds the points for \"\n \"this action to occur.**\\n**If you do not wish to have a command run, enter** \"\n \"`none`.\\n\\nEnter it exactly as you would if you were \"\n \"actually trying to run the command, except don't put a prefix and \"\n \"use `{user}` in place of any user/member arguments\\n\\n\"\n \"WARNING: The command entered will be run without regard to checks or cooldowns. 
\"\n \"Commands requiring bot owner are not allowed for security reasons.\\n\\n\"\n \"Please wait 15 seconds before entering your response.\"\n )\n )\n await asyncio.sleep(15)\n\n await ctx.send(_(\"You may enter your response now.\"))\n\n try:\n msg = await ctx.bot.wait_for(\n \"message\", check=MessagePredicate.same_context(ctx), timeout=30\n )\n except asyncio.TimeoutError:\n return None\n else:\n if msg.content == \"none\":\n return None\n\n command, m = get_command_from_input(ctx.bot, msg.content)\n if command is None:\n await ctx.send(m)\n return None\n\n return command\n\n\nasync def get_command_for_dropping_points(ctx: commands.Context):\n \"\"\"\n Gets the command to be executed when the user drops below the points\n threshold\n\n This is intended to be used for reversal of the action that was executed\n when the user exceeded the threshold\n \"\"\"\n await ctx.send(\n _(\n \"Enter the command to be run when the user **returns to a value below \"\n \"the points for this action to occur.** Please note that this is \"\n \"intended to be used for reversal of the action taken when the user \"\n \"exceeded the action's point value.\\n**If you do not wish to have a command run \"\n \"on dropping points, enter** `none`.\\n\\nEnter it exactly as you would \"\n \"if you were actually trying to run the command, except don't put a prefix \"\n \"and use `{user}` in place of any user/member arguments\\n\\n\"\n \"WARNING: The command entered will be run without regard to checks or cooldowns. \"\n \"Commands requiring bot owner are not allowed for security reasons.\\n\\n\"\n \"Please wait 15 seconds before entering your response.\"\n )\n )\n await asyncio.sleep(15)\n\n await ctx.send(_(\"You may enter your response now.\"))\n\n try:\n msg = await ctx.bot.wait_for(\n \"message\", check=MessagePredicate.same_context(ctx), timeout=30\n )\n except asyncio.TimeoutError:\n return None\n else:\n if msg.content == \"none\":\n return None\n command, m = get_command_from_input(ctx.bot, msg.content)\n if command is None:\n await ctx.send(m)\n return None\n\n return command\n", "path": "redbot/cogs/warnings/helpers.py"}], "after_files": [{"content": "from copy import copy\nimport asyncio\nimport discord\n\nfrom redbot.core import Config, checks, commands\nfrom redbot.core.commands.requires import PrivilegeLevel\nfrom redbot.core.i18n import Translator\nfrom redbot.core.utils.predicates import MessagePredicate\n\n_ = Translator(\"Warnings\", __file__)\n\n\nasync def warning_points_add_check(\n config: Config, ctx: commands.Context, user: discord.Member, points: int\n):\n \"\"\"Handles any action that needs to be taken or not based on the points\"\"\"\n guild = ctx.guild\n guild_settings = config.guild(guild)\n act = {}\n async with guild_settings.actions() as registered_actions:\n for a in registered_actions:\n # Actions are sorted in decreasing order of points.\n # The first action we find where the user is above the threshold will be the\n # highest action we can take.\n if points >= a[\"points\"]:\n act = a\n break\n if act and act[\"exceed_command\"] is not None: # some action needs to be taken\n await create_and_invoke_context(ctx, act[\"exceed_command\"], user)\n\n\nasync def warning_points_remove_check(\n config: Config, ctx: commands.Context, user: discord.Member, points: int\n):\n guild = ctx.guild\n guild_settings = config.guild(guild)\n act = {}\n async with guild_settings.actions() as registered_actions:\n for a in registered_actions:\n if points >= a[\"points\"]:\n act = a\n else:\n break\n if 
act and act[\"drop_command\"] is not None: # some action needs to be taken\n await create_and_invoke_context(ctx, act[\"drop_command\"], user)\n\n\nasync def create_and_invoke_context(\n realctx: commands.Context, command_str: str, user: discord.Member\n):\n m = copy(realctx.message)\n m.content = command_str.format(user=user.mention, prefix=realctx.prefix)\n fctx = await realctx.bot.get_context(m, cls=commands.Context)\n try:\n await realctx.bot.invoke(fctx)\n except (commands.CheckFailure, commands.CommandOnCooldown):\n # reinvoke bypasses checks and we don't want to run bot owner only commands here\n if fctx.command.requires.privilege_level < PrivilegeLevel.BOT_OWNER:\n await fctx.reinvoke()\n\n\ndef get_command_from_input(bot, userinput: str):\n com = None\n orig = userinput\n while com is None:\n com = bot.get_command(userinput)\n if com is None:\n userinput = \" \".join(userinput.split(\" \")[:-1])\n if len(userinput) == 0:\n break\n if com is None:\n return None, _(\"I could not find a command from that input!\")\n\n if com.requires.privilege_level >= PrivilegeLevel.BOT_OWNER:\n return (\n None,\n _(\"That command requires bot owner. I can't allow you to use that for an action\"),\n )\n return \"{prefix}\" + orig, None\n\n\nasync def get_command_for_exceeded_points(ctx: commands.Context):\n \"\"\"Gets the command to be executed when the user is at or exceeding\n the points threshold for the action\"\"\"\n await ctx.send(\n _(\n \"Enter the command to be run when the user **exceeds the points for \"\n \"this action to occur.**\\n**If you do not wish to have a command run, enter** \"\n \"`none`.\\n\\nEnter it exactly as you would if you were \"\n \"actually trying to run the command, except don't put a prefix and \"\n \"use `{user}` in place of any user/member arguments\\n\\n\"\n \"WARNING: The command entered will be run without regard to checks or cooldowns. \"\n \"Commands requiring bot owner are not allowed for security reasons.\\n\\n\"\n \"Please wait 15 seconds before entering your response.\"\n )\n )\n await asyncio.sleep(15)\n\n await ctx.send(_(\"You may enter your response now.\"))\n\n try:\n msg = await ctx.bot.wait_for(\n \"message\", check=MessagePredicate.same_context(ctx), timeout=30\n )\n except asyncio.TimeoutError:\n return None\n else:\n if msg.content == \"none\":\n return None\n\n command, m = get_command_from_input(ctx.bot, msg.content)\n if command is None:\n await ctx.send(m)\n return None\n\n return command\n\n\nasync def get_command_for_dropping_points(ctx: commands.Context):\n \"\"\"\n Gets the command to be executed when the user drops below the points\n threshold\n\n This is intended to be used for reversal of the action that was executed\n when the user exceeded the threshold\n \"\"\"\n await ctx.send(\n _(\n \"Enter the command to be run when the user **returns to a value below \"\n \"the points for this action to occur.** Please note that this is \"\n \"intended to be used for reversal of the action taken when the user \"\n \"exceeded the action's point value.\\n**If you do not wish to have a command run \"\n \"on dropping points, enter** `none`.\\n\\nEnter it exactly as you would \"\n \"if you were actually trying to run the command, except don't put a prefix \"\n \"and use `{user}` in place of any user/member arguments\\n\\n\"\n \"WARNING: The command entered will be run without regard to checks or cooldowns. 
\"\n \"Commands requiring bot owner are not allowed for security reasons.\\n\\n\"\n \"Please wait 15 seconds before entering your response.\"\n )\n )\n await asyncio.sleep(15)\n\n await ctx.send(_(\"You may enter your response now.\"))\n\n try:\n msg = await ctx.bot.wait_for(\n \"message\", check=MessagePredicate.same_context(ctx), timeout=30\n )\n except asyncio.TimeoutError:\n return None\n else:\n if msg.content == \"none\":\n return None\n command, m = get_command_from_input(ctx.bot, msg.content)\n if command is None:\n await ctx.send(m)\n return None\n\n return command\n", "path": "redbot/cogs/warnings/helpers.py"}]} | 2,503 | 343 |
gh_patches_debug_13293 | rasdani/github-patches | git_diff | ray-project__ray-8842 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Asyncio] InvalidStateError when multiple awaits on same oid
<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->
### What is the problem?
*Ray version and other system information (Python version, TensorFlow version, OS):*
### Reproduction (REQUIRED)
Please provide a script that can be run to reproduce the issue. The script should have **no external library dependencies** (i.e., use fake or mock data / environments):
If we cannot run your script, we cannot fix your issue.
```python
import ray
import time
ray.init()
@ray.remote
def f():
time.sleep(5)
oid = f.remote()
await asyncio.wait_for(oid, timeout=1)
await asyncio.wait_for(oid, timeout=1)
```
Output
```
Exception in callback get_async.<locals>.done_callback(<Future finis... result=None)>) at /Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py:65
handle: <Handle get_async.<locals>.done_callback(<Future finis... result=None)>) at /Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py:65>
Traceback (most recent call last):
File "/Users/simonmo/miniconda3/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py", line 83, in done_callback
user_future.set_result(result.result)
asyncio.base_futures.InvalidStateError: invalid state
```
- [ ] I have verified my script runs in a clean environment and reproduces the issue.
- [ ] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).
--- END ISSUE ---
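The snippet above also needs `import asyncio` and an async context to run, but the underlying failure reproduces with plain `asyncio` and no Ray at all: `Future.set_result()` raises `InvalidStateError` once the future is already done, which is what happens when two awaits cause the same completion callback to fire twice. A minimal, self-contained sketch (hypothetical helper names, not Ray's real internals) of both the failure mode and the `done()` guard:

```python
import asyncio


async def main():
    fut = asyncio.get_running_loop().create_future()

    def done_callback(value):
        # Fired once per awaiter in the buggy scenario; the guard turns the
        # second (and later) invocations into no-ops instead of raising.
        if fut.done():
            return
        fut.set_result(value)

    done_callback("first")
    done_callback("second")  # without the done() check: InvalidStateError
    print(await fut)         # prints "first"


asyncio.run(main())
```

This mirrors the shape of the fix further down, which returns early from the callback when `user_future.done()` is already true.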
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/async_compat.py`
Content:
```
1 """
2 This file should only be imported from Python 3.
3 It will raise SyntaxError when importing from Python 2.
4 """
5 import asyncio
6 from collections import namedtuple
7 import time
8 import inspect
9
10 import ray
11
12
13 def sync_to_async(func):
14 """Convert a blocking function to async function"""
15
16 if inspect.iscoroutinefunction(func):
17 return func
18
19 async def wrapper(*args, **kwargs):
20 return func(*args, **kwargs)
21
22 return wrapper
23
24
25 # Class encapsulate the get result from direct actor.
26 # Case 1: plasma_fallback_id=None, result=<Object>
27 # Case 2: plasma_fallback_id=ObjectID, result=None
28 AsyncGetResponse = namedtuple("AsyncGetResponse",
29 ["plasma_fallback_id", "result"])
30
31
32 def get_async(object_id):
33 """Asyncio compatible version of ray.get"""
34 # Delayed import because raylet import this file and
35 # it creates circular imports.
36 from ray.experimental.async_api import init as async_api_init, as_future
37 from ray.experimental.async_plasma import PlasmaObjectFuture
38
39 assert isinstance(object_id, ray.ObjectID), "Batched get is not supported."
40
41 # Setup
42 async_api_init()
43 loop = asyncio.get_event_loop()
44 core_worker = ray.worker.global_worker.core_worker
45
46 # Here's the callback used to implement async get logic.
47 # What we want:
48 # - If direct call, first try to get it from in memory store.
49 # If the object if promoted to plasma, retry it from plasma API.
50 # - If not direct call, directly use plasma API to get it.
51 user_future = loop.create_future()
52
53 # We have three future objects here.
54 # user_future is directly returned to the user from this function.
55 # and it will be eventually fulfilled by the final result.
56 # inner_future is the first attempt to retrieve the object. It can be
57 # fulfilled by either core_worker.get_async or plasma_api.as_future.
58 # When inner_future completes, done_callback will be invoked. This
59 # callback set the final object in user_future if the object hasn't
60 # been promoted by plasma, otherwise it will retry from plasma.
61 # retry_plasma_future is only created when we are getting objects that's
62 # promoted to plasma. It will also invoke the done_callback when it's
63 # fulfilled.
64
65 def done_callback(future):
66 result = future.result()
67 # Result from async plasma, transparently pass it to user future
68 if isinstance(future, PlasmaObjectFuture):
69 if isinstance(result, ray.exceptions.RayTaskError):
70 ray.worker.last_task_error_raise_time = time.time()
71 user_future.set_exception(result.as_instanceof_cause())
72 else:
73 user_future.set_result(result)
74 else:
75 # Result from direct call.
76 assert isinstance(result, AsyncGetResponse), result
77 if result.plasma_fallback_id is None:
78 if isinstance(result.result, ray.exceptions.RayTaskError):
79 ray.worker.last_task_error_raise_time = time.time()
80 user_future.set_exception(
81 result.result.as_instanceof_cause())
82 else:
83 user_future.set_result(result.result)
84 else:
85 # Schedule plasma to async get, use the the same callback.
86 retry_plasma_future = as_future(result.plasma_fallback_id)
87 retry_plasma_future.add_done_callback(done_callback)
88 # A hack to keep reference to the future so it doesn't get GC.
89 user_future.retry_plasma_future = retry_plasma_future
90
91 inner_future = loop.create_future()
92 # We must add the done_callback before sending to in_memory_store_get
93 inner_future.add_done_callback(done_callback)
94 core_worker.in_memory_store_get_async(object_id, inner_future)
95 # A hack to keep reference to inner_future so it doesn't get GC.
96 user_future.inner_future = inner_future
97 # A hack to keep a reference to the object ID for ref counting.
98 user_future.object_id = object_id
99
100 return user_future
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/async_compat.py b/python/ray/async_compat.py
--- a/python/ray/async_compat.py
+++ b/python/ray/async_compat.py
@@ -75,6 +75,11 @@
# Result from direct call.
assert isinstance(result, AsyncGetResponse), result
if result.plasma_fallback_id is None:
+ # If this future has result set already, we just need to
+ # skip the set result/exception procedure.
+ if user_future.done():
+ return
+
if isinstance(result.result, ray.exceptions.RayTaskError):
ray.worker.last_task_error_raise_time = time.time()
user_future.set_exception(
| {"golden_diff": "diff --git a/python/ray/async_compat.py b/python/ray/async_compat.py\n--- a/python/ray/async_compat.py\n+++ b/python/ray/async_compat.py\n@@ -75,6 +75,11 @@\n # Result from direct call.\n assert isinstance(result, AsyncGetResponse), result\n if result.plasma_fallback_id is None:\n+ # If this future has result set already, we just need to\n+ # skip the set result/exception procedure.\n+ if user_future.done():\n+ return\n+\n if isinstance(result.result, ray.exceptions.RayTaskError):\n ray.worker.last_task_error_raise_time = time.time()\n user_future.set_exception(\n", "issue": "[Asyncio] InvalidStateError when multiple awaits on same oid\n<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->\r\n\r\n### What is the problem?\r\n\r\n*Ray version and other system information (Python version, TensorFlow version, OS):*\r\n\r\n### Reproduction (REQUIRED)\r\nPlease provide a script that can be run to reproduce the issue. The script should have **no external library dependencies** (i.e., use fake or mock data / environments):\r\n\r\nIf we cannot run your script, we cannot fix your issue.\r\n```python\r\nimport ray\r\nimport time\r\n\r\nray.init()\r\[email protected]\r\ndef f():\r\n time.sleep(5)\r\n\r\noid = f.remote()\r\nawait asyncio.wait_for(oid, timeout=1)\r\nawait asyncio.wait_for(oid, timeout=1)\r\n```\r\n\r\nOutput\r\n```\r\nException in callback get_async.<locals>.done_callback(<Future finis... result=None)>) at /Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py:65\r\nhandle: <Handle get_async.<locals>.done_callback(<Future finis... result=None)>) at /Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py:65>\r\nTraceback (most recent call last):\r\n File \"/Users/simonmo/miniconda3/lib/python3.6/asyncio/events.py\", line 145, in _run\r\n self._callback(*self._args)\r\n File \"/Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py\", line 83, in done_callback\r\n user_future.set_result(result.result)\r\nasyncio.base_futures.InvalidStateError: invalid state\r\n```\r\n- [ ] I have verified my script runs in a clean environment and reproduces the issue.\r\n- [ ] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).\r\n\n", "before_files": [{"content": "\"\"\"\nThis file should only be imported from Python 3.\nIt will raise SyntaxError when importing from Python 2.\n\"\"\"\nimport asyncio\nfrom collections import namedtuple\nimport time\nimport inspect\n\nimport ray\n\n\ndef sync_to_async(func):\n \"\"\"Convert a blocking function to async function\"\"\"\n\n if inspect.iscoroutinefunction(func):\n return func\n\n async def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n\n# Class encapsulate the get result from direct actor.\n# Case 1: plasma_fallback_id=None, result=<Object>\n# Case 2: plasma_fallback_id=ObjectID, result=None\nAsyncGetResponse = namedtuple(\"AsyncGetResponse\",\n [\"plasma_fallback_id\", \"result\"])\n\n\ndef get_async(object_id):\n \"\"\"Asyncio compatible version of ray.get\"\"\"\n # Delayed import because raylet import this file and\n # it creates circular imports.\n from ray.experimental.async_api import init as async_api_init, as_future\n from ray.experimental.async_plasma import PlasmaObjectFuture\n\n assert isinstance(object_id, ray.ObjectID), \"Batched get is not supported.\"\n\n # Setup\n async_api_init()\n loop = asyncio.get_event_loop()\n core_worker = ray.worker.global_worker.core_worker\n\n # Here's the callback 
used to implement async get logic.\n # What we want:\n # - If direct call, first try to get it from in memory store.\n # If the object if promoted to plasma, retry it from plasma API.\n # - If not direct call, directly use plasma API to get it.\n user_future = loop.create_future()\n\n # We have three future objects here.\n # user_future is directly returned to the user from this function.\n # and it will be eventually fulfilled by the final result.\n # inner_future is the first attempt to retrieve the object. It can be\n # fulfilled by either core_worker.get_async or plasma_api.as_future.\n # When inner_future completes, done_callback will be invoked. This\n # callback set the final object in user_future if the object hasn't\n # been promoted by plasma, otherwise it will retry from plasma.\n # retry_plasma_future is only created when we are getting objects that's\n # promoted to plasma. It will also invoke the done_callback when it's\n # fulfilled.\n\n def done_callback(future):\n result = future.result()\n # Result from async plasma, transparently pass it to user future\n if isinstance(future, PlasmaObjectFuture):\n if isinstance(result, ray.exceptions.RayTaskError):\n ray.worker.last_task_error_raise_time = time.time()\n user_future.set_exception(result.as_instanceof_cause())\n else:\n user_future.set_result(result)\n else:\n # Result from direct call.\n assert isinstance(result, AsyncGetResponse), result\n if result.plasma_fallback_id is None:\n if isinstance(result.result, ray.exceptions.RayTaskError):\n ray.worker.last_task_error_raise_time = time.time()\n user_future.set_exception(\n result.result.as_instanceof_cause())\n else:\n user_future.set_result(result.result)\n else:\n # Schedule plasma to async get, use the the same callback.\n retry_plasma_future = as_future(result.plasma_fallback_id)\n retry_plasma_future.add_done_callback(done_callback)\n # A hack to keep reference to the future so it doesn't get GC.\n user_future.retry_plasma_future = retry_plasma_future\n\n inner_future = loop.create_future()\n # We must add the done_callback before sending to in_memory_store_get\n inner_future.add_done_callback(done_callback)\n core_worker.in_memory_store_get_async(object_id, inner_future)\n # A hack to keep reference to inner_future so it doesn't get GC.\n user_future.inner_future = inner_future\n # A hack to keep a reference to the object ID for ref counting.\n user_future.object_id = object_id\n\n return user_future\n", "path": "python/ray/async_compat.py"}], "after_files": [{"content": "\"\"\"\nThis file should only be imported from Python 3.\nIt will raise SyntaxError when importing from Python 2.\n\"\"\"\nimport asyncio\nfrom collections import namedtuple\nimport time\nimport inspect\n\nimport ray\n\n\ndef sync_to_async(func):\n \"\"\"Convert a blocking function to async function\"\"\"\n\n if inspect.iscoroutinefunction(func):\n return func\n\n async def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n\n# Class encapsulate the get result from direct actor.\n# Case 1: plasma_fallback_id=None, result=<Object>\n# Case 2: plasma_fallback_id=ObjectID, result=None\nAsyncGetResponse = namedtuple(\"AsyncGetResponse\",\n [\"plasma_fallback_id\", \"result\"])\n\n\ndef get_async(object_id):\n \"\"\"Asyncio compatible version of ray.get\"\"\"\n # Delayed import because raylet import this file and\n # it creates circular imports.\n from ray.experimental.async_api import init as async_api_init, as_future\n from ray.experimental.async_plasma import 
PlasmaObjectFuture\n\n assert isinstance(object_id, ray.ObjectID), \"Batched get is not supported.\"\n\n # Setup\n async_api_init()\n loop = asyncio.get_event_loop()\n core_worker = ray.worker.global_worker.core_worker\n\n # Here's the callback used to implement async get logic.\n # What we want:\n # - If direct call, first try to get it from in memory store.\n # If the object if promoted to plasma, retry it from plasma API.\n # - If not direct call, directly use plasma API to get it.\n user_future = loop.create_future()\n\n # We have three future objects here.\n # user_future is directly returned to the user from this function.\n # and it will be eventually fulfilled by the final result.\n # inner_future is the first attempt to retrieve the object. It can be\n # fulfilled by either core_worker.get_async or plasma_api.as_future.\n # When inner_future completes, done_callback will be invoked. This\n # callback set the final object in user_future if the object hasn't\n # been promoted by plasma, otherwise it will retry from plasma.\n # retry_plasma_future is only created when we are getting objects that's\n # promoted to plasma. It will also invoke the done_callback when it's\n # fulfilled.\n\n def done_callback(future):\n result = future.result()\n # Result from async plasma, transparently pass it to user future\n if isinstance(future, PlasmaObjectFuture):\n if isinstance(result, ray.exceptions.RayTaskError):\n ray.worker.last_task_error_raise_time = time.time()\n user_future.set_exception(result.as_instanceof_cause())\n else:\n user_future.set_result(result)\n else:\n # Result from direct call.\n assert isinstance(result, AsyncGetResponse), result\n if result.plasma_fallback_id is None:\n # If this future has result set already, we just need to\n # skip the set result/exception procedure.\n if user_future.done():\n return\n\n if isinstance(result.result, ray.exceptions.RayTaskError):\n ray.worker.last_task_error_raise_time = time.time()\n user_future.set_exception(\n result.result.as_instanceof_cause())\n else:\n user_future.set_result(result.result)\n else:\n # Schedule plasma to async get, use the the same callback.\n retry_plasma_future = as_future(result.plasma_fallback_id)\n retry_plasma_future.add_done_callback(done_callback)\n # A hack to keep reference to the future so it doesn't get GC.\n user_future.retry_plasma_future = retry_plasma_future\n\n inner_future = loop.create_future()\n # We must add the done_callback before sending to in_memory_store_get\n inner_future.add_done_callback(done_callback)\n core_worker.in_memory_store_get_async(object_id, inner_future)\n # A hack to keep reference to inner_future so it doesn't get GC.\n user_future.inner_future = inner_future\n # A hack to keep a reference to the object ID for ref counting.\n user_future.object_id = object_id\n\n return user_future\n", "path": "python/ray/async_compat.py"}]} | 1,726 | 152 |
gh_patches_debug_675 | rasdani/github-patches | git_diff | joke2k__faker-1423 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Faker adds path objects to sys.path_importer_cache, breaking other packages
* Faker version: 6.6.3
* OS: Gentoo Linux
After importing `faker`, entries with `PosixPath` objects are added as keys to `sys.path_importer_cache`. However, the keys are supposed to be regular `str`s there, and the wrong type breaks software relying on `str` methods being available, e.g. astroid:
```
___________________________________________ ClassNodeTest.test_slots_added_dynamically_still_inferred ____________________________________________
self = <tests.unittest_scoped_nodes.ClassNodeTest testMethod=test_slots_added_dynamically_still_inferred>
def tearDown(self):
del sys.path[0]
datadir = find("")
for key in list(sys.path_importer_cache):
> if key.startswith(datadir):
E AttributeError: 'PosixPath' object has no attribute 'startswith'
tests/resources.py:41: AttributeError
```
Note that since Faker installs a pytest plugin, it is autoloaded by default in all programs' test suites.
### Steps to reproduce
```
import sys
import faker
print(sys.path_importer_cache)
```
### Expected behavior
The printed dict should only contain `str` keys.
### Actual behavior
```
[...] PosixPath('/usr/lib/python3.9/site-packages/faker/providers/address'): FileFinder(PosixPath('/usr/lib/python3.9/site-packages/faker/providers/address')), PosixPath('/usr/lib/python3.9/site-packages/faker/providers/automotive'): FileFinder(PosixPath('/usr/lib/python3.9/site-packages/faker/providers/automotive')), [...]
```
--- END ISSUE ---
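The breakage needs neither Faker nor astroid to reproduce: `sys.path_importer_cache` is conventionally keyed by `str`, and any consumer that iterates the keys and calls string methods on them fails as soon as a `Path` object appears. A small standalone sketch (the consumer loop is modelled on the astroid tearDown quoted above, not copied from it), including the `str(...)` normalisation that keeps `Path` objects out of the import machinery:

```python
import sys
from pathlib import Path

# Simulate a package that registers an importer-cache entry under a Path key.
bad_key = Path("/tmp/example/providers")
sys.path_importer_cache[bad_key] = None

datadir = "/tmp/example"
try:
    # Consumer code that (reasonably) assumes every key is a str,
    # mirroring the astroid tearDown in the traceback above.
    for key in list(sys.path_importer_cache):
        if key.startswith(datadir):
            pass  # a real consumer would evict the entry here
except AttributeError as exc:
    print(f"broken: {exc}")  # e.g. 'PosixPath' object has no attribute 'startswith'
finally:
    sys.path_importer_cache.pop(bad_key, None)

# The fix: hand the import system a plain str, never a Path.
good_key = str(Path("/tmp/example/providers"))
assert good_key.startswith(datadir)
```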
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/utils/loading.py`
Content:
```
1 import pkgutil
2 import sys
3
4 from importlib import import_module
5 from pathlib import Path
6 from types import ModuleType
7 from typing import List, Set
8
9
10 def get_path(module: ModuleType) -> str:
11 if getattr(sys, 'frozen', False):
12 # frozen
13
14 if getattr(sys, '_MEIPASS', False):
15 # PyInstaller
16 lib_dir = Path(getattr(sys, '_MEIPASS'))
17 else:
18 # others
19 lib_dir = Path(sys.executable).parent / 'lib'
20
21 path = lib_dir.joinpath(*module.__package__.split("."))
22 else:
23 # unfrozen
24 path = Path(module.__file__).parent
25 return path
26
27
28 def list_module(module: ModuleType) -> List[str]:
29 path = get_path(module)
30
31 if getattr(sys, '_MEIPASS', False):
32 # PyInstaller
33 return [file.parent.name for file in Path(path).glob('*/__init__.py')]
34 else:
35 return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]
36
37
38 def find_available_locales(providers: List[str]) -> List[str]:
39 available_locales: Set[str] = set()
40
41 for provider_path in providers:
42
43 provider_module = import_module(provider_path)
44 if getattr(provider_module, 'localized', False):
45 langs = list_module(provider_module)
46 available_locales.update(langs)
47 available_locales: List[str] = sorted(available_locales)
48 return available_locales
49
50
51 def find_available_providers(modules: List[ModuleType]) -> List[str]:
52 available_providers = set()
53 for providers_mod in modules:
54 if providers_mod.__package__:
55 providers = [
56 '.'.join([providers_mod.__package__, mod])
57 for mod in list_module(providers_mod) if mod != '__pycache__'
58 ]
59 available_providers.update(providers)
60 return sorted(available_providers)
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/utils/loading.py b/faker/utils/loading.py
--- a/faker/utils/loading.py
+++ b/faker/utils/loading.py
@@ -22,7 +22,7 @@
else:
# unfrozen
path = Path(module.__file__).parent
- return path
+ return str(path)
def list_module(module: ModuleType) -> List[str]:
| {"golden_diff": "diff --git a/faker/utils/loading.py b/faker/utils/loading.py\n--- a/faker/utils/loading.py\n+++ b/faker/utils/loading.py\n@@ -22,7 +22,7 @@\n else:\n # unfrozen\n path = Path(module.__file__).parent\n- return path\n+ return str(path)\n \n \n def list_module(module: ModuleType) -> List[str]:\n", "issue": "Faker adds path objects to sys.path_importer_cache, breaking other packages\n* Faker version: 6.6.3\r\n* OS: Gentoo Linux\r\n\r\nAfter importing `faker`, entries with `PosixPath` objects are added as keys to `sys.path_importer_cache`. However, the keys are supposed to be regular `str`s there, and the wrong type breaks software relying on `str` methods being available, e.g. astroid:\r\n\r\n```\r\n___________________________________________ ClassNodeTest.test_slots_added_dynamically_still_inferred ____________________________________________\r\n\r\nself = <tests.unittest_scoped_nodes.ClassNodeTest testMethod=test_slots_added_dynamically_still_inferred>\r\n\r\n def tearDown(self):\r\n del sys.path[0]\r\n datadir = find(\"\")\r\n for key in list(sys.path_importer_cache):\r\n> if key.startswith(datadir):\r\nE AttributeError: 'PosixPath' object has no attribute 'startswith'\r\n\r\ntests/resources.py:41: AttributeError\r\n```\r\n\r\nNote that since Faker installs a pytest plugin, it is autoloaded by default in all programs' test suites.\r\n\r\n### Steps to reproduce\r\n\r\n```\r\nimport sys\r\nimport faker\r\nprint(sys.path_importer_cache)\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe printed dict should only contain `str` keys.\r\n\r\n### Actual behavior\r\n\r\n```\r\n[...] PosixPath('/usr/lib/python3.9/site-packages/faker/providers/address'): FileFinder(PosixPath('/usr/lib/python3.9/site-packages/faker/providers/address')), PosixPath('/usr/lib/python3.9/site-packages/faker/providers/automotive'): FileFinder(PosixPath('/usr/lib/python3.9/site-packages/faker/providers/automotive')), [...]\r\n```\r\n\n", "before_files": [{"content": "import pkgutil\nimport sys\n\nfrom importlib import import_module\nfrom pathlib import Path\nfrom types import ModuleType\nfrom typing import List, Set\n\n\ndef get_path(module: ModuleType) -> str:\n if getattr(sys, 'frozen', False):\n # frozen\n\n if getattr(sys, '_MEIPASS', False):\n # PyInstaller\n lib_dir = Path(getattr(sys, '_MEIPASS'))\n else:\n # others\n lib_dir = Path(sys.executable).parent / 'lib'\n\n path = lib_dir.joinpath(*module.__package__.split(\".\"))\n else:\n # unfrozen\n path = Path(module.__file__).parent\n return path\n\n\ndef list_module(module: ModuleType) -> List[str]:\n path = get_path(module)\n\n if getattr(sys, '_MEIPASS', False):\n # PyInstaller\n return [file.parent.name for file in Path(path).glob('*/__init__.py')]\n else:\n return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\n\n\ndef find_available_locales(providers: List[str]) -> List[str]:\n available_locales: Set[str] = set()\n\n for provider_path in providers:\n\n provider_module = import_module(provider_path)\n if getattr(provider_module, 'localized', False):\n langs = list_module(provider_module)\n available_locales.update(langs)\n available_locales: List[str] = sorted(available_locales)\n return available_locales\n\n\ndef find_available_providers(modules: List[ModuleType]) -> List[str]:\n available_providers = set()\n for providers_mod in modules:\n if providers_mod.__package__:\n providers = [\n '.'.join([providers_mod.__package__, mod])\n for mod in list_module(providers_mod) if mod != '__pycache__'\n ]\n 
available_providers.update(providers)\n return sorted(available_providers)\n", "path": "faker/utils/loading.py"}], "after_files": [{"content": "import pkgutil\nimport sys\n\nfrom importlib import import_module\nfrom pathlib import Path\nfrom types import ModuleType\nfrom typing import List, Set\n\n\ndef get_path(module: ModuleType) -> str:\n if getattr(sys, 'frozen', False):\n # frozen\n\n if getattr(sys, '_MEIPASS', False):\n # PyInstaller\n lib_dir = Path(getattr(sys, '_MEIPASS'))\n else:\n # others\n lib_dir = Path(sys.executable).parent / 'lib'\n\n path = lib_dir.joinpath(*module.__package__.split(\".\"))\n else:\n # unfrozen\n path = Path(module.__file__).parent\n return str(path)\n\n\ndef list_module(module: ModuleType) -> List[str]:\n path = get_path(module)\n\n if getattr(sys, '_MEIPASS', False):\n # PyInstaller\n return [file.parent.name for file in Path(path).glob('*/__init__.py')]\n else:\n return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\n\n\ndef find_available_locales(providers: List[str]) -> List[str]:\n available_locales: Set[str] = set()\n\n for provider_path in providers:\n\n provider_module = import_module(provider_path)\n if getattr(provider_module, 'localized', False):\n langs = list_module(provider_module)\n available_locales.update(langs)\n available_locales: List[str] = sorted(available_locales)\n return available_locales\n\n\ndef find_available_providers(modules: List[ModuleType]) -> List[str]:\n available_providers = set()\n for providers_mod in modules:\n if providers_mod.__package__:\n providers = [\n '.'.join([providers_mod.__package__, mod])\n for mod in list_module(providers_mod) if mod != '__pycache__'\n ]\n available_providers.update(providers)\n return sorted(available_providers)\n", "path": "faker/utils/loading.py"}]} | 1,148 | 86 |
gh_patches_debug_11860 | rasdani/github-patches | git_diff | ipython__ipython-1996 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test failures and docs don't build on Mac OS X Lion
I have installed the latest ipython from github (f30a572977be51a1b928788540e323ab9bc9c58f) via `python setup.py install --user`.
There are a few test failures and the docs don't build:
http://dl.dropbox.com/u/4923986/bug_reports/ipython-tests.log
http://dl.dropbox.com/u/4923986/bug_reports/ipython-docs.log
Let me know if I should create separate tickets for some of these issues. It's hard for me to tell which are known and which are new. I know that the docs v3 problem wasn't there a few weeks ago.
Note that I am using iterm2 (http://www.iterm2.com/, Build 1.0.0.20110821) instead of the default Apple terminal, in case it matters.
--- END ISSUE ---
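The fix further down extends `pkg_info()` with a `default_encoding` field, since encoding differences are a common cause of platform-specific test failures like these. A rough standalone equivalent of that extra diagnostic output, using only the standard library (IPython computes its own `DEFAULT_ENCODING`, so treat these values as approximations):

```python
import locale
import os
import platform
import pprint
import sys


def minimal_sys_info():
    """Interpreter facts that help triage platform/encoding-specific test failures."""
    return {
        "sys_version": sys.version,
        "sys_platform": sys.platform,
        "platform": platform.platform(),
        "os_name": os.name,
        "default_encoding": sys.getdefaultencoding(),
        "preferred_encoding": locale.getpreferredencoding(False),
        "stdout_encoding": getattr(sys.stdout, "encoding", None),
    }


print(pprint.pformat(minimal_sys_info()))
```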
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/utils/sysinfo.py`
Content:
```
1 # encoding: utf-8
2 """
3 Utilities for getting information about IPython and the system it's running in.
4 """
5
6 #-----------------------------------------------------------------------------
7 # Copyright (C) 2008-2011 The IPython Development Team
8 #
9 # Distributed under the terms of the BSD License. The full license is in
10 # the file COPYING, distributed as part of this software.
11 #-----------------------------------------------------------------------------
12
13 #-----------------------------------------------------------------------------
14 # Imports
15 #-----------------------------------------------------------------------------
16
17 import os
18 import platform
19 import pprint
20 import sys
21 import subprocess
22
23 from ConfigParser import ConfigParser
24
25 from IPython.core import release
26 from IPython.utils import py3compat, _sysinfo
27
28 #-----------------------------------------------------------------------------
29 # Code
30 #-----------------------------------------------------------------------------
31
32 def pkg_commit_hash(pkg_path):
33 """Get short form of commit hash given directory `pkg_path`
34
35 We get the commit hash from (in order of preference):
36
37 * IPython.utils._sysinfo.commit
38 * git output, if we are in a git repository
39
40 If these fail, we return a not-found placeholder tuple
41
42 Parameters
43 ----------
44 pkg_path : str
45 directory containing package
46 only used for getting commit from active repo
47
48 Returns
49 -------
50 hash_from : str
51 Where we got the hash from - description
52 hash_str : str
53 short form of hash
54 """
55 # Try and get commit from written commit text file
56 if _sysinfo.commit:
57 return "installation", _sysinfo.commit
58
59 # maybe we are in a repository
60 proc = subprocess.Popen('git rev-parse --short HEAD',
61 stdout=subprocess.PIPE,
62 stderr=subprocess.PIPE,
63 cwd=pkg_path, shell=True)
64 repo_commit, _ = proc.communicate()
65 if repo_commit:
66 return 'repository', repo_commit.strip()
67 return '(none found)', '<not found>'
68
69
70 def pkg_info(pkg_path):
71 """Return dict describing the context of this package
72
73 Parameters
74 ----------
75 pkg_path : str
76 path containing __init__.py for package
77
78 Returns
79 -------
80 context : dict
81 with named parameters of interest
82 """
83 src, hsh = pkg_commit_hash(pkg_path)
84 return dict(
85 ipython_version=release.version,
86 ipython_path=pkg_path,
87 commit_source=src,
88 commit_hash=hsh,
89 sys_version=sys.version,
90 sys_executable=sys.executable,
91 sys_platform=sys.platform,
92 platform=platform.platform(),
93 os_name=os.name,
94 )
95
96
97 @py3compat.doctest_refactor_print
98 def sys_info():
99 """Return useful information about IPython and the system, as a string.
100
101 Example
102 -------
103 In [2]: print sys_info()
104 {'commit_hash': '144fdae', # random
105 'commit_source': 'repository',
106 'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',
107 'ipython_version': '0.11.dev',
108 'os_name': 'posix',
109 'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',
110 'sys_executable': '/usr/bin/python',
111 'sys_platform': 'linux2',
112 'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'}
113 """
114 p = os.path
115 path = p.dirname(p.abspath(p.join(__file__, '..')))
116 return pprint.pformat(pkg_info(path))
117
118
119 def _num_cpus_unix():
120 """Return the number of active CPUs on a Unix system."""
121 return os.sysconf("SC_NPROCESSORS_ONLN")
122
123
124 def _num_cpus_darwin():
125 """Return the number of active CPUs on a Darwin system."""
126 p = subprocess.Popen(['sysctl','-n','hw.ncpu'],stdout=subprocess.PIPE)
127 return p.stdout.read()
128
129
130 def _num_cpus_windows():
131 """Return the number of active CPUs on a Windows system."""
132 return os.environ.get("NUMBER_OF_PROCESSORS")
133
134
135 def num_cpus():
136 """Return the effective number of CPUs in the system as an integer.
137
138 This cross-platform function makes an attempt at finding the total number of
139 available CPUs in the system, as returned by various underlying system and
140 python calls.
141
142 If it can't find a sensible answer, it returns 1 (though an error *may* make
143 it return a large positive number that's actually incorrect).
144 """
145
146 # Many thanks to the Parallel Python project (http://www.parallelpython.com)
147 # for the names of the keys we needed to look up for this function. This
148 # code was inspired by their equivalent function.
149
150 ncpufuncs = {'Linux':_num_cpus_unix,
151 'Darwin':_num_cpus_darwin,
152 'Windows':_num_cpus_windows,
153 # On Vista, python < 2.5.2 has a bug and returns 'Microsoft'
154 # See http://bugs.python.org/issue1082 for details.
155 'Microsoft':_num_cpus_windows,
156 }
157
158 ncpufunc = ncpufuncs.get(platform.system(),
159 # default to unix version (Solaris, AIX, etc)
160 _num_cpus_unix)
161
162 try:
163 ncpus = max(1,int(ncpufunc()))
164 except:
165 ncpus = 1
166 return ncpus
167
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/utils/sysinfo.py b/IPython/utils/sysinfo.py
--- a/IPython/utils/sysinfo.py
+++ b/IPython/utils/sysinfo.py
@@ -23,7 +23,7 @@
from ConfigParser import ConfigParser
from IPython.core import release
-from IPython.utils import py3compat, _sysinfo
+from IPython.utils import py3compat, _sysinfo, encoding
#-----------------------------------------------------------------------------
# Code
@@ -91,6 +91,7 @@
sys_platform=sys.platform,
platform=platform.platform(),
os_name=os.name,
+ default_encoding=encoding.DEFAULT_ENCODING,
)
| {"golden_diff": "diff --git a/IPython/utils/sysinfo.py b/IPython/utils/sysinfo.py\n--- a/IPython/utils/sysinfo.py\n+++ b/IPython/utils/sysinfo.py\n@@ -23,7 +23,7 @@\n from ConfigParser import ConfigParser\n \n from IPython.core import release\n-from IPython.utils import py3compat, _sysinfo\n+from IPython.utils import py3compat, _sysinfo, encoding\n \n #-----------------------------------------------------------------------------\n # Code\n@@ -91,6 +91,7 @@\n sys_platform=sys.platform,\n platform=platform.platform(),\n os_name=os.name,\n+ default_encoding=encoding.DEFAULT_ENCODING,\n )\n", "issue": "Test failures and docs don't build on Mac OS X Lion\nI have installed the latest ipython from github (f30a572977be51a1b928788540e323ab9bc9c58f) via `python setup.py install --user`.\n\nThere are a few test failures and the docs don't build:\nhttp://dl.dropbox.com/u/4923986/bug_reports/ipython-tests.log\nhttp://dl.dropbox.com/u/4923986/bug_reports/ipython-docs.log\n\nLet me know if I should create separate tickets for some of these issues. It's hard for me to tell which are known and which are new. I know that the docs v3 problem wasn't there a few weeks ago. \n\nNote that I am using iterm2 (http://www.iterm2.com/, Build 1.0.0.20110821) instead of the default Apple terminal, in case it matters.\n\n", "before_files": [{"content": "# encoding: utf-8\n\"\"\"\nUtilities for getting information about IPython and the system it's running in.\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport os\nimport platform\nimport pprint\nimport sys\nimport subprocess\n\nfrom ConfigParser import ConfigParser\n\nfrom IPython.core import release\nfrom IPython.utils import py3compat, _sysinfo\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\ndef pkg_commit_hash(pkg_path):\n \"\"\"Get short form of commit hash given directory `pkg_path`\n\n We get the commit hash from (in order of preference):\n\n * IPython.utils._sysinfo.commit\n * git output, if we are in a git repository\n\n If these fail, we return a not-found placeholder tuple\n\n Parameters\n ----------\n pkg_path : str\n directory containing package\n only used for getting commit from active repo\n\n Returns\n -------\n hash_from : str\n Where we got the hash from - description\n hash_str : str\n short form of hash\n \"\"\"\n # Try and get commit from written commit text file\n if _sysinfo.commit:\n return \"installation\", _sysinfo.commit\n\n # maybe we are in a repository\n proc = subprocess.Popen('git rev-parse --short HEAD',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=pkg_path, shell=True)\n repo_commit, _ = proc.communicate()\n if repo_commit:\n return 'repository', repo_commit.strip()\n return '(none found)', '<not found>'\n\n\ndef pkg_info(pkg_path):\n \"\"\"Return dict describing the context of this package\n\n Parameters\n ----------\n pkg_path : str\n path containing __init__.py for package\n\n Returns\n -------\n context : dict\n 
with named parameters of interest\n \"\"\"\n src, hsh = pkg_commit_hash(pkg_path)\n return dict(\n ipython_version=release.version,\n ipython_path=pkg_path,\n commit_source=src,\n commit_hash=hsh,\n sys_version=sys.version,\n sys_executable=sys.executable,\n sys_platform=sys.platform,\n platform=platform.platform(),\n os_name=os.name,\n )\n\n\[email protected]_refactor_print\ndef sys_info():\n \"\"\"Return useful information about IPython and the system, as a string.\n\n Example\n -------\n In [2]: print sys_info()\n {'commit_hash': '144fdae', # random\n 'commit_source': 'repository',\n 'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',\n 'ipython_version': '0.11.dev',\n 'os_name': 'posix',\n 'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',\n 'sys_executable': '/usr/bin/python',\n 'sys_platform': 'linux2',\n 'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\\\n[GCC 4.4.5]'}\n \"\"\"\n p = os.path\n path = p.dirname(p.abspath(p.join(__file__, '..')))\n return pprint.pformat(pkg_info(path))\n\n\ndef _num_cpus_unix():\n \"\"\"Return the number of active CPUs on a Unix system.\"\"\"\n return os.sysconf(\"SC_NPROCESSORS_ONLN\")\n\n\ndef _num_cpus_darwin():\n \"\"\"Return the number of active CPUs on a Darwin system.\"\"\"\n p = subprocess.Popen(['sysctl','-n','hw.ncpu'],stdout=subprocess.PIPE)\n return p.stdout.read()\n\n\ndef _num_cpus_windows():\n \"\"\"Return the number of active CPUs on a Windows system.\"\"\"\n return os.environ.get(\"NUMBER_OF_PROCESSORS\")\n\n\ndef num_cpus():\n \"\"\"Return the effective number of CPUs in the system as an integer.\n\n This cross-platform function makes an attempt at finding the total number of\n available CPUs in the system, as returned by various underlying system and\n python calls.\n\n If it can't find a sensible answer, it returns 1 (though an error *may* make\n it return a large positive number that's actually incorrect).\n \"\"\"\n\n # Many thanks to the Parallel Python project (http://www.parallelpython.com)\n # for the names of the keys we needed to look up for this function. This\n # code was inspired by their equivalent function.\n\n ncpufuncs = {'Linux':_num_cpus_unix,\n 'Darwin':_num_cpus_darwin,\n 'Windows':_num_cpus_windows,\n # On Vista, python < 2.5.2 has a bug and returns 'Microsoft'\n # See http://bugs.python.org/issue1082 for details.\n 'Microsoft':_num_cpus_windows,\n }\n\n ncpufunc = ncpufuncs.get(platform.system(),\n # default to unix version (Solaris, AIX, etc)\n _num_cpus_unix)\n\n try:\n ncpus = max(1,int(ncpufunc()))\n except:\n ncpus = 1\n return ncpus\n\n", "path": "IPython/utils/sysinfo.py"}], "after_files": [{"content": "# encoding: utf-8\n\"\"\"\nUtilities for getting information about IPython and the system it's running in.\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. 
The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport os\nimport platform\nimport pprint\nimport sys\nimport subprocess\n\nfrom ConfigParser import ConfigParser\n\nfrom IPython.core import release\nfrom IPython.utils import py3compat, _sysinfo, encoding\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\ndef pkg_commit_hash(pkg_path):\n \"\"\"Get short form of commit hash given directory `pkg_path`\n\n We get the commit hash from (in order of preference):\n\n * IPython.utils._sysinfo.commit\n * git output, if we are in a git repository\n\n If these fail, we return a not-found placeholder tuple\n\n Parameters\n ----------\n pkg_path : str\n directory containing package\n only used for getting commit from active repo\n\n Returns\n -------\n hash_from : str\n Where we got the hash from - description\n hash_str : str\n short form of hash\n \"\"\"\n # Try and get commit from written commit text file\n if _sysinfo.commit:\n return \"installation\", _sysinfo.commit\n\n # maybe we are in a repository\n proc = subprocess.Popen('git rev-parse --short HEAD',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=pkg_path, shell=True)\n repo_commit, _ = proc.communicate()\n if repo_commit:\n return 'repository', repo_commit.strip()\n return '(none found)', '<not found>'\n\n\ndef pkg_info(pkg_path):\n \"\"\"Return dict describing the context of this package\n\n Parameters\n ----------\n pkg_path : str\n path containing __init__.py for package\n\n Returns\n -------\n context : dict\n with named parameters of interest\n \"\"\"\n src, hsh = pkg_commit_hash(pkg_path)\n return dict(\n ipython_version=release.version,\n ipython_path=pkg_path,\n commit_source=src,\n commit_hash=hsh,\n sys_version=sys.version,\n sys_executable=sys.executable,\n sys_platform=sys.platform,\n platform=platform.platform(),\n os_name=os.name,\n default_encoding=encoding.DEFAULT_ENCODING,\n )\n\n\[email protected]_refactor_print\ndef sys_info():\n \"\"\"Return useful information about IPython and the system, as a string.\n\n Example\n -------\n In [2]: print sys_info()\n {'commit_hash': '144fdae', # random\n 'commit_source': 'repository',\n 'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',\n 'ipython_version': '0.11.dev',\n 'os_name': 'posix',\n 'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',\n 'sys_executable': '/usr/bin/python',\n 'sys_platform': 'linux2',\n 'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\\\n[GCC 4.4.5]'}\n \"\"\"\n p = os.path\n path = p.dirname(p.abspath(p.join(__file__, '..')))\n return pprint.pformat(pkg_info(path))\n\n\ndef _num_cpus_unix():\n \"\"\"Return the number of active CPUs on a Unix system.\"\"\"\n return os.sysconf(\"SC_NPROCESSORS_ONLN\")\n\n\ndef _num_cpus_darwin():\n \"\"\"Return the number of active CPUs on a Darwin system.\"\"\"\n p = subprocess.Popen(['sysctl','-n','hw.ncpu'],stdout=subprocess.PIPE)\n return p.stdout.read()\n\n\ndef _num_cpus_windows():\n \"\"\"Return the number of active CPUs on a Windows system.\"\"\"\n return os.environ.get(\"NUMBER_OF_PROCESSORS\")\n\n\ndef num_cpus():\n \"\"\"Return the effective number of 
CPUs in the system as an integer.\n\n This cross-platform function makes an attempt at finding the total number of\n available CPUs in the system, as returned by various underlying system and\n python calls.\n\n If it can't find a sensible answer, it returns 1 (though an error *may* make\n it return a large positive number that's actually incorrect).\n \"\"\"\n\n # Many thanks to the Parallel Python project (http://www.parallelpython.com)\n # for the names of the keys we needed to look up for this function. This\n # code was inspired by their equivalent function.\n\n ncpufuncs = {'Linux':_num_cpus_unix,\n 'Darwin':_num_cpus_darwin,\n 'Windows':_num_cpus_windows,\n # On Vista, python < 2.5.2 has a bug and returns 'Microsoft'\n # See http://bugs.python.org/issue1082 for details.\n 'Microsoft':_num_cpus_windows,\n }\n\n ncpufunc = ncpufuncs.get(platform.system(),\n # default to unix version (Solaris, AIX, etc)\n _num_cpus_unix)\n\n try:\n ncpus = max(1,int(ncpufunc()))\n except:\n ncpus = 1\n return ncpus\n\n", "path": "IPython/utils/sysinfo.py"}]} | 2,083 | 138 |
gh_patches_debug_33060 | rasdani/github-patches | git_diff | pypa__virtualenv-1794 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
activate_this.py failed for python2 virtualenvs
**Issue**
It seems that pipenv recently introduced a new type of activate_this.py. On Windows the content of activate_this.py looks something like this:
```
prev_length = len(sys.path)
for lib in "'..\\Lib\\site-packages".split(os.pathsep):
path = os.path.realpath(os.path.join(bin_dir, lib))
site.addsitedir(path.decode("utf-8") if "'yes" else path)
sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]
```
As you can see the "'..\\Lib\\site-packages" is obviously wrong.
**Environment**
Provide at least:
- OS: Windows 10
- ``pip list`` of the host python where ``virtualenv`` is installed:
```console
virtualenv 20.0.18
virtualenv-clone 0.5.4
```
**Output of the virtual environment creation**
As I'm using virtualenv through pipenv, I failed to grab the virtualenv output
```
[ ==] Creating virtual environment...created virtual environment CPython2.7.17.final.0-64 in 641ms
creator CPython2Windows(dest=C:\Users\win10\.virtualenvs\win10-obmjl69F, clear=False, global=False)
seeder FromAppData(download=False, pip=latest, setuptools=latest, wheel=latest, via=copy, app_data_dir=C:\Users\win10\AppData\Local\pypa\virtualenv\seed-app-data\v1.0.1)
activators BashActivator,BatchActivator,FishActivator,PowerShellActivator,PythonActivator
```
However I've located the related code and wrote out its runtime variable information:
The following is the output of _repr_unicode function in ```src/virtualenv/activation/python/__init__.py```
```
'(win10)
'C:\\Users\\win10\\.virtualenvs\\win10-obmjl69F
'win10-obmjl69F
'Scripts
';
'..\\Lib\\site-packages
'yes
```
As you can see, there's an additional ' before each item. I've done a small experiment on python 3.6 and 3.7:
```
>>> value = "..\\123456"
>>> repr(value.encode("utf-8"))
"b'..\\\\123456'"
>>> repr(value.encode("utf-8"))[1:-1]
"'..\\\\123456"
>>>
```
I believe there's something wrong with this function. This function was introduced in PR #1503
--- END ISSUE ---
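The experiment above already points at the cause: on Python 3, `repr()` of a `bytes` object carries a `b'...'` prefix, so slicing with `[1:-1]` keeps the opening quote, whereas on Python 2 `repr()` of a byte string has no prefix and `[1:-1]` is correct. The stray `'` in the generated activate_this.py is exactly that leftover prefix character. A quick self-contained check of the slicing (this is only the string mechanics, not virtualenv's template rendering):

```python
import sys

value = "..\\Lib\\site-packages"   # the relative lib path embedded in the template
encoded = value.encode("utf-8")

print(repr(encoded))         # Python 3 prints: b'..\\Lib\\site-packages'
print(repr(encoded)[1:-1])   # Python 3 prints: '..\\Lib\\site-packages   <- stray quote
print(repr(encoded)[2:-1])   # Python 3 prints: ..\\Lib\\site-packages

# Python 2's repr() of a byte string has no b'' prefix, so the slice start has to
# depend on the interpreter doing the rendering -- which is what the fix does.
start = 2 if sys.version_info[0] == 3 else 1
print(repr(encoded)[start:-1])
```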
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/virtualenv/activation/python/__init__.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 import os
4 from collections import OrderedDict
5
6 from virtualenv.util.path import Path
7 from virtualenv.util.six import ensure_text
8
9 from ..via_template import ViaTemplateActivator
10
11
12 class PythonActivator(ViaTemplateActivator):
13 def templates(self):
14 yield Path("activate_this.py")
15
16 def replacements(self, creator, dest_folder):
17 replacements = super(PythonActivator, self).replacements(creator, dest_folder)
18 lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)
19 win_py2 = creator.interpreter.platform == "win32" and creator.interpreter.version_info.major == 2
20 replacements.update(
21 {
22 "__LIB_FOLDERS__": ensure_text(os.pathsep.join(lib_folders.keys())),
23 "__DECODE_PATH__": ("yes" if win_py2 else ""),
24 }
25 )
26 return replacements
27
28 @staticmethod
29 def _repr_unicode(creator, value):
30 py2 = creator.interpreter.version_info.major == 2
31 if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals
32 value = ensure_text(repr(value.encode("utf-8"))[1:-1])
33 return value
34
```
Path: `src/virtualenv/activation/via_template.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 import os
4 import sys
5 from abc import ABCMeta, abstractmethod
6
7 from six import add_metaclass
8
9 from virtualenv.util.six import ensure_text
10
11 from .activator import Activator
12
13 if sys.version_info >= (3, 7):
14 from importlib.resources import read_text
15 else:
16 from importlib_resources import read_text
17
18
19 @add_metaclass(ABCMeta)
20 class ViaTemplateActivator(Activator):
21 @abstractmethod
22 def templates(self):
23 raise NotImplementedError
24
25 def generate(self, creator):
26 dest_folder = creator.bin_dir
27 replacements = self.replacements(creator, dest_folder)
28 self._generate(replacements, self.templates(), dest_folder, creator)
29 if self.flag_prompt is not None:
30 creator.pyenv_cfg["prompt"] = self.flag_prompt
31
32 def replacements(self, creator, dest_folder):
33 return {
34 "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
35 "__VIRTUAL_ENV__": ensure_text(str(creator.dest)),
36 "__VIRTUAL_NAME__": creator.env_name,
37 "__BIN_NAME__": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),
38 "__PATH_SEP__": ensure_text(os.pathsep),
39 }
40
41 def _generate(self, replacements, templates, to_folder, creator):
42 for template in templates:
43 text = self.instantiate_template(replacements, template, creator)
44 dest = to_folder / self.as_name(template)
45 dest.write_text(text, encoding="utf-8")
46
47 def as_name(self, template):
48 return template.name
49
50 def instantiate_template(self, replacements, template, creator):
51 # read text and do replacements
52 text = read_text(self.__module__, str(template), encoding="utf-8", errors="strict")
53 for key, value in replacements.items():
54 value = self._repr_unicode(creator, value)
55 text = text.replace(key, value)
56 return text
57
58 @staticmethod
59 def _repr_unicode(creator, value):
60 # by default we just let it be unicode
61 return value
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py
--- a/src/virtualenv/activation/python/__init__.py
+++ b/src/virtualenv/activation/python/__init__.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import, unicode_literals
import os
+import sys
from collections import OrderedDict
from virtualenv.util.path import Path
@@ -29,5 +30,5 @@
def _repr_unicode(creator, value):
py2 = creator.interpreter.version_info.major == 2
if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals
- value = ensure_text(repr(value.encode("utf-8"))[1:-1])
+ value = ensure_text(repr(value.encode("utf-8"))[2 if sys.version_info[0] == 3 else 1 : -1])
return value
diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py
--- a/src/virtualenv/activation/via_template.py
+++ b/src/virtualenv/activation/via_template.py
@@ -25,9 +25,10 @@
def generate(self, creator):
dest_folder = creator.bin_dir
replacements = self.replacements(creator, dest_folder)
- self._generate(replacements, self.templates(), dest_folder, creator)
+ at_path = self._generate(replacements, self.templates(), dest_folder, creator)
if self.flag_prompt is not None:
creator.pyenv_cfg["prompt"] = self.flag_prompt
+ return at_path
def replacements(self, creator, dest_folder):
return {
@@ -43,6 +44,7 @@
text = self.instantiate_template(replacements, template, creator)
dest = to_folder / self.as_name(template)
dest.write_text(text, encoding="utf-8")
+ return dest
def as_name(self, template):
return template.name
| {"golden_diff": "diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py\n--- a/src/virtualenv/activation/python/__init__.py\n+++ b/src/virtualenv/activation/python/__init__.py\n@@ -1,6 +1,7 @@\n from __future__ import absolute_import, unicode_literals\n \n import os\n+import sys\n from collections import OrderedDict\n \n from virtualenv.util.path import Path\n@@ -29,5 +30,5 @@\n def _repr_unicode(creator, value):\n py2 = creator.interpreter.version_info.major == 2\n if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals\n- value = ensure_text(repr(value.encode(\"utf-8\"))[1:-1])\n+ value = ensure_text(repr(value.encode(\"utf-8\"))[2 if sys.version_info[0] == 3 else 1 : -1])\n return value\ndiff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py\n--- a/src/virtualenv/activation/via_template.py\n+++ b/src/virtualenv/activation/via_template.py\n@@ -25,9 +25,10 @@\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n- self._generate(replacements, self.templates(), dest_folder, creator)\n+ at_path = self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n+ return at_path\n \n def replacements(self, creator, dest_folder):\n return {\n@@ -43,6 +44,7 @@\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n dest.write_text(text, encoding=\"utf-8\")\n+ return dest\n \n def as_name(self, template):\n return template.name\n", "issue": "activate_this.py failed for python2 virtualenvs\n**Issue**\r\n\r\nIt seems recently pipenv introduced a new type of activate_this.py. 
On windows the content of activate_this.py has something like this:\r\n```\r\nprev_length = len(sys.path)\r\nfor lib in \"'..\\\\Lib\\\\site-packages\".split(os.pathsep):\r\n path = os.path.realpath(os.path.join(bin_dir, lib))\r\n site.addsitedir(path.decode(\"utf-8\") if \"'yes\" else path)\r\nsys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\r\n```\r\nAs you can see the \"'..\\\\Lib\\\\site-packages\" is obviously wrong.\r\n\r\n**Environment**\r\n\r\nProvide at least:\r\n- OS: Windows 10\r\n- ``pip list`` of the host python where ``virtualenv`` is installed:\r\n\r\n ```console\r\n virtualenv 20.0.18\r\n virtualenv-clone 0.5.4\r\n ```\r\n\r\n**Output of the virtual environment creation**\r\n\r\nAs I'm using virtualenv through pipenv, so I failed to grab the virtualenv output\r\n\r\n```\r\n[ ==] Creating virtual environment...created virtual environment CPython2.7.17.final.0-64 in 641ms\r\n\r\n creator CPython2Windows(dest=C:\\Users\\win10\\.virtualenvs\\win10-obmjl69F, clear=False, global=False)\r\n\r\n seeder FromAppData(download=False, pip=latest, setuptools=latest, wheel=latest, via=copy, app_data_dir=C:\\Users\\win10\\AppData\\Local\\pypa\\virtualenv\\seed-app-data\\v1.0.1)\r\n\r\n activators BashActivator,BatchActivator,FishActivator,PowerShellActivator,PythonActivator\r\n```\r\n\r\nHowever I've located the related code and wrote out its runtime variable information:\r\nThe following is the output of _repr_unicode function in ```src/virtualenv/activation/python/__init__.py```\r\n\r\n```\r\n'(win10) \r\n'C:\\\\Users\\\\win10\\\\.virtualenvs\\\\win10-obmjl69F\r\n'win10-obmjl69F\r\n'Scripts\r\n';\r\n'..\\\\Lib\\\\site-packages\r\n'yes\r\n```\r\nAs you can see, there's an additional ' before each item. I've done a small experiment on python 3.6 and 3.7:\r\n```\r\n>>> value = \"..\\\\123456\"\r\n>>> repr(value.encode(\"utf-8\"))\r\n\"b'..\\\\\\\\123456'\"\r\n>>> repr(value.encode(\"utf-8\"))[1:-1]\r\n\"'..\\\\\\\\123456\"\r\n>>>\r\n```\r\nI believe there's something wrong with this function. 
This function is introduced in PR #1503 \n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nfrom collections import OrderedDict\n\nfrom virtualenv.util.path import Path\nfrom virtualenv.util.six import ensure_text\n\nfrom ..via_template import ViaTemplateActivator\n\n\nclass PythonActivator(ViaTemplateActivator):\n def templates(self):\n yield Path(\"activate_this.py\")\n\n def replacements(self, creator, dest_folder):\n replacements = super(PythonActivator, self).replacements(creator, dest_folder)\n lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)\n win_py2 = creator.interpreter.platform == \"win32\" and creator.interpreter.version_info.major == 2\n replacements.update(\n {\n \"__LIB_FOLDERS__\": ensure_text(os.pathsep.join(lib_folders.keys())),\n \"__DECODE_PATH__\": (\"yes\" if win_py2 else \"\"),\n }\n )\n return replacements\n\n @staticmethod\n def _repr_unicode(creator, value):\n py2 = creator.interpreter.version_info.major == 2\n if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals\n value = ensure_text(repr(value.encode(\"utf-8\"))[1:-1])\n return value\n", "path": "src/virtualenv/activation/python/__init__.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import add_metaclass\n\nfrom virtualenv.util.six import ensure_text\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_text\nelse:\n from importlib_resources import read_text\n\n\n@add_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n\n def replacements(self, creator, dest_folder):\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": ensure_text(str(creator.dest)),\n \"__VIRTUAL_NAME__\": creator.env_name,\n \"__BIN_NAME__\": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n dest.write_text(text, encoding=\"utf-8\")\n\n def as_name(self, template):\n return template.name\n\n def instantiate_template(self, replacements, template, creator):\n # read text and do replacements\n text = read_text(self.__module__, str(template), encoding=\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nfrom collections import OrderedDict\n\nfrom virtualenv.util.path import Path\nfrom virtualenv.util.six import ensure_text\n\nfrom ..via_template import 
ViaTemplateActivator\n\n\nclass PythonActivator(ViaTemplateActivator):\n def templates(self):\n yield Path(\"activate_this.py\")\n\n def replacements(self, creator, dest_folder):\n replacements = super(PythonActivator, self).replacements(creator, dest_folder)\n lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)\n win_py2 = creator.interpreter.platform == \"win32\" and creator.interpreter.version_info.major == 2\n replacements.update(\n {\n \"__LIB_FOLDERS__\": ensure_text(os.pathsep.join(lib_folders.keys())),\n \"__DECODE_PATH__\": (\"yes\" if win_py2 else \"\"),\n }\n )\n return replacements\n\n @staticmethod\n def _repr_unicode(creator, value):\n py2 = creator.interpreter.version_info.major == 2\n if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals\n value = ensure_text(repr(value.encode(\"utf-8\"))[2 if sys.version_info[0] == 3 else 1 : -1])\n return value\n", "path": "src/virtualenv/activation/python/__init__.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import add_metaclass\n\nfrom virtualenv.util.six import ensure_text\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_text\nelse:\n from importlib_resources import read_text\n\n\n@add_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n at_path = self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n return at_path\n\n def replacements(self, creator, dest_folder):\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": ensure_text(str(creator.dest)),\n \"__VIRTUAL_NAME__\": creator.env_name,\n \"__BIN_NAME__\": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n dest.write_text(text, encoding=\"utf-8\")\n return dest\n\n def as_name(self, template):\n return template.name\n\n def instantiate_template(self, replacements, template, creator):\n # read text and do replacements\n text = read_text(self.__module__, str(template), encoding=\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}]} | 1,799 | 454 |
gh_patches_debug_16661 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-265 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
method to load yaml from a string
*cfn-lint version: cfn-lint 0.4.2*
*Description of issue.*
When consuming cfn-lint as a module, there is currently only an interface to load YAML from a file (`cfnlint.core.decode.cfn_yaml.load`). In our project ([taskcat](https://github.com/aws-quickstart/taskcat/)) we have a need to load templates from strings as well. 

While we could just do something like the snippet below, we think it would be useful to others to have a simpler interface for YAML strings:
```python
from cfnlint.decode.cfn_yaml import MarkedLoader, multi_constructor
loader = MarkedLoader(my_yaml_string, None)
loader.add_multi_constructor('!', multi_constructor)
template = loader.get_single_data()
```
--- END ISSUE ---
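For orientation, the snippet in the issue can be folded into the single helper the reporter is asking for. A minimal sketch, assuming a new `loads(yaml_string, filename=None)` entry point that does not exist in the current API (the authoritative change is the patch at the end of this record):

```python
# Hypothetical convenience wrapper; the name `loads` and its signature are
# assumptions for illustration, not existing cfn-lint API.
from cfnlint.decode.cfn_yaml import MarkedLoader, multi_constructor


def loads(yaml_string, filename=None):
    """Load a CloudFormation template from a YAML string."""
    loader = MarkedLoader(yaml_string, filename)
    loader.add_multi_constructor('!', multi_constructor)
    template = loader.get_single_data()
    # Mirror load(): treat an empty document as an empty dict
    return template if template is not None else {}


# Usage sketch:
# template = loads("AWSTemplateFormatVersion: '2010-09-09'")
```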
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/decode/cfn_yaml.py`
Content:
```
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import logging
18 import six
19 from yaml.composer import Composer
20 from yaml.reader import Reader
21 from yaml.scanner import Scanner
22 from yaml.resolver import Resolver
23 from yaml import ScalarNode
24 from yaml import SequenceNode
25 from yaml import MappingNode
26 from yaml.constructor import SafeConstructor
27 from yaml.constructor import ConstructorError
28 import cfnlint
29 from cfnlint.decode.str_node import str_node
30
31 try:
32 from yaml.cyaml import CParser as Parser # pylint: disable=ungrouped-imports
33 cyaml = True
34 except ImportError:
35 from yaml.parser import Parser # pylint: disable=ungrouped-imports
36 cyaml = False
37
38 UNCONVERTED_SUFFIXES = ['Ref', 'Condition']
39 FN_PREFIX = 'Fn::'
40
41 LOGGER = logging.getLogger(__name__)
42
43
44 class CfnParseError(ConstructorError):
45 """
46 Error thrown when the template contains Cfn Error
47 """
48 def __init__(self, filename, message, line_number, column_number, key=' '):
49
50 # Call the base class constructor with the parameters it needs
51 super(CfnParseError, self).__init__(message)
52
53 # Now for your custom code...
54 self.filename = filename
55 self.line_number = line_number
56 self.column_number = column_number
57 self.message = message
58 self.match = cfnlint.Match(
59 line_number + 1, column_number + 1, line_number + 1,
60 column_number + 1 + len(key), filename, cfnlint.ParseError(), message=message)
61
62
63 class NodeConstructor(SafeConstructor):
64 """
65 Node Constructors for loading different types in Yaml
66 """
67
68 def __init__(self, filename):
69 # Call the base class constructor
70 super(NodeConstructor, self).__init__()
71
72 self.filename = filename
73
74 # To support lazy loading, the original constructors first yield
75 # an empty object, then fill them in when iterated. Due to
76 # laziness we omit this behaviour (and will only do "deep
77 # construction") by first exhausting iterators, then yielding
78 # copies.
79 def construct_yaml_map(self, node):
80
81 # Check for duplicate keys on the current level, this is not desirable
82 # because a dict does not support this. It overwrites it with the last
83 # occurance, which can give unexpected results
84 mapping = {}
85 for key_node, value_node in node.value:
86 key = self.construct_object(key_node, False)
87 value = self.construct_object(value_node, False)
88
89 if key in mapping:
90 raise CfnParseError(
91 self.filename,
92 'Duplicate resource found "{}" (line {})'.format(key, key_node.start_mark.line + 1),
93 key_node.start_mark.line, key_node.start_mark.column, key)
94 mapping[key] = value
95
96 obj, = SafeConstructor.construct_yaml_map(self, node)
97 return obj
98
99 def construct_yaml_str(self, node):
100 obj = SafeConstructor.construct_yaml_str(self, node)
101 assert isinstance(obj, (six.string_types))
102 return str_node(obj, node.start_mark, node.end_mark)
103
104 def construct_yaml_null_error(self, node):
105 """Throw a null error"""
106 raise CfnParseError(
107 self.filename,
108 'Null value at line {0} column {1}'.format(node.start_mark.line, node.start_mark.column),
109 node.start_mark.line, node.start_mark.column, ' ')
110
111
112 NodeConstructor.add_constructor(
113 u'tag:yaml.org,2002:map',
114 NodeConstructor.construct_yaml_map)
115
116 NodeConstructor.add_constructor(
117 u'tag:yaml.org,2002:str',
118 NodeConstructor.construct_yaml_str)
119
120 NodeConstructor.add_constructor(
121 u'tag:yaml.org,2002:null',
122 NodeConstructor.construct_yaml_null_error)
123
124
125 class MarkedLoader(Reader, Scanner, Parser, Composer, NodeConstructor, Resolver):
126 """
127 Class for marked loading YAML
128 """
129 # pylint: disable=non-parent-init-called,super-init-not-called
130 def __init__(self, stream, filename):
131 Reader.__init__(self, stream)
132 Scanner.__init__(self)
133 if cyaml:
134 Parser.__init__(self, stream)
135 else:
136 Parser.__init__(self)
137 Composer.__init__(self)
138 SafeConstructor.__init__(self)
139 Resolver.__init__(self)
140 NodeConstructor.__init__(self, filename)
141
142
143 def multi_constructor(loader, tag_suffix, node):
144 """
145 Deal with !Ref style function format
146 """
147
148 if tag_suffix not in UNCONVERTED_SUFFIXES:
149 tag_suffix = '{}{}'.format(FN_PREFIX, tag_suffix)
150
151 constructor = None
152
153 if tag_suffix == 'Fn::GetAtt':
154 constructor = construct_getatt
155 elif isinstance(node, ScalarNode):
156 constructor = loader.construct_scalar
157 elif isinstance(node, SequenceNode):
158 constructor = loader.construct_sequence
159 elif isinstance(node, MappingNode):
160 constructor = loader.construct_mapping
161 else:
162 raise 'Bad tag: !{}'.format(tag_suffix)
163
164 return {tag_suffix: constructor(node)}
165
166
167 def construct_getatt(node):
168 """
169 Reconstruct !GetAtt into a list
170 """
171
172 if isinstance(node.value, (six.text_type, six.string_types)):
173 return node.value.split('.')
174 if isinstance(node.value, list):
175 return [s.value for s in node.value]
176
177 raise ValueError('Unexpected node type: {}'.format(type(node.value)))
178
179
180 def load(filename):
181 """
182 Load the give YAML file
183 """
184 fp = open(filename)
185 loader = MarkedLoader(fp.read(), filename)
186 loader.add_multi_constructor('!', multi_constructor)
187 template = loader.get_single_data()
188 # Convert an empty file to an empty dict
189 if template is None:
190 template = {}
191
192 return template
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/decode/cfn_yaml.py b/src/cfnlint/decode/cfn_yaml.py
--- a/src/cfnlint/decode/cfn_yaml.py
+++ b/src/cfnlint/decode/cfn_yaml.py
@@ -177,12 +177,11 @@
raise ValueError('Unexpected node type: {}'.format(type(node.value)))
-def load(filename):
+def loads(yaml_string, fname=None):
"""
- Load the give YAML file
+ Load the given YAML string
"""
- fp = open(filename)
- loader = MarkedLoader(fp.read(), filename)
+ loader = MarkedLoader(yaml_string, fname)
loader.add_multi_constructor('!', multi_constructor)
template = loader.get_single_data()
# Convert an empty file to an empty dict
@@ -190,3 +189,11 @@
template = {}
return template
+
+
+def load(filename):
+ """
+ Load the given YAML file
+ """
+ fp = open(filename)
+ return loads(fp.read(), filename)
| {"golden_diff": "diff --git a/src/cfnlint/decode/cfn_yaml.py b/src/cfnlint/decode/cfn_yaml.py\n--- a/src/cfnlint/decode/cfn_yaml.py\n+++ b/src/cfnlint/decode/cfn_yaml.py\n@@ -177,12 +177,11 @@\n raise ValueError('Unexpected node type: {}'.format(type(node.value)))\n \n \n-def load(filename):\n+def loads(yaml_string, fname=None):\n \"\"\"\n- Load the give YAML file\n+ Load the given YAML string\n \"\"\"\n- fp = open(filename)\n- loader = MarkedLoader(fp.read(), filename)\n+ loader = MarkedLoader(yaml_string, fname)\n loader.add_multi_constructor('!', multi_constructor)\n template = loader.get_single_data()\n # Convert an empty file to an empty dict\n@@ -190,3 +189,11 @@\n template = {}\n \n return template\n+\n+\n+def load(filename):\n+ \"\"\"\n+ Load the given YAML file\n+ \"\"\"\n+ fp = open(filename)\n+ return loads(fp.read(), filename)\n", "issue": "method to load yaml from a string\n*cfn-lint version: cfn-lint 0.4.2*\r\n\r\n*Description of issue.*\r\n\r\nWhen consuming cfn-lint as a module there is currently only an interface to load yaml from a file(`cfnlint.core.decode.cfn_yaml.load`). In our project([taskcat](https://github.com/aws-quickstart/taskcat/)) we have a need to load templates from strings as well. \r\n\r\nWhile we could just do something like below, think it would be useful to others to have a simpler interface for yaml strings:\r\n\r\n```python\r\nfrom cfnlint.decode.cfn_yaml import MarkedLoader, multi_constructor\r\nloader = MarkedLoader(my_yaml_string, None)\r\nloader.add_multi_constructor('!', multi_constructor)\r\ntemplate = loader.get_single_data()\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport logging\nimport six\nfrom yaml.composer import Composer\nfrom yaml.reader import Reader\nfrom yaml.scanner import Scanner\nfrom yaml.resolver import Resolver\nfrom yaml import ScalarNode\nfrom yaml import SequenceNode\nfrom yaml import MappingNode\nfrom yaml.constructor import SafeConstructor\nfrom yaml.constructor import ConstructorError\nimport cfnlint\nfrom cfnlint.decode.str_node import str_node\n\ntry:\n from yaml.cyaml import CParser as Parser # pylint: disable=ungrouped-imports\n cyaml = True\nexcept ImportError:\n from yaml.parser import Parser # pylint: disable=ungrouped-imports\n cyaml = False\n\nUNCONVERTED_SUFFIXES = ['Ref', 'Condition']\nFN_PREFIX = 'Fn::'\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass CfnParseError(ConstructorError):\n \"\"\"\n Error thrown when the template contains Cfn Error\n \"\"\"\n def __init__(self, filename, message, line_number, column_number, key=' '):\n\n # Call the base class constructor with the parameters it needs\n super(CfnParseError, self).__init__(message)\n\n # Now for your custom code...\n self.filename = filename\n self.line_number = line_number\n self.column_number = column_number\n self.message = message\n self.match = cfnlint.Match(\n line_number + 1, column_number + 1, line_number + 1,\n column_number + 1 + len(key), filename, cfnlint.ParseError(), message=message)\n\n\nclass NodeConstructor(SafeConstructor):\n \"\"\"\n Node Constructors for loading different types in Yaml\n \"\"\"\n\n def __init__(self, filename):\n # Call the base class constructor\n super(NodeConstructor, self).__init__()\n\n self.filename = filename\n\n # To support lazy loading, the original constructors first yield\n # an empty object, then fill them in when iterated. Due to\n # laziness we omit this behaviour (and will only do \"deep\n # construction\") by first exhausting iterators, then yielding\n # copies.\n def construct_yaml_map(self, node):\n\n # Check for duplicate keys on the current level, this is not desirable\n # because a dict does not support this. 
It overwrites it with the last\n # occurance, which can give unexpected results\n mapping = {}\n for key_node, value_node in node.value:\n key = self.construct_object(key_node, False)\n value = self.construct_object(value_node, False)\n\n if key in mapping:\n raise CfnParseError(\n self.filename,\n 'Duplicate resource found \"{}\" (line {})'.format(key, key_node.start_mark.line + 1),\n key_node.start_mark.line, key_node.start_mark.column, key)\n mapping[key] = value\n\n obj, = SafeConstructor.construct_yaml_map(self, node)\n return obj\n\n def construct_yaml_str(self, node):\n obj = SafeConstructor.construct_yaml_str(self, node)\n assert isinstance(obj, (six.string_types))\n return str_node(obj, node.start_mark, node.end_mark)\n\n def construct_yaml_null_error(self, node):\n \"\"\"Throw a null error\"\"\"\n raise CfnParseError(\n self.filename,\n 'Null value at line {0} column {1}'.format(node.start_mark.line, node.start_mark.column),\n node.start_mark.line, node.start_mark.column, ' ')\n\n\nNodeConstructor.add_constructor(\n u'tag:yaml.org,2002:map',\n NodeConstructor.construct_yaml_map)\n\nNodeConstructor.add_constructor(\n u'tag:yaml.org,2002:str',\n NodeConstructor.construct_yaml_str)\n\nNodeConstructor.add_constructor(\n u'tag:yaml.org,2002:null',\n NodeConstructor.construct_yaml_null_error)\n\n\nclass MarkedLoader(Reader, Scanner, Parser, Composer, NodeConstructor, Resolver):\n \"\"\"\n Class for marked loading YAML\n \"\"\"\n # pylint: disable=non-parent-init-called,super-init-not-called\n def __init__(self, stream, filename):\n Reader.__init__(self, stream)\n Scanner.__init__(self)\n if cyaml:\n Parser.__init__(self, stream)\n else:\n Parser.__init__(self)\n Composer.__init__(self)\n SafeConstructor.__init__(self)\n Resolver.__init__(self)\n NodeConstructor.__init__(self, filename)\n\n\ndef multi_constructor(loader, tag_suffix, node):\n \"\"\"\n Deal with !Ref style function format\n \"\"\"\n\n if tag_suffix not in UNCONVERTED_SUFFIXES:\n tag_suffix = '{}{}'.format(FN_PREFIX, tag_suffix)\n\n constructor = None\n\n if tag_suffix == 'Fn::GetAtt':\n constructor = construct_getatt\n elif isinstance(node, ScalarNode):\n constructor = loader.construct_scalar\n elif isinstance(node, SequenceNode):\n constructor = loader.construct_sequence\n elif isinstance(node, MappingNode):\n constructor = loader.construct_mapping\n else:\n raise 'Bad tag: !{}'.format(tag_suffix)\n\n return {tag_suffix: constructor(node)}\n\n\ndef construct_getatt(node):\n \"\"\"\n Reconstruct !GetAtt into a list\n \"\"\"\n\n if isinstance(node.value, (six.text_type, six.string_types)):\n return node.value.split('.')\n if isinstance(node.value, list):\n return [s.value for s in node.value]\n\n raise ValueError('Unexpected node type: {}'.format(type(node.value)))\n\n\ndef load(filename):\n \"\"\"\n Load the give YAML file\n \"\"\"\n fp = open(filename)\n loader = MarkedLoader(fp.read(), filename)\n loader.add_multi_constructor('!', multi_constructor)\n template = loader.get_single_data()\n # Convert an empty file to an empty dict\n if template is None:\n template = {}\n\n return template\n", "path": "src/cfnlint/decode/cfn_yaml.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport logging\nimport six\nfrom yaml.composer import Composer\nfrom yaml.reader import Reader\nfrom yaml.scanner import Scanner\nfrom yaml.resolver import Resolver\nfrom yaml import ScalarNode\nfrom yaml import SequenceNode\nfrom yaml import MappingNode\nfrom yaml.constructor import SafeConstructor\nfrom yaml.constructor import ConstructorError\nimport cfnlint\nfrom cfnlint.decode.str_node import str_node\n\ntry:\n from yaml.cyaml import CParser as Parser # pylint: disable=ungrouped-imports\n cyaml = True\nexcept ImportError:\n from yaml.parser import Parser # pylint: disable=ungrouped-imports\n cyaml = False\n\nUNCONVERTED_SUFFIXES = ['Ref', 'Condition']\nFN_PREFIX = 'Fn::'\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass CfnParseError(ConstructorError):\n \"\"\"\n Error thrown when the template contains Cfn Error\n \"\"\"\n def __init__(self, filename, message, line_number, column_number, key=' '):\n\n # Call the base class constructor with the parameters it needs\n super(CfnParseError, self).__init__(message)\n\n # Now for your custom code...\n self.filename = filename\n self.line_number = line_number\n self.column_number = column_number\n self.message = message\n self.match = cfnlint.Match(\n line_number + 1, column_number + 1, line_number + 1,\n column_number + 1 + len(key), filename, cfnlint.ParseError(), message=message)\n\n\nclass NodeConstructor(SafeConstructor):\n \"\"\"\n Node Constructors for loading different types in Yaml\n \"\"\"\n\n def __init__(self, filename):\n # Call the base class constructor\n super(NodeConstructor, self).__init__()\n\n self.filename = filename\n\n # To support lazy loading, the original constructors first yield\n # an empty object, then fill them in when iterated. Due to\n # laziness we omit this behaviour (and will only do \"deep\n # construction\") by first exhausting iterators, then yielding\n # copies.\n def construct_yaml_map(self, node):\n\n # Check for duplicate keys on the current level, this is not desirable\n # because a dict does not support this. 
It overwrites it with the last\n # occurance, which can give unexpected results\n mapping = {}\n for key_node, value_node in node.value:\n key = self.construct_object(key_node, False)\n value = self.construct_object(value_node, False)\n\n if key in mapping:\n raise CfnParseError(\n self.filename,\n 'Duplicate resource found \"{}\" (line {})'.format(key, key_node.start_mark.line + 1),\n key_node.start_mark.line, key_node.start_mark.column, key)\n mapping[key] = value\n\n obj, = SafeConstructor.construct_yaml_map(self, node)\n return obj\n\n def construct_yaml_str(self, node):\n obj = SafeConstructor.construct_yaml_str(self, node)\n assert isinstance(obj, (six.string_types))\n return str_node(obj, node.start_mark, node.end_mark)\n\n def construct_yaml_null_error(self, node):\n \"\"\"Throw a null error\"\"\"\n raise CfnParseError(\n self.filename,\n 'Null value at line {0} column {1}'.format(node.start_mark.line, node.start_mark.column),\n node.start_mark.line, node.start_mark.column, ' ')\n\n\nNodeConstructor.add_constructor(\n u'tag:yaml.org,2002:map',\n NodeConstructor.construct_yaml_map)\n\nNodeConstructor.add_constructor(\n u'tag:yaml.org,2002:str',\n NodeConstructor.construct_yaml_str)\n\nNodeConstructor.add_constructor(\n u'tag:yaml.org,2002:null',\n NodeConstructor.construct_yaml_null_error)\n\n\nclass MarkedLoader(Reader, Scanner, Parser, Composer, NodeConstructor, Resolver):\n \"\"\"\n Class for marked loading YAML\n \"\"\"\n # pylint: disable=non-parent-init-called,super-init-not-called\n def __init__(self, stream, filename):\n Reader.__init__(self, stream)\n Scanner.__init__(self)\n if cyaml:\n Parser.__init__(self, stream)\n else:\n Parser.__init__(self)\n Composer.__init__(self)\n SafeConstructor.__init__(self)\n Resolver.__init__(self)\n NodeConstructor.__init__(self, filename)\n\n\ndef multi_constructor(loader, tag_suffix, node):\n \"\"\"\n Deal with !Ref style function format\n \"\"\"\n\n if tag_suffix not in UNCONVERTED_SUFFIXES:\n tag_suffix = '{}{}'.format(FN_PREFIX, tag_suffix)\n\n constructor = None\n\n if tag_suffix == 'Fn::GetAtt':\n constructor = construct_getatt\n elif isinstance(node, ScalarNode):\n constructor = loader.construct_scalar\n elif isinstance(node, SequenceNode):\n constructor = loader.construct_sequence\n elif isinstance(node, MappingNode):\n constructor = loader.construct_mapping\n else:\n raise 'Bad tag: !{}'.format(tag_suffix)\n\n return {tag_suffix: constructor(node)}\n\n\ndef construct_getatt(node):\n \"\"\"\n Reconstruct !GetAtt into a list\n \"\"\"\n\n if isinstance(node.value, (six.text_type, six.string_types)):\n return node.value.split('.')\n if isinstance(node.value, list):\n return [s.value for s in node.value]\n\n raise ValueError('Unexpected node type: {}'.format(type(node.value)))\n\n\ndef loads(yaml_string, fname=None):\n \"\"\"\n Load the given YAML string\n \"\"\"\n loader = MarkedLoader(yaml_string, fname)\n loader.add_multi_constructor('!', multi_constructor)\n template = loader.get_single_data()\n # Convert an empty file to an empty dict\n if template is None:\n template = {}\n\n return template\n\n\ndef load(filename):\n \"\"\"\n Load the given YAML file\n \"\"\"\n fp = open(filename)\n return loads(fp.read(), filename)\n", "path": "src/cfnlint/decode/cfn_yaml.py"}]} | 2,363 | 239 |
gh_patches_debug_49907 | rasdani/github-patches | git_diff | CTFd__CTFd-1101 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CTFd crash right after install
## Git + python 3.7.2 (3b1b82b9a0fbcb8731d7a3a3bbac99499c466c99)
[](https://asciinema.org/a/iHDMVNRWSYJDTZUgIy4wIBsOK)
## Git + python 2.7.15 (3b1b82b9a0fbcb8731d7a3a3bbac99499c466c99)
[](https://asciinema.org/a/JRqfe0rMl0QeZAEklyvV1LQX9)
## 2.1.4 + python 3.7.2
[](https://asciinema.org/a/wErebw8ZN2HFER9P71tLW4FBv)
**Environment**:
- CTFd Version/Commit: see titles
- Operating System: ArchLinux
- Web Browser and Version: Firefox
**What happened?**
CTFd crashes right after install.
**What did you expect to happen?**
Working or at least a more comprehensible error.
**How to reproduce your issue**
See the asciinema, but basically
- Download CTFd
- Install dependencies
- run flask
- crash
PS: with the untouched default config
**Any associated stack traces or error logs**
See the asciinema
--- END ISSUE ---
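The recordings are not transcribed here, but the failure appears consistent with `wsgi.py` applying gevent monkey patching unconditionally, which breaks when the app is launched through `flask run`. A hedged sketch of that mitigation (the `FLASK_RUN_FROM_CLI` check mirrors the patch later in this record):

```python
import os

# Only monkey patch when we are not being driven by the Flask CLI;
# patching under `flask run` is what appears to crash at startup.
if not os.getenv("FLASK_RUN_FROM_CLI"):
    from gevent import monkey

    monkey.patch_all()

from CTFd import create_app

app = create_app()

if __name__ == "__main__":
    app.run(debug=True, threaded=True, host="127.0.0.1", port=4000)
```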
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wsgi.py`
Content:
```
1 from gevent import monkey
2 monkey.patch_all()
3 from CTFd import create_app
4
5 app = create_app()
6
7 if __name__ == '__main__':
8 app.run(debug=True, threaded=True, host="127.0.0.1", port=4000)
9
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wsgi.py b/wsgi.py
--- a/wsgi.py
+++ b/wsgi.py
@@ -1,8 +1,14 @@
-from gevent import monkey
-monkey.patch_all()
+import os
+
+# Detect if we're running via `flask run` and don't monkey patch
+if not os.getenv("FLASK_RUN_FROM_CLI"):
+ from gevent import monkey
+
+ monkey.patch_all()
+
from CTFd import create_app
app = create_app()
-if __name__ == '__main__':
+if __name__ == "__main__":
app.run(debug=True, threaded=True, host="127.0.0.1", port=4000)
| {"golden_diff": "diff --git a/wsgi.py b/wsgi.py\n--- a/wsgi.py\n+++ b/wsgi.py\n@@ -1,8 +1,14 @@\n-from gevent import monkey\n-monkey.patch_all()\n+import os\n+\n+# Detect if we're running via `flask run` and don't monkey patch\n+if not os.getenv(\"FLASK_RUN_FROM_CLI\"):\n+ from gevent import monkey\n+\n+ monkey.patch_all()\n+\n from CTFd import create_app\n \n app = create_app()\n \n-if __name__ == '__main__':\n+if __name__ == \"__main__\":\n app.run(debug=True, threaded=True, host=\"127.0.0.1\", port=4000)\n", "issue": "CTFd crash right after install\n## Git + python 3.7.2 (3b1b82b9a0fbcb8731d7a3a3bbac99499c466c99)\r\n\r\n[](https://asciinema.org/a/iHDMVNRWSYJDTZUgIy4wIBsOK)\r\n\r\n## Git + python 2.7.15 (3b1b82b9a0fbcb8731d7a3a3bbac99499c466c99)\r\n\r\n[](https://asciinema.org/a/JRqfe0rMl0QeZAEklyvV1LQX9)\r\n\r\n## 2.1.4 + python 3.7.2\r\n\r\n[](https://asciinema.org/a/wErebw8ZN2HFER9P71tLW4FBv)\r\n\r\n**Environment**:\r\n\r\n - CTFd Version/Commit: see titles\r\n - Operating System: ArchLinux\r\n - Web Browser and Version: Firefox\r\n\r\n**What happened?**\r\n\r\nCTFd crash right after install.\r\n\r\n**What did you expect to happen?**\r\n\r\nWorking or at least a more comprehensible error.\r\n\r\n**How to reproduce your issue**\r\n\r\nSee the asciinema, but basically\r\n\r\n- Download CTFd\r\n- Install dependencies\r\n- run flask\r\n- crash\r\n\r\nPS : with the untouched default config\r\n\r\n**Any associated stack traces or error logs**\r\n\r\nSee the asciinema\n", "before_files": [{"content": "from gevent import monkey\nmonkey.patch_all()\nfrom CTFd import create_app\n\napp = create_app()\n\nif __name__ == '__main__':\n app.run(debug=True, threaded=True, host=\"127.0.0.1\", port=4000)\n", "path": "wsgi.py"}], "after_files": [{"content": "import os\n\n# Detect if we're running via `flask run` and don't monkey patch\nif not os.getenv(\"FLASK_RUN_FROM_CLI\"):\n from gevent import monkey\n\n monkey.patch_all()\n\nfrom CTFd import create_app\n\napp = create_app()\n\nif __name__ == \"__main__\":\n app.run(debug=True, threaded=True, host=\"127.0.0.1\", port=4000)\n", "path": "wsgi.py"}]} | 763 | 156 |
gh_patches_debug_31995 | rasdani/github-patches | git_diff | nvaccess__nvda-10182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Screen curtain reveals mouse position
Note, this was reported to me by my wife, so it would help to have at least one sighted person who can verify this.
### Steps to reproduce:
1. Enable the screen curtain by assigning a custom gesture to the global command in the vision category
2. Look at the screen
### Actual behavior:
The screen is black, but the mouse is still shown.
### Expected behavior:
Screen is entirely black
### Proposed fix
There is a function in the magnification API that hides the system cursor, so that can be implemented for this. Having said that, I don't consider this very important.
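For reference, the magnification API call the proposed fix alludes to is `MagShowSystemCursor`. A minimal ctypes sketch in the style of the provider code shown below (assumes `MagInitialize()` has already been called; error checking omitted):

```python
from ctypes import windll, WINFUNCTYPE
from ctypes.wintypes import BOOL

_magnification = windll.Magnification  # Windows 8+ magnification API

# BOOL MagShowSystemCursor(BOOL fShowCursor);
MagShowSystemCursor = WINFUNCTYPE(BOOL, BOOL)(
    ("MagShowSystemCursor", _magnification)
)

# Hide the cursor while the screen curtain is active ...
MagShowSystemCursor(False)
# ... and restore it when the curtain is torn down.
MagShowSystemCursor(True)
```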
### System configuration
#### NVDA installed/portable/running from source:
Installed
#### NVDA version:
alpha-18574
#### Windows version:
Windows 10 1903
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/visionEnhancementProviders/screenCurtain.py`
Content:
```
1 # A part of NonVisual Desktop Access (NVDA)
2 # This file is covered by the GNU General Public License.
3 # See the file COPYING for more details.
4 # Copyright (C) 2018-2019 NV Access Limited, Babbage B.V., Leonard de Ruijter
5
6 """Screen curtain implementation based on the windows magnification API.
7 This implementation only works on Windows 8 and above.
8 """
9
10 import vision
11 import winVersion
12 from ctypes import Structure, windll, c_float, POINTER, WINFUNCTYPE, WinError
13 from ctypes.wintypes import BOOL
14
15
16 class MAGCOLOREFFECT(Structure):
17 _fields_ = (("transform", c_float * 5 * 5),)
18
19
20 TRANSFORM_BLACK = MAGCOLOREFFECT()
21 TRANSFORM_BLACK.transform[4][4] = 1.0
22
23
24 def _errCheck(result, func, args):
25 if result == 0:
26 raise WinError()
27 return args
28
29
30 class Magnification:
31 """Static class that wraps necessary functions from the Windows magnification API."""
32
33 _magnification = windll.Magnification
34
35 _MagInitializeFuncType = WINFUNCTYPE(BOOL)
36 _MagUninitializeFuncType = WINFUNCTYPE(BOOL)
37 _MagSetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))
38 _MagSetFullscreenColorEffectArgTypes = ((1, "effect"),)
39 _MagGetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))
40 _MagGetFullscreenColorEffectArgTypes = ((2, "effect"),)
41
42 MagInitialize = _MagInitializeFuncType(("MagInitialize", _magnification))
43 MagInitialize.errcheck = _errCheck
44 MagUninitialize = _MagUninitializeFuncType(("MagUninitialize", _magnification))
45 MagUninitialize.errcheck = _errCheck
46 try:
47 MagSetFullscreenColorEffect = _MagSetFullscreenColorEffectFuncType(
48 ("MagSetFullscreenColorEffect", _magnification),
49 _MagSetFullscreenColorEffectArgTypes
50 )
51 MagSetFullscreenColorEffect.errcheck = _errCheck
52 MagGetFullscreenColorEffect = _MagGetFullscreenColorEffectFuncType(
53 ("MagGetFullscreenColorEffect", _magnification),
54 _MagGetFullscreenColorEffectArgTypes
55 )
56 MagGetFullscreenColorEffect.errcheck = _errCheck
57 except AttributeError:
58 MagSetFullscreenColorEffect = None
59 MagGetFullscreenColorEffect = None
60
61
62 class VisionEnhancementProvider(vision.providerBase.VisionEnhancementProvider):
63 name = "screenCurtain"
64 # Translators: Description of a vision enhancement provider that disables output to the screen,
65 # making it black.
66 description = _("Screen Curtain")
67 supportedRoles = frozenset([vision.constants.Role.COLORENHANCER])
68
69 @classmethod
70 def canStart(cls):
71 return winVersion.isFullScreenMagnificationAvailable()
72
73 def __init__(self):
74 super(VisionEnhancementProvider, self).__init__()
75 Magnification.MagInitialize()
76 Magnification.MagSetFullscreenColorEffect(TRANSFORM_BLACK)
77
78 def terminate(self):
79 super(VisionEnhancementProvider, self).terminate()
80 Magnification.MagUninitialize()
81
82 def registerEventExtensionPoints(self, extensionPoints):
83 # The screen curtain isn't interested in any events
84 pass
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/source/visionEnhancementProviders/screenCurtain.py b/source/visionEnhancementProviders/screenCurtain.py
--- a/source/visionEnhancementProviders/screenCurtain.py
+++ b/source/visionEnhancementProviders/screenCurtain.py
@@ -38,6 +38,8 @@
_MagSetFullscreenColorEffectArgTypes = ((1, "effect"),)
_MagGetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))
_MagGetFullscreenColorEffectArgTypes = ((2, "effect"),)
+ _MagShowSystemCursorFuncType = WINFUNCTYPE(BOOL, BOOL)
+ _MagShowSystemCursorArgTypes = ((1, "showCursor"),)
MagInitialize = _MagInitializeFuncType(("MagInitialize", _magnification))
MagInitialize.errcheck = _errCheck
@@ -57,6 +59,11 @@
except AttributeError:
MagSetFullscreenColorEffect = None
MagGetFullscreenColorEffect = None
+ MagShowSystemCursor = _MagShowSystemCursorFuncType(
+ ("MagShowSystemCursor", _magnification),
+ _MagShowSystemCursorArgTypes
+ )
+ MagShowSystemCursor.errcheck = _errCheck
class VisionEnhancementProvider(vision.providerBase.VisionEnhancementProvider):
@@ -73,10 +80,12 @@
def __init__(self):
super(VisionEnhancementProvider, self).__init__()
Magnification.MagInitialize()
+ Magnification.MagShowSystemCursor(False)
Magnification.MagSetFullscreenColorEffect(TRANSFORM_BLACK)
def terminate(self):
super(VisionEnhancementProvider, self).terminate()
+ Magnification.MagShowSystemCursor(True)
Magnification.MagUninitialize()
def registerEventExtensionPoints(self, extensionPoints):
| {"golden_diff": "diff --git a/source/visionEnhancementProviders/screenCurtain.py b/source/visionEnhancementProviders/screenCurtain.py\n--- a/source/visionEnhancementProviders/screenCurtain.py\n+++ b/source/visionEnhancementProviders/screenCurtain.py\n@@ -38,6 +38,8 @@\n \t_MagSetFullscreenColorEffectArgTypes = ((1, \"effect\"),)\r\n \t_MagGetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))\r\n \t_MagGetFullscreenColorEffectArgTypes = ((2, \"effect\"),)\r\n+\t_MagShowSystemCursorFuncType = WINFUNCTYPE(BOOL, BOOL)\r\n+\t_MagShowSystemCursorArgTypes = ((1, \"showCursor\"),)\r\n \r\n \tMagInitialize = _MagInitializeFuncType((\"MagInitialize\", _magnification))\r\n \tMagInitialize.errcheck = _errCheck\r\n@@ -57,6 +59,11 @@\n \texcept AttributeError:\r\n \t\tMagSetFullscreenColorEffect = None\r\n \t\tMagGetFullscreenColorEffect = None\r\n+\tMagShowSystemCursor = _MagShowSystemCursorFuncType(\r\n+\t\t(\"MagShowSystemCursor\", _magnification),\r\n+\t\t_MagShowSystemCursorArgTypes\r\n+\t)\r\n+\tMagShowSystemCursor.errcheck = _errCheck\r\n \r\n \r\n class VisionEnhancementProvider(vision.providerBase.VisionEnhancementProvider):\r\n@@ -73,10 +80,12 @@\n \tdef __init__(self):\r\n \t\tsuper(VisionEnhancementProvider, self).__init__()\r\n \t\tMagnification.MagInitialize()\r\n+\t\tMagnification.MagShowSystemCursor(False)\r\n \t\tMagnification.MagSetFullscreenColorEffect(TRANSFORM_BLACK)\r\n \r\n \tdef terminate(self):\r\n \t\tsuper(VisionEnhancementProvider, self).terminate()\r\n+\t\tMagnification.MagShowSystemCursor(True)\r\n \t\tMagnification.MagUninitialize()\r\n \r\n \tdef registerEventExtensionPoints(self, extensionPoints):\n", "issue": "Screen curtain reveals mouse position\nNote, this was reported to me by my wife, so it would help to have at least one sighted person who can verify this.\r\n\r\n### Steps to reproduce:\r\n1. Enable the screen curtain by assigning a custom gesture to the global command in the vision category\r\n2. Look at the screen\r\n\r\n### Actual behavior:\r\nThe screen is black, but the mouse is still shown.\r\n\r\n### Expected behavior:\r\nScreen is entirely black\r\n\r\n### Proposed fix\r\nThere is a function in the magnification API that hides the system cursor, so that can be implemented for this. 
Having said that, I don't consider this very important.\r\n\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\nInstalled\r\n\r\n#### NVDA version:\r\nalpha-18574\r\n\r\n#### Windows version:\r\nWindows 10 1903\r\n\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n# Copyright (C) 2018-2019 NV Access Limited, Babbage B.V., Leonard de Ruijter\r\n\r\n\"\"\"Screen curtain implementation based on the windows magnification API.\r\nThis implementation only works on Windows 8 and above.\r\n\"\"\"\r\n\r\nimport vision\r\nimport winVersion\r\nfrom ctypes import Structure, windll, c_float, POINTER, WINFUNCTYPE, WinError\r\nfrom ctypes.wintypes import BOOL\r\n\r\n\r\nclass MAGCOLOREFFECT(Structure):\r\n\t_fields_ = ((\"transform\", c_float * 5 * 5),)\r\n\r\n\r\nTRANSFORM_BLACK = MAGCOLOREFFECT()\r\nTRANSFORM_BLACK.transform[4][4] = 1.0\r\n\r\n\r\ndef _errCheck(result, func, args):\r\n\tif result == 0:\r\n\t\traise WinError()\r\n\treturn args\r\n\r\n\r\nclass Magnification:\r\n\t\"\"\"Static class that wraps necessary functions from the Windows magnification API.\"\"\"\r\n\r\n\t_magnification = windll.Magnification\r\n\r\n\t_MagInitializeFuncType = WINFUNCTYPE(BOOL)\r\n\t_MagUninitializeFuncType = WINFUNCTYPE(BOOL)\r\n\t_MagSetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))\r\n\t_MagSetFullscreenColorEffectArgTypes = ((1, \"effect\"),)\r\n\t_MagGetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))\r\n\t_MagGetFullscreenColorEffectArgTypes = ((2, \"effect\"),)\r\n\r\n\tMagInitialize = _MagInitializeFuncType((\"MagInitialize\", _magnification))\r\n\tMagInitialize.errcheck = _errCheck\r\n\tMagUninitialize = _MagUninitializeFuncType((\"MagUninitialize\", _magnification))\r\n\tMagUninitialize.errcheck = _errCheck\r\n\ttry:\r\n\t\tMagSetFullscreenColorEffect = _MagSetFullscreenColorEffectFuncType(\r\n\t\t\t(\"MagSetFullscreenColorEffect\", _magnification),\r\n\t\t\t_MagSetFullscreenColorEffectArgTypes\r\n\t\t)\r\n\t\tMagSetFullscreenColorEffect.errcheck = _errCheck\r\n\t\tMagGetFullscreenColorEffect = _MagGetFullscreenColorEffectFuncType(\r\n\t\t\t(\"MagGetFullscreenColorEffect\", _magnification),\r\n\t\t\t_MagGetFullscreenColorEffectArgTypes\r\n\t\t)\r\n\t\tMagGetFullscreenColorEffect.errcheck = _errCheck\r\n\texcept AttributeError:\r\n\t\tMagSetFullscreenColorEffect = None\r\n\t\tMagGetFullscreenColorEffect = None\r\n\r\n\r\nclass VisionEnhancementProvider(vision.providerBase.VisionEnhancementProvider):\r\n\tname = \"screenCurtain\"\r\n\t# Translators: Description of a vision enhancement provider that disables output to the screen,\r\n\t# making it black.\r\n\tdescription = _(\"Screen Curtain\")\r\n\tsupportedRoles = frozenset([vision.constants.Role.COLORENHANCER])\r\n\r\n\t@classmethod\r\n\tdef canStart(cls):\r\n\t\treturn winVersion.isFullScreenMagnificationAvailable()\r\n\r\n\tdef __init__(self):\r\n\t\tsuper(VisionEnhancementProvider, self).__init__()\r\n\t\tMagnification.MagInitialize()\r\n\t\tMagnification.MagSetFullscreenColorEffect(TRANSFORM_BLACK)\r\n\r\n\tdef terminate(self):\r\n\t\tsuper(VisionEnhancementProvider, self).terminate()\r\n\t\tMagnification.MagUninitialize()\r\n\r\n\tdef registerEventExtensionPoints(self, extensionPoints):\r\n\t\t# The screen curtain isn't interested in any events\r\n\t\tpass\r\n", "path": "source/visionEnhancementProviders/screenCurtain.py"}], 
"after_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n# Copyright (C) 2018-2019 NV Access Limited, Babbage B.V., Leonard de Ruijter\r\n\r\n\"\"\"Screen curtain implementation based on the windows magnification API.\r\nThis implementation only works on Windows 8 and above.\r\n\"\"\"\r\n\r\nimport vision\r\nimport winVersion\r\nfrom ctypes import Structure, windll, c_float, POINTER, WINFUNCTYPE, WinError\r\nfrom ctypes.wintypes import BOOL\r\n\r\n\r\nclass MAGCOLOREFFECT(Structure):\r\n\t_fields_ = ((\"transform\", c_float * 5 * 5),)\r\n\r\n\r\nTRANSFORM_BLACK = MAGCOLOREFFECT()\r\nTRANSFORM_BLACK.transform[4][4] = 1.0\r\n\r\n\r\ndef _errCheck(result, func, args):\r\n\tif result == 0:\r\n\t\traise WinError()\r\n\treturn args\r\n\r\n\r\nclass Magnification:\r\n\t\"\"\"Static class that wraps necessary functions from the Windows magnification API.\"\"\"\r\n\r\n\t_magnification = windll.Magnification\r\n\r\n\t_MagInitializeFuncType = WINFUNCTYPE(BOOL)\r\n\t_MagUninitializeFuncType = WINFUNCTYPE(BOOL)\r\n\t_MagSetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))\r\n\t_MagSetFullscreenColorEffectArgTypes = ((1, \"effect\"),)\r\n\t_MagGetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))\r\n\t_MagGetFullscreenColorEffectArgTypes = ((2, \"effect\"),)\r\n\t_MagShowSystemCursorFuncType = WINFUNCTYPE(BOOL, BOOL)\r\n\t_MagShowSystemCursorArgTypes = ((1, \"showCursor\"),)\r\n\r\n\tMagInitialize = _MagInitializeFuncType((\"MagInitialize\", _magnification))\r\n\tMagInitialize.errcheck = _errCheck\r\n\tMagUninitialize = _MagUninitializeFuncType((\"MagUninitialize\", _magnification))\r\n\tMagUninitialize.errcheck = _errCheck\r\n\ttry:\r\n\t\tMagSetFullscreenColorEffect = _MagSetFullscreenColorEffectFuncType(\r\n\t\t\t(\"MagSetFullscreenColorEffect\", _magnification),\r\n\t\t\t_MagSetFullscreenColorEffectArgTypes\r\n\t\t)\r\n\t\tMagSetFullscreenColorEffect.errcheck = _errCheck\r\n\t\tMagGetFullscreenColorEffect = _MagGetFullscreenColorEffectFuncType(\r\n\t\t\t(\"MagGetFullscreenColorEffect\", _magnification),\r\n\t\t\t_MagGetFullscreenColorEffectArgTypes\r\n\t\t)\r\n\t\tMagGetFullscreenColorEffect.errcheck = _errCheck\r\n\texcept AttributeError:\r\n\t\tMagSetFullscreenColorEffect = None\r\n\t\tMagGetFullscreenColorEffect = None\r\n\tMagShowSystemCursor = _MagShowSystemCursorFuncType(\r\n\t\t(\"MagShowSystemCursor\", _magnification),\r\n\t\t_MagShowSystemCursorArgTypes\r\n\t)\r\n\tMagShowSystemCursor.errcheck = _errCheck\r\n\r\n\r\nclass VisionEnhancementProvider(vision.providerBase.VisionEnhancementProvider):\r\n\tname = \"screenCurtain\"\r\n\t# Translators: Description of a vision enhancement provider that disables output to the screen,\r\n\t# making it black.\r\n\tdescription = _(\"Screen Curtain\")\r\n\tsupportedRoles = frozenset([vision.constants.Role.COLORENHANCER])\r\n\r\n\t@classmethod\r\n\tdef canStart(cls):\r\n\t\treturn winVersion.isFullScreenMagnificationAvailable()\r\n\r\n\tdef __init__(self):\r\n\t\tsuper(VisionEnhancementProvider, self).__init__()\r\n\t\tMagnification.MagInitialize()\r\n\t\tMagnification.MagShowSystemCursor(False)\r\n\t\tMagnification.MagSetFullscreenColorEffect(TRANSFORM_BLACK)\r\n\r\n\tdef terminate(self):\r\n\t\tsuper(VisionEnhancementProvider, self).terminate()\r\n\t\tMagnification.MagShowSystemCursor(True)\r\n\t\tMagnification.MagUninitialize()\r\n\r\n\tdef registerEventExtensionPoints(self, 
extensionPoints):\r\n\t\t# The screen curtain isn't interested in any events\r\n\t\tpass\r\n", "path": "source/visionEnhancementProviders/screenCurtain.py"}]} | 1,372 | 425 |
gh_patches_debug_12450 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3265 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add OpenAPI Specification for /queries/ endpoint and fix the incorrect specs
This is a part of the API Documentation project #2888
## Problem
To ensure the accuracy of the specs generated by drf-spectacular, we will generate an OpenAPI Specification specifically for a particular endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page. This issue aims to generate the spec for the /queries/ endpoint.
## Proposed solution
* Add /queries/ to the custom preprocessing hook function to filter out all endpoints except for the /queries/ endpoint.
* Generate the spec file using the _./manage.py spectacular --color --file schema.yml_ command
--- END ISSUE ---
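Concretely, the first bullet above amounts to widening the path filter in `custom_preprocessing_hook`. A sketch only; the tuple form of `startswith` is a stylistic variant, and the actual patch at the end of this record keeps the chained `or` style and also adds `/links/`:

```python
def custom_preprocessing_hook(endpoints):
    filtered = []
    for (path, path_regex, method, callback) in endpoints:
        # Keep only the DRF endpoints we document, now including /queries/
        allowed_prefixes = (
            "/api/db/v0/databases/",
            "/api/db/v0/data_files/",
            "/api/db/v0/schemas/",
            "/api/db/v0/tables/",
            "/api/db/v0/queries/",
        )
        if path.startswith(allowed_prefixes):
            filtered.append((path, path_regex, method, callback))
    return filtered
```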
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `config/settings/openapi.py`
Content:
```
1 def custom_preprocessing_hook(endpoints):
2 filtered = []
3 for (path, path_regex, method, callback) in endpoints:
4 # Remove all but DRF API endpoints
5 if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"):
6 filtered.append((path, path_regex, method, callback))
7 return filtered
8
9
10 def remove_url_prefix_hook(result, **kwargs):
11 # Remove namespace and version URL prefix from the operation Id of the generated API schema
12 for path, path_info in result['paths'].items():
13 for method, operation in path_info.items():
14 operation_id = operation.get('operationId')
15 if operation_id:
16 if path.startswith('/api/db/v0/'):
17 operation['operationId'] = operation_id.replace('db_v0_', '')
18 elif path.startswith('/api/ui/v0/'):
19 operation['operationId'] = operation_id.replace('ui_v0_', '')
20
21 return result
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/config/settings/openapi.py b/config/settings/openapi.py
--- a/config/settings/openapi.py
+++ b/config/settings/openapi.py
@@ -2,7 +2,7 @@
filtered = []
for (path, path_regex, method, callback) in endpoints:
# Remove all but DRF API endpoints
- if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"):
+ if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/") or path.startswith("/api/db/v0/links/") or path.startswith("/api/db/v0/queries/"):
filtered.append((path, path_regex, method, callback))
return filtered
| {"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -2,7 +2,7 @@\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n+ if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\") or path.startswith(\"/api/db/v0/links/\") or path.startswith(\"/api/db/v0/queries/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n", "issue": "Add OpenAPI Specification for /queries/ endpoint and fix the incorrect specs\nThis is a part of the API Documentation project #2888 \r\n## Problem\r\nTo ensure the accuracy of the specs generated by drf-spectacular, we will generate an OpenAPI Specification specifically for a particular endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page. This issue aims to generate spec for /queries/ endpoint.\r\n\r\n## Proposed solution\r\n* Add /queries/ to the custom preprocessing hook function to filter out all endpoints except for the /queries/ endpoint. \r\n* Generate the spec file using the _./manage.py spectacular --color --file schema.yml_ command\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}], "after_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\") or path.startswith(\"/api/db/v0/links/\") or path.startswith(\"/api/db/v0/queries/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n 
operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]} | 670 | 204 |
gh_patches_debug_53990 | rasdani/github-patches | git_diff | mkdocs__mkdocs-3700 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Empty mkdocs_theme.yml breaks build
Hello! In the docs it's [stated](https://www.mkdocs.org/dev-guide/themes/#theme-configuration) that a theme **can** have an empty `mkdocs_theme.yml` file:
> However, if the theme offers no configuration options, the file is still required and can be left blank.
Unfortunately this seems to have changed recently and now themes with empty `mkdocs_theme.yml` files are causing an exception when building:
```shell
> mkdocs build --verbose
DEBUG - Loading configuration file: ./mkdocs.yml
DEBUG - Loaded theme configuration for 'custom_theme' from
'./venv/lib/python3.12/site-packages/custom_theme/mkdocs_theme.yml':
None
Traceback (most recent call last):
[...]
File "./venv/lib/python3.12/site-packages/mkdocs/config/config_options.py", line 868, in run_validation
return theme.Theme(**theme_config)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "./venv/lib/python3.12/site-packages/mkdocs/theme.py", line 61, in __init__
self._load_theme_config(name)
File "./venv/lib/python3.12/site-packages/mkdocs/theme.py", line 143, in _load_theme_config
if parent_theme := theme_config.pop('extends', None):
^^^^^^^^^^^^^^^^
AttributeError: 'NoneType' object has no attribute 'pop'
```
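
Since `yaml.load()` returns `None` for a blank file, one way to make the loader tolerant again is to normalise that case to an empty mapping. A self-contained sketch (not the project's actual patch) could be:

```python
import yaml

try:
    from yaml import CSafeLoader as SafeLoader
except ImportError:
    from yaml import SafeLoader


def load_theme_config(path):
    """Load mkdocs_theme.yml, treating an empty file as an empty mapping."""
    with open(path, 'rb') as f:
        config = yaml.load(f, SafeLoader)
    # yaml.load() yields None for a blank document; fall back to {} so that
    # later calls such as config.pop('extends', None) keep working.
    return config or {}
```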
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/theme.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4 import os
5 import warnings
6 from typing import Any, Collection, MutableMapping
7
8 import jinja2
9 import yaml
10
11 try:
12 from yaml import CSafeLoader as SafeLoader
13 except ImportError: # pragma: no cover
14 from yaml import SafeLoader # type: ignore
15
16 from mkdocs import localization, utils
17 from mkdocs.config.base import ValidationError
18 from mkdocs.utils import templates
19
20 log = logging.getLogger(__name__)
21
22
23 class Theme(MutableMapping[str, Any]):
24 """
25 A Theme object.
26
27 Args:
28 name: The name of the theme as defined by its entrypoint.
29 custom_dir: User defined directory for custom templates.
30 static_templates: A list of templates to render as static pages.
31
32 All other keywords are passed as-is and made available as a key/value mapping.
33 """
34
35 def __init__(
36 self,
37 name: str | None = None,
38 *,
39 custom_dir: str | None = None,
40 static_templates: Collection[str] = (),
41 locale: str | None = None,
42 **user_config,
43 ) -> None:
44 self.name = name
45 self._custom_dir = custom_dir
46 _vars: dict[str, Any] = {'name': name, 'locale': 'en'}
47 self.__vars = _vars
48
49 # MkDocs provided static templates are always included
50 package_dir = os.path.abspath(os.path.dirname(__file__))
51 mkdocs_templates = os.path.join(package_dir, 'templates')
52 self.static_templates = set(os.listdir(mkdocs_templates))
53
54 # Build self.dirs from various sources in order of precedence
55 self.dirs = []
56
57 if custom_dir is not None:
58 self.dirs.append(custom_dir)
59
60 if name:
61 self._load_theme_config(name)
62
63 # Include templates provided directly by MkDocs (outside any theme)
64 self.dirs.append(mkdocs_templates)
65
66 # Handle remaining user configs. Override theme configs (if set)
67 self.static_templates.update(static_templates)
68 _vars.update(user_config)
69
70 # Validate locale and convert to Locale object
71 if locale is None:
72 locale = _vars['locale']
73 _vars['locale'] = localization.parse_locale(locale)
74
75 name: str | None
76
77 @property
78 def locale(self) -> localization.Locale:
79 return self['locale']
80
81 @property
82 def custom_dir(self) -> str | None:
83 return self._custom_dir
84
85 @property
86 def _vars(self) -> dict[str, Any]:
87 warnings.warn(
88 "Do not access Theme._vars, instead access the keys of Theme directly.",
89 DeprecationWarning,
90 )
91 return self.__vars
92
93 dirs: list[str]
94
95 static_templates: set[str]
96
97 def __repr__(self) -> str:
98 return "{}(name={!r}, dirs={!r}, static_templates={!r}, {})".format(
99 self.__class__.__name__,
100 self.name,
101 self.dirs,
102 self.static_templates,
103 ', '.join(f'{k}={v!r}' for k, v in self.items()),
104 )
105
106 def __getitem__(self, key: str) -> Any:
107 return self.__vars[key]
108
109 def __setitem__(self, key: str, value):
110 self.__vars[key] = value
111
112 def __delitem__(self, key: str):
113 del self.__vars[key]
114
115 def __contains__(self, item: object) -> bool:
116 return item in self.__vars
117
118 def __len__(self):
119 return len(self.__vars)
120
121 def __iter__(self):
122 return iter(self.__vars)
123
124 def _load_theme_config(self, name: str) -> None:
125 """Recursively load theme and any parent themes."""
126 theme_dir = utils.get_theme_dir(name)
127 utils.get_themes.cache_clear()
128 self.dirs.append(theme_dir)
129
130 try:
131 file_path = os.path.join(theme_dir, 'mkdocs_theme.yml')
132 with open(file_path, 'rb') as f:
133 theme_config = yaml.load(f, SafeLoader)
134 except OSError as e:
135 log.debug(e)
136 raise ValidationError(
137 f"The theme '{name}' does not appear to have a configuration file. "
138 f"Please upgrade to a current version of the theme."
139 )
140
141 log.debug(f"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}")
142
143 if parent_theme := theme_config.pop('extends', None):
144 themes = utils.get_theme_names()
145 if parent_theme not in themes:
146 raise ValidationError(
147 f"The theme '{name}' inherits from '{parent_theme}', which does not appear to be installed. "
148 f"The available installed themes are: {', '.join(themes)}"
149 )
150 self._load_theme_config(parent_theme)
151
152 self.static_templates.update(theme_config.pop('static_templates', []))
153 self.__vars.update(theme_config)
154
155 def get_env(self) -> jinja2.Environment:
156 """Return a Jinja environment for the theme."""
157 loader = jinja2.FileSystemLoader(self.dirs)
158 # No autoreload because editing a template in the middle of a build is not useful.
159 env = jinja2.Environment(loader=loader, auto_reload=False)
160 env.filters['url'] = templates.url_filter
161 env.filters['script_tag'] = templates.script_tag_filter
162 localization.install_translations(env, self.locale, self.dirs)
163 return env
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/theme.py b/mkdocs/theme.py
--- a/mkdocs/theme.py
+++ b/mkdocs/theme.py
@@ -138,6 +138,9 @@
f"Please upgrade to a current version of the theme."
)
+ if theme_config is None:
+ theme_config = {}
+
log.debug(f"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}")
if parent_theme := theme_config.pop('extends', None):
| {"golden_diff": "diff --git a/mkdocs/theme.py b/mkdocs/theme.py\n--- a/mkdocs/theme.py\n+++ b/mkdocs/theme.py\n@@ -138,6 +138,9 @@\n f\"Please upgrade to a current version of the theme.\"\n )\n \n+ if theme_config is None:\n+ theme_config = {}\n+\n log.debug(f\"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}\")\n \n if parent_theme := theme_config.pop('extends', None):\n", "issue": "Empty mkdocs_theme.yml breaks build\nHello! In the docs its [stated](https://www.mkdocs.org/dev-guide/themes/#theme-configuration) that a theme **can** have an empty `mkdocs_theme.yml` file:\r\n\r\n> However, if the theme offers no configuration options, the file is still required and can be left blank.\r\n\r\nUnfortunately this seems to have changed recently and now themes with empty `mkdocs_theme.yml` files are causing an exception when building:\r\n\r\n```shell\r\n> mkdocs build --verbose\r\nDEBUG - Loading configuration file: ./mkdocs.yml\r\nDEBUG - Loaded theme configuration for 'custom_theme' from\r\n './venv/lib/python3.12/site-packages/custom_theme/mkdocs_theme.yml':\r\n None\r\nTraceback (most recent call last):\r\n [...]\r\n File \"./venv/lib/python3.12/site-packages/mkdocs/config/config_options.py\", line 868, in run_validation\r\n return theme.Theme(**theme_config)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"./venv/lib/python3.12/site-packages/mkdocs/theme.py\", line 61, in __init__\r\n self._load_theme_config(name)\r\n File \"./venv/lib/python3.12/site-packages/mkdocs/theme.py\", line 143, in _load_theme_config\r\n if parent_theme := theme_config.pop('extends', None):\r\n ^^^^^^^^^^^^^^^^\r\nAttributeError: 'NoneType' object has no attribute 'pop'\r\n```\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport os\nimport warnings\nfrom typing import Any, Collection, MutableMapping\n\nimport jinja2\nimport yaml\n\ntry:\n from yaml import CSafeLoader as SafeLoader\nexcept ImportError: # pragma: no cover\n from yaml import SafeLoader # type: ignore\n\nfrom mkdocs import localization, utils\nfrom mkdocs.config.base import ValidationError\nfrom mkdocs.utils import templates\n\nlog = logging.getLogger(__name__)\n\n\nclass Theme(MutableMapping[str, Any]):\n \"\"\"\n A Theme object.\n\n Args:\n name: The name of the theme as defined by its entrypoint.\n custom_dir: User defined directory for custom templates.\n static_templates: A list of templates to render as static pages.\n\n All other keywords are passed as-is and made available as a key/value mapping.\n \"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n *,\n custom_dir: str | None = None,\n static_templates: Collection[str] = (),\n locale: str | None = None,\n **user_config,\n ) -> None:\n self.name = name\n self._custom_dir = custom_dir\n _vars: dict[str, Any] = {'name': name, 'locale': 'en'}\n self.__vars = _vars\n\n # MkDocs provided static templates are always included\n package_dir = os.path.abspath(os.path.dirname(__file__))\n mkdocs_templates = os.path.join(package_dir, 'templates')\n self.static_templates = set(os.listdir(mkdocs_templates))\n\n # Build self.dirs from various sources in order of precedence\n self.dirs = []\n\n if custom_dir is not None:\n self.dirs.append(custom_dir)\n\n if name:\n self._load_theme_config(name)\n\n # Include templates provided directly by MkDocs (outside any theme)\n self.dirs.append(mkdocs_templates)\n\n # Handle remaining user configs. 
Override theme configs (if set)\n self.static_templates.update(static_templates)\n _vars.update(user_config)\n\n # Validate locale and convert to Locale object\n if locale is None:\n locale = _vars['locale']\n _vars['locale'] = localization.parse_locale(locale)\n\n name: str | None\n\n @property\n def locale(self) -> localization.Locale:\n return self['locale']\n\n @property\n def custom_dir(self) -> str | None:\n return self._custom_dir\n\n @property\n def _vars(self) -> dict[str, Any]:\n warnings.warn(\n \"Do not access Theme._vars, instead access the keys of Theme directly.\",\n DeprecationWarning,\n )\n return self.__vars\n\n dirs: list[str]\n\n static_templates: set[str]\n\n def __repr__(self) -> str:\n return \"{}(name={!r}, dirs={!r}, static_templates={!r}, {})\".format(\n self.__class__.__name__,\n self.name,\n self.dirs,\n self.static_templates,\n ', '.join(f'{k}={v!r}' for k, v in self.items()),\n )\n\n def __getitem__(self, key: str) -> Any:\n return self.__vars[key]\n\n def __setitem__(self, key: str, value):\n self.__vars[key] = value\n\n def __delitem__(self, key: str):\n del self.__vars[key]\n\n def __contains__(self, item: object) -> bool:\n return item in self.__vars\n\n def __len__(self):\n return len(self.__vars)\n\n def __iter__(self):\n return iter(self.__vars)\n\n def _load_theme_config(self, name: str) -> None:\n \"\"\"Recursively load theme and any parent themes.\"\"\"\n theme_dir = utils.get_theme_dir(name)\n utils.get_themes.cache_clear()\n self.dirs.append(theme_dir)\n\n try:\n file_path = os.path.join(theme_dir, 'mkdocs_theme.yml')\n with open(file_path, 'rb') as f:\n theme_config = yaml.load(f, SafeLoader)\n except OSError as e:\n log.debug(e)\n raise ValidationError(\n f\"The theme '{name}' does not appear to have a configuration file. \"\n f\"Please upgrade to a current version of the theme.\"\n )\n\n log.debug(f\"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}\")\n\n if parent_theme := theme_config.pop('extends', None):\n themes = utils.get_theme_names()\n if parent_theme not in themes:\n raise ValidationError(\n f\"The theme '{name}' inherits from '{parent_theme}', which does not appear to be installed. 
\"\n f\"The available installed themes are: {', '.join(themes)}\"\n )\n self._load_theme_config(parent_theme)\n\n self.static_templates.update(theme_config.pop('static_templates', []))\n self.__vars.update(theme_config)\n\n def get_env(self) -> jinja2.Environment:\n \"\"\"Return a Jinja environment for the theme.\"\"\"\n loader = jinja2.FileSystemLoader(self.dirs)\n # No autoreload because editing a template in the middle of a build is not useful.\n env = jinja2.Environment(loader=loader, auto_reload=False)\n env.filters['url'] = templates.url_filter\n env.filters['script_tag'] = templates.script_tag_filter\n localization.install_translations(env, self.locale, self.dirs)\n return env\n", "path": "mkdocs/theme.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport os\nimport warnings\nfrom typing import Any, Collection, MutableMapping\n\nimport jinja2\nimport yaml\n\ntry:\n from yaml import CSafeLoader as SafeLoader\nexcept ImportError: # pragma: no cover\n from yaml import SafeLoader # type: ignore\n\nfrom mkdocs import localization, utils\nfrom mkdocs.config.base import ValidationError\nfrom mkdocs.utils import templates\n\nlog = logging.getLogger(__name__)\n\n\nclass Theme(MutableMapping[str, Any]):\n \"\"\"\n A Theme object.\n\n Args:\n name: The name of the theme as defined by its entrypoint.\n custom_dir: User defined directory for custom templates.\n static_templates: A list of templates to render as static pages.\n\n All other keywords are passed as-is and made available as a key/value mapping.\n \"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n *,\n custom_dir: str | None = None,\n static_templates: Collection[str] = (),\n locale: str | None = None,\n **user_config,\n ) -> None:\n self.name = name\n self._custom_dir = custom_dir\n _vars: dict[str, Any] = {'name': name, 'locale': 'en'}\n self.__vars = _vars\n\n # MkDocs provided static templates are always included\n package_dir = os.path.abspath(os.path.dirname(__file__))\n mkdocs_templates = os.path.join(package_dir, 'templates')\n self.static_templates = set(os.listdir(mkdocs_templates))\n\n # Build self.dirs from various sources in order of precedence\n self.dirs = []\n\n if custom_dir is not None:\n self.dirs.append(custom_dir)\n\n if name:\n self._load_theme_config(name)\n\n # Include templates provided directly by MkDocs (outside any theme)\n self.dirs.append(mkdocs_templates)\n\n # Handle remaining user configs. 
Override theme configs (if set)\n self.static_templates.update(static_templates)\n _vars.update(user_config)\n\n # Validate locale and convert to Locale object\n if locale is None:\n locale = _vars['locale']\n _vars['locale'] = localization.parse_locale(locale)\n\n name: str | None\n\n @property\n def locale(self) -> localization.Locale:\n return self['locale']\n\n @property\n def custom_dir(self) -> str | None:\n return self._custom_dir\n\n @property\n def _vars(self) -> dict[str, Any]:\n warnings.warn(\n \"Do not access Theme._vars, instead access the keys of Theme directly.\",\n DeprecationWarning,\n )\n return self.__vars\n\n dirs: list[str]\n\n static_templates: set[str]\n\n def __repr__(self) -> str:\n return \"{}(name={!r}, dirs={!r}, static_templates={!r}, {})\".format(\n self.__class__.__name__,\n self.name,\n self.dirs,\n self.static_templates,\n ', '.join(f'{k}={v!r}' for k, v in self.items()),\n )\n\n def __getitem__(self, key: str) -> Any:\n return self.__vars[key]\n\n def __setitem__(self, key: str, value):\n self.__vars[key] = value\n\n def __delitem__(self, key: str):\n del self.__vars[key]\n\n def __contains__(self, item: object) -> bool:\n return item in self.__vars\n\n def __len__(self):\n return len(self.__vars)\n\n def __iter__(self):\n return iter(self.__vars)\n\n def _load_theme_config(self, name: str) -> None:\n \"\"\"Recursively load theme and any parent themes.\"\"\"\n theme_dir = utils.get_theme_dir(name)\n utils.get_themes.cache_clear()\n self.dirs.append(theme_dir)\n\n try:\n file_path = os.path.join(theme_dir, 'mkdocs_theme.yml')\n with open(file_path, 'rb') as f:\n theme_config = yaml.load(f, SafeLoader)\n except OSError as e:\n log.debug(e)\n raise ValidationError(\n f\"The theme '{name}' does not appear to have a configuration file. \"\n f\"Please upgrade to a current version of the theme.\"\n )\n\n if theme_config is None:\n theme_config = {}\n\n log.debug(f\"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}\")\n\n if parent_theme := theme_config.pop('extends', None):\n themes = utils.get_theme_names()\n if parent_theme not in themes:\n raise ValidationError(\n f\"The theme '{name}' inherits from '{parent_theme}', which does not appear to be installed. \"\n f\"The available installed themes are: {', '.join(themes)}\"\n )\n self._load_theme_config(parent_theme)\n\n self.static_templates.update(theme_config.pop('static_templates', []))\n self.__vars.update(theme_config)\n\n def get_env(self) -> jinja2.Environment:\n \"\"\"Return a Jinja environment for the theme.\"\"\"\n loader = jinja2.FileSystemLoader(self.dirs)\n # No autoreload because editing a template in the middle of a build is not useful.\n env = jinja2.Environment(loader=loader, auto_reload=False)\n env.filters['url'] = templates.url_filter\n env.filters['script_tag'] = templates.script_tag_filter\n localization.install_translations(env, self.locale, self.dirs)\n return env\n", "path": "mkdocs/theme.py"}]} | 2,144 | 113 |
gh_patches_debug_10650 | rasdani/github-patches | git_diff | enthought__chaco-499 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Demo noninteractive.py, Errors after <ENTER>
**Problem Description**
Errors after <ENTER>
**Reproduction Steps:**
Run noninteractive.py and hit ENTER
**Expected behavior:**
```
Please enter a path in which to place generated plots.
Press <ENTER> to generate in the current directory.
Path:
Traceback (most recent call last):
File "noninteractive.py", line 113, in <module>
draw_plot(get_directory('noninteractive.png'), size=(800, 600))
File "noninteractive.py", line 90, in get_directory
path = input('Path: ').strip()
File "<string>", line 0
^
SyntaxError: unexpected EOF while parsing
```
**OS, Python version:**
OSX, Python 2.7
xref: #385
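
On Python 2, `input()` evaluates the entered text, so pressing <ENTER> feeds an empty string to the parser and raises the SyntaxError above. A hedged compatibility sketch (using `raw_input` when it exists; `get_input` is a hypothetical local name) is:

```python
# Illustrative Python 2/3 shim, not the project's actual patch.
try:
    get_input = raw_input  # Python 2: returns the typed text unevaluated
except NameError:
    get_input = input      # Python 3: input() already returns a plain string

path = get_input('Path: ').strip()
```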
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/demo/noninteractive.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 This demonstrates how to create a plot offscreen and save it to an image
4 file on disk.
5 """
6 from __future__ import print_function
7
8 # Standard library imports
9 import os
10 import sys
11
12 # Major library imports
13 from numpy import linspace
14 from scipy.special import jn
15
16 # Enthought library imports
17 from traits.etsconfig.api import ETSConfig
18
19 # Chaco imports
20 from chaco.api import ArrayPlotData, Plot, PlotGraphicsContext
21 from chaco.example_support import COLOR_PALETTE
22
23 DPI = 72.0
24
25 # This is a bit of a hack, to work around the fact that line widths don't scale
26 # with the GraphicsContext's CTM.
27 dpi_scale = DPI / 72.0
28
29
30 def create_plot():
31 numpoints = 100
32 low = -5
33 high = 15.0
34 x = linspace(low, high, numpoints)
35 pd = ArrayPlotData(index=x)
36 p = Plot(pd, bgcolor="oldlace", padding=50, border_visible=True)
37 for i in range(10):
38 pd.set_data("y" + str(i), jn(i, x))
39 p.plot(("index", "y" + str(i)), color=tuple(COLOR_PALETTE[i]),
40 width=2.0 * dpi_scale)
41 p.x_grid.visible = True
42 p.x_grid.line_width *= dpi_scale
43 p.y_grid.visible = True
44 p.y_grid.line_width *= dpi_scale
45 p.legend.visible = True
46 return p
47
48
49 def draw_plot(filename, size=(800, 600)):
50 container = create_plot()
51 container.outer_bounds = list(size)
52 container.do_layout(force=True)
53 gc = PlotGraphicsContext(size, dpi=DPI)
54 gc.render_component(container)
55 gc.save(filename)
56 return
57
58
59 def draw_svg(filename, size=(800, 600)):
60 from chaco.svg_graphics_context import SVGGraphicsContext
61 container = create_plot()
62 container.bounds = list(size)
63 container.do_layout(force=True)
64 gc = SVGGraphicsContext(size)
65 gc.render_component(container)
66 gc.save(filename)
67
68
69 def draw_pdf(filename, size=(800, 600)):
70 from chaco.pdf_graphics_context import PdfPlotGraphicsContext
71 container = create_plot()
72 container.outer_bounds = list(size)
73 container.do_layout(force=True)
74 gc = PdfPlotGraphicsContext(filename=filename,
75 dest_box=(0.5, 0.5, 5.0, 5.0))
76
77 for i in range(2):
78 # draw the plot
79 gc.render_component(container)
80
81 #Start a new page for subsequent draw commands.
82 gc.add_page()
83
84 gc.save()
85
86
87 def get_directory(filename):
88 print('Please enter a path in which to place generated plots.')
89 print('Press <ENTER> to generate in the current directory.')
90 path = input('Path: ').strip()
91
92 if len(path) > 0 and not os.path.exists(path):
93 print('The given path does not exist.')
94 sys.exit()
95
96 if not os.path.isabs(path):
97 print('Creating image: ' + os.path.join(os.getcwd(), path, filename))
98
99 else:
100 print('Creating image: ' + os.path.join(path, filename))
101
102 return os.path.join(path, filename)
103
104
105 if __name__ == "__main__":
106 if ETSConfig.kiva_backend == 'svg':
107 # Render the plot as a SVG
108 draw_svg(get_directory('noninteractive.svg'), size=(800, 600))
109 elif ETSConfig.kiva_backend == 'pdf':
110 # Render the plot as a PDF, requires on ReportLab
111 draw_pdf(get_directory('noninteractive.pdf'))
112 else:
113 draw_plot(get_directory('noninteractive.png'), size=(800, 600))
114
115 # EOF
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/demo/noninteractive.py b/examples/demo/noninteractive.py
--- a/examples/demo/noninteractive.py
+++ b/examples/demo/noninteractive.py
@@ -87,7 +87,14 @@
def get_directory(filename):
print('Please enter a path in which to place generated plots.')
print('Press <ENTER> to generate in the current directory.')
- path = input('Path: ').strip()
+
+ # If python 2.7, use raw_input to parse empty string correctly
+ try:
+ get_input = raw_input
+ except NameError:
+ get_input = input
+
+ path = get_input('Path: ').strip()
if len(path) > 0 and not os.path.exists(path):
print('The given path does not exist.')
| {"golden_diff": "diff --git a/examples/demo/noninteractive.py b/examples/demo/noninteractive.py\n--- a/examples/demo/noninteractive.py\n+++ b/examples/demo/noninteractive.py\n@@ -87,7 +87,14 @@\n def get_directory(filename):\n print('Please enter a path in which to place generated plots.')\n print('Press <ENTER> to generate in the current directory.')\n- path = input('Path: ').strip()\n+\n+ # If python 2.7, use raw_input to parse empty string correctly\n+ try:\n+ get_input = raw_input\n+ except NameError:\n+ get_input = input\n+\n+ path = get_input('Path: ').strip()\n \n if len(path) > 0 and not os.path.exists(path):\n print('The given path does not exist.')\n", "issue": "Demo noninteractive.py, Errors after <ENTER>\n**Problem Description**\r\nErrors after <ENTER>\r\n**Reproduction Steps:**\r\n\r\nRun noninteractive.py and hit ENTER\r\n\r\n**Expected behavior:**\r\n```\r\n Please enter a path in which to place generated plots.\r\nPress <ENTER> to generate in the current directory.\r\nPath:\r\nTraceback (most recent call last):\r\n File \"noninteractive.py\", line 113, in <module>\r\n draw_plot(get_directory('noninteractive.png'), size=(800, 600))\r\n File \"noninteractive.py\", line 90, in get_directory\r\n path = input('Path: ').strip()\r\n File \"<string>\", line 0\r\n\r\n ^\r\nSyntaxError: unexpected EOF while parsing\r\n```\r\n**OS, Python version:**\r\nOSX, Python 2.7\r\n\r\nxref: #385 \n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nThis demonstrates how to create a plot offscreen and save it to an image\nfile on disk.\n\"\"\"\nfrom __future__ import print_function\n\n# Standard library imports\nimport os\nimport sys\n\n# Major library imports\nfrom numpy import linspace\nfrom scipy.special import jn\n\n# Enthought library imports\nfrom traits.etsconfig.api import ETSConfig\n\n# Chaco imports\nfrom chaco.api import ArrayPlotData, Plot, PlotGraphicsContext\nfrom chaco.example_support import COLOR_PALETTE\n\nDPI = 72.0\n\n# This is a bit of a hack, to work around the fact that line widths don't scale\n# with the GraphicsContext's CTM.\ndpi_scale = DPI / 72.0\n\n\ndef create_plot():\n numpoints = 100\n low = -5\n high = 15.0\n x = linspace(low, high, numpoints)\n pd = ArrayPlotData(index=x)\n p = Plot(pd, bgcolor=\"oldlace\", padding=50, border_visible=True)\n for i in range(10):\n pd.set_data(\"y\" + str(i), jn(i, x))\n p.plot((\"index\", \"y\" + str(i)), color=tuple(COLOR_PALETTE[i]),\n width=2.0 * dpi_scale)\n p.x_grid.visible = True\n p.x_grid.line_width *= dpi_scale\n p.y_grid.visible = True\n p.y_grid.line_width *= dpi_scale\n p.legend.visible = True\n return p\n\n\ndef draw_plot(filename, size=(800, 600)):\n container = create_plot()\n container.outer_bounds = list(size)\n container.do_layout(force=True)\n gc = PlotGraphicsContext(size, dpi=DPI)\n gc.render_component(container)\n gc.save(filename)\n return\n\n\ndef draw_svg(filename, size=(800, 600)):\n from chaco.svg_graphics_context import SVGGraphicsContext\n container = create_plot()\n container.bounds = list(size)\n container.do_layout(force=True)\n gc = SVGGraphicsContext(size)\n gc.render_component(container)\n gc.save(filename)\n\n\ndef draw_pdf(filename, size=(800, 600)):\n from chaco.pdf_graphics_context import PdfPlotGraphicsContext\n container = create_plot()\n container.outer_bounds = list(size)\n container.do_layout(force=True)\n gc = PdfPlotGraphicsContext(filename=filename,\n dest_box=(0.5, 0.5, 5.0, 5.0))\n\n for i in range(2):\n # draw the plot\n gc.render_component(container)\n\n #Start a new 
page for subsequent draw commands.\n gc.add_page()\n\n gc.save()\n\n\ndef get_directory(filename):\n print('Please enter a path in which to place generated plots.')\n print('Press <ENTER> to generate in the current directory.')\n path = input('Path: ').strip()\n\n if len(path) > 0 and not os.path.exists(path):\n print('The given path does not exist.')\n sys.exit()\n\n if not os.path.isabs(path):\n print('Creating image: ' + os.path.join(os.getcwd(), path, filename))\n\n else:\n print('Creating image: ' + os.path.join(path, filename))\n\n return os.path.join(path, filename)\n\n\nif __name__ == \"__main__\":\n if ETSConfig.kiva_backend == 'svg':\n # Render the plot as a SVG\n draw_svg(get_directory('noninteractive.svg'), size=(800, 600))\n elif ETSConfig.kiva_backend == 'pdf':\n # Render the plot as a PDF, requires on ReportLab\n draw_pdf(get_directory('noninteractive.pdf'))\n else:\n draw_plot(get_directory('noninteractive.png'), size=(800, 600))\n\n# EOF\n", "path": "examples/demo/noninteractive.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nThis demonstrates how to create a plot offscreen and save it to an image\nfile on disk.\n\"\"\"\nfrom __future__ import print_function\n\n# Standard library imports\nimport os\nimport sys\n\n# Major library imports\nfrom numpy import linspace\nfrom scipy.special import jn\n\n# Enthought library imports\nfrom traits.etsconfig.api import ETSConfig\n\n# Chaco imports\nfrom chaco.api import ArrayPlotData, Plot, PlotGraphicsContext\nfrom chaco.example_support import COLOR_PALETTE\n\nDPI = 72.0\n\n# This is a bit of a hack, to work around the fact that line widths don't scale\n# with the GraphicsContext's CTM.\ndpi_scale = DPI / 72.0\n\n\ndef create_plot():\n numpoints = 100\n low = -5\n high = 15.0\n x = linspace(low, high, numpoints)\n pd = ArrayPlotData(index=x)\n p = Plot(pd, bgcolor=\"oldlace\", padding=50, border_visible=True)\n for i in range(10):\n pd.set_data(\"y\" + str(i), jn(i, x))\n p.plot((\"index\", \"y\" + str(i)), color=tuple(COLOR_PALETTE[i]),\n width=2.0 * dpi_scale)\n p.x_grid.visible = True\n p.x_grid.line_width *= dpi_scale\n p.y_grid.visible = True\n p.y_grid.line_width *= dpi_scale\n p.legend.visible = True\n return p\n\n\ndef draw_plot(filename, size=(800, 600)):\n container = create_plot()\n container.outer_bounds = list(size)\n container.do_layout(force=True)\n gc = PlotGraphicsContext(size, dpi=DPI)\n gc.render_component(container)\n gc.save(filename)\n return\n\n\ndef draw_svg(filename, size=(800, 600)):\n from chaco.svg_graphics_context import SVGGraphicsContext\n container = create_plot()\n container.bounds = list(size)\n container.do_layout(force=True)\n gc = SVGGraphicsContext(size)\n gc.render_component(container)\n gc.save(filename)\n\n\ndef draw_pdf(filename, size=(800, 600)):\n from chaco.pdf_graphics_context import PdfPlotGraphicsContext\n container = create_plot()\n container.outer_bounds = list(size)\n container.do_layout(force=True)\n gc = PdfPlotGraphicsContext(filename=filename,\n dest_box=(0.5, 0.5, 5.0, 5.0))\n\n for i in range(2):\n # draw the plot\n gc.render_component(container)\n\n #Start a new page for subsequent draw commands.\n gc.add_page()\n\n gc.save()\n\n\ndef get_directory(filename):\n print('Please enter a path in which to place generated plots.')\n print('Press <ENTER> to generate in the current directory.')\n\n # If python 2.7, use raw_input to parse empty string correctly\n try:\n get_input = raw_input\n except NameError:\n get_input = input\n\n path = get_input('Path: 
').strip()\n\n if len(path) > 0 and not os.path.exists(path):\n print('The given path does not exist.')\n sys.exit()\n\n if not os.path.isabs(path):\n print('Creating image: ' + os.path.join(os.getcwd(), path, filename))\n\n else:\n print('Creating image: ' + os.path.join(path, filename))\n\n return os.path.join(path, filename)\n\n\nif __name__ == \"__main__\":\n if ETSConfig.kiva_backend == 'svg':\n # Render the plot as a SVG\n draw_svg(get_directory('noninteractive.svg'), size=(800, 600))\n elif ETSConfig.kiva_backend == 'pdf':\n # Render the plot as a PDF, requires on ReportLab\n draw_pdf(get_directory('noninteractive.pdf'))\n else:\n draw_plot(get_directory('noninteractive.png'), size=(800, 600))\n\n# EOF\n", "path": "examples/demo/noninteractive.py"}]} | 1,535 | 173 |
gh_patches_debug_172 | rasdani/github-patches | git_diff | ManageIQ__integration_tests-471 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Paginator returns wrong rec_end() result
When the record is the last one on its own on the last page, rec_end() incorrectly shows 1 instead of the rec_total() value.
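
A self-contained sketch of the intended behaviour (with hypothetical arguments standing in for the module's selenium helpers) might be:

```python
import re


def rec_end(page_text, total):
    """Return the index of the last record shown on the current page.

    Hypothetical signature for illustration: page_text stands in for
    _page_nums() and total for rec_total().
    """
    match = re.search(r'-(\d+)', page_text)
    if match:
        return match.groups()[0]
    # A lone trailing record has no "low-high" range, so report the total
    # instead of a hard-coded '1'.
    return total
```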
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cfme/web_ui/paginator.py`
Content:
```
1 """A set of functions for dealing with the paginator controls."""
2 import cfme.fixtures.pytest_selenium as sel
3 import re
4
5 _locator = '(//div[@id="paging_div"] | //div[@id="records_div"])'
6 _next = '//img[@alt="Next"]'
7 _previous = '//img[@alt="Previous"]'
8 _first = '//img[@alt="First"]'
9 _last = '//img[@alt="Last"]'
10 _num_results = '//select[@id="ppsetting" or @id="perpage_setting1"]'
11 _sort_by = '//select[@id="sort_choice"]'
12 _page_cell = '//td//td[contains(., " of ")]'
13 _check_all = '//input[@id="masterToggle"]'
14
15
16 def _page_nums():
17 return sel.element(_locator + _page_cell).text
18
19
20 def check_all():
21 """ Returns the Check All locator."""
22 return sel.element(_locator + _check_all)
23
24
25 def next():
26 """ Returns the Next button locator."""
27 btn = sel.element(_locator + _next)
28 return btn
29
30
31 def previous():
32 """ Returns the Previous button locator."""
33 btn = sel.element(_locator + _previous)
34 return btn
35
36
37 def first():
38 """ Returns the First button locator."""
39 btn = sel.element(_locator + _first)
40 return btn
41
42
43 def last():
44 """ Returns the Last button locator."""
45 btn = sel.element(_locator + _last)
46 return btn
47
48
49 def results_per_page(num):
50 """ Changes the number of results on a page.
51
52 Args:
53 num: A string, or a tuple of (type, value).
54 """
55 select = sel.element(_locator + _num_results)
56 sel.select(select, num)
57
58
59 def sort_by(sort):
60 """ Changes the sort by field.
61
62 Args:
63 num: A string, or a tuple of (type, value).
64 """
65 select = sel.element(_locator + _sort_by)
66 sel.select(select, sort)
67
68
69 def rec_offset():
70 """ Returns the first record offset."""
71 offset = re.search('\((Item|Items)*\s*(\d+)', _page_nums())
72 return offset.groups()[1]
73
74
75 def rec_end():
76 """ Returns the record set index."""
77 offset = re.search('-(\d+)', _page_nums())
78 if offset:
79 return offset.groups()[0]
80 else:
81 return '1'
82
83
84 def rec_total():
85 """ Returns the total number of records."""
86 offset = re.search('(\d+)\)', _page_nums())
87 return offset.groups()[0]
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cfme/web_ui/paginator.py b/cfme/web_ui/paginator.py
--- a/cfme/web_ui/paginator.py
+++ b/cfme/web_ui/paginator.py
@@ -78,7 +78,7 @@
if offset:
return offset.groups()[0]
else:
- return '1'
+ return rec_total()
def rec_total():
| {"golden_diff": "diff --git a/cfme/web_ui/paginator.py b/cfme/web_ui/paginator.py\n--- a/cfme/web_ui/paginator.py\n+++ b/cfme/web_ui/paginator.py\n@@ -78,7 +78,7 @@\n if offset:\n return offset.groups()[0]\n else:\n- return '1'\n+ return rec_total()\n \n \n def rec_total():\n", "issue": "Paginator returns wrong rec_end() result\nWhen record is last one on it's own on the last page, rec_end() incorrectly shows 1, instead of rec_total() value.\n\n", "before_files": [{"content": "\"\"\"A set of functions for dealing with the paginator controls.\"\"\"\nimport cfme.fixtures.pytest_selenium as sel\nimport re\n\n_locator = '(//div[@id=\"paging_div\"] | //div[@id=\"records_div\"])'\n_next = '//img[@alt=\"Next\"]'\n_previous = '//img[@alt=\"Previous\"]'\n_first = '//img[@alt=\"First\"]'\n_last = '//img[@alt=\"Last\"]'\n_num_results = '//select[@id=\"ppsetting\" or @id=\"perpage_setting1\"]'\n_sort_by = '//select[@id=\"sort_choice\"]'\n_page_cell = '//td//td[contains(., \" of \")]'\n_check_all = '//input[@id=\"masterToggle\"]'\n\n\ndef _page_nums():\n return sel.element(_locator + _page_cell).text\n\n\ndef check_all():\n \"\"\" Returns the Check All locator.\"\"\"\n return sel.element(_locator + _check_all)\n\n\ndef next():\n \"\"\" Returns the Next button locator.\"\"\"\n btn = sel.element(_locator + _next)\n return btn\n\n\ndef previous():\n \"\"\" Returns the Previous button locator.\"\"\"\n btn = sel.element(_locator + _previous)\n return btn\n\n\ndef first():\n \"\"\" Returns the First button locator.\"\"\"\n btn = sel.element(_locator + _first)\n return btn\n\n\ndef last():\n \"\"\" Returns the Last button locator.\"\"\"\n btn = sel.element(_locator + _last)\n return btn\n\n\ndef results_per_page(num):\n \"\"\" Changes the number of results on a page.\n\n Args:\n num: A string, or a tuple of (type, value).\n \"\"\"\n select = sel.element(_locator + _num_results)\n sel.select(select, num)\n\n\ndef sort_by(sort):\n \"\"\" Changes the sort by field.\n\n Args:\n num: A string, or a tuple of (type, value).\n \"\"\"\n select = sel.element(_locator + _sort_by)\n sel.select(select, sort)\n\n\ndef rec_offset():\n \"\"\" Returns the first record offset.\"\"\"\n offset = re.search('\\((Item|Items)*\\s*(\\d+)', _page_nums())\n return offset.groups()[1]\n\n\ndef rec_end():\n \"\"\" Returns the record set index.\"\"\"\n offset = re.search('-(\\d+)', _page_nums())\n if offset:\n return offset.groups()[0]\n else:\n return '1'\n\n\ndef rec_total():\n \"\"\" Returns the total number of records.\"\"\"\n offset = re.search('(\\d+)\\)', _page_nums())\n return offset.groups()[0]\n", "path": "cfme/web_ui/paginator.py"}], "after_files": [{"content": "\"\"\"A set of functions for dealing with the paginator controls.\"\"\"\nimport cfme.fixtures.pytest_selenium as sel\nimport re\n\n_locator = '(//div[@id=\"paging_div\"] | //div[@id=\"records_div\"])'\n_next = '//img[@alt=\"Next\"]'\n_previous = '//img[@alt=\"Previous\"]'\n_first = '//img[@alt=\"First\"]'\n_last = '//img[@alt=\"Last\"]'\n_num_results = '//select[@id=\"ppsetting\" or @id=\"perpage_setting1\"]'\n_sort_by = '//select[@id=\"sort_choice\"]'\n_page_cell = '//td//td[contains(., \" of \")]'\n_check_all = '//input[@id=\"masterToggle\"]'\n\n\ndef _page_nums():\n return sel.element(_locator + _page_cell).text\n\n\ndef check_all():\n \"\"\" Returns the Check All locator.\"\"\"\n return sel.element(_locator + _check_all)\n\n\ndef next():\n \"\"\" Returns the Next button locator.\"\"\"\n btn = sel.element(_locator + _next)\n return btn\n\n\ndef previous():\n \"\"\" 
Returns the Previous button locator.\"\"\"\n btn = sel.element(_locator + _previous)\n return btn\n\n\ndef first():\n \"\"\" Returns the First button locator.\"\"\"\n btn = sel.element(_locator + _first)\n return btn\n\n\ndef last():\n \"\"\" Returns the Last button locator.\"\"\"\n btn = sel.element(_locator + _last)\n return btn\n\n\ndef results_per_page(num):\n \"\"\" Changes the number of results on a page.\n\n Args:\n num: A string, or a tuple of (type, value).\n \"\"\"\n select = sel.element(_locator + _num_results)\n sel.select(select, num)\n\n\ndef sort_by(sort):\n \"\"\" Changes the sort by field.\n\n Args:\n num: A string, or a tuple of (type, value).\n \"\"\"\n select = sel.element(_locator + _sort_by)\n sel.select(select, sort)\n\n\ndef rec_offset():\n \"\"\" Returns the first record offset.\"\"\"\n offset = re.search('\\((Item|Items)*\\s*(\\d+)', _page_nums())\n return offset.groups()[1]\n\n\ndef rec_end():\n \"\"\" Returns the record set index.\"\"\"\n offset = re.search('-(\\d+)', _page_nums())\n if offset:\n return offset.groups()[0]\n else:\n return rec_total()\n\n\ndef rec_total():\n \"\"\" Returns the total number of records.\"\"\"\n offset = re.search('(\\d+)\\)', _page_nums())\n return offset.groups()[0]\n", "path": "cfme/web_ui/paginator.py"}]} | 1,021 | 87 |
gh_patches_debug_37039 | rasdani/github-patches | git_diff | microsoft__nni-1640 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nnictl package install not working for venv
It seems that `nnictl package install` installs the package at the user level, which makes it inaccessible from a venv.
Warning:
```
Installing collected packages: ConfigSpaceNNI, docutils, pynisher, pyrfr, sphinxcontrib-htmlhelp, sphinxcontrib-qthelp, sphinxcontrib-devhelp, imagesize, Jinja2, sphinxcontrib-applehelp, alabaster, sphinxcontrib-jsmath, Pygments, snowballstemmer, sphinxcontrib-serializinghtml, packaging, sphinx, sphinx-rtd-theme, smac
Running setup.py install for ConfigSpaceNNI ... done
The script pygmentize is installed in '/home/zhangyuge/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
Running setup.py install for snowballstemmer ... done
The scripts sphinx-apidoc, sphinx-autogen, sphinx-build and sphinx-quickstart are installed in '/home/zhangyuge/.local/bin' which is not on PATH.
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
Running setup.py install for smac ... done
```
Corresponding code:
https://github.com/microsoft/nni/blob/958efabf066b975a5cdcde293c9de33ec1430cb2/tools/nni_cmd/command_utils.py#L49
We could consider removing this `--user`.
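
One hedged alternative to dropping the flag outright is to add `--user` only when no virtualenv or conda environment is active; a hypothetical helper might look like:

```python
import os
import sys


def pip_install_args():
    """Hypothetical helper: build the pip command, adding --user only
    outside a virtualenv/conda environment."""
    python = 'python' if sys.platform == 'win32' else 'python3'
    args = [python, '-m', 'pip', 'install']
    in_env = 'VIRTUAL_ENV' in os.environ or 'CONDA_DEFAULT_ENV' in os.environ
    if not in_env:
        args.append('--user')
    return args
```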
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/nni_cmd/command_utils.py`
Content:
```
1 from subprocess import call, check_output
2 import sys
3 import os
4 import signal
5 import psutil
6 from .common_utils import print_error, print_normal, print_warning
7
8 def check_output_command(file_path, head=None, tail=None):
9 '''call check_output command to read content from a file'''
10 if os.path.exists(file_path):
11 if sys.platform == 'win32':
12 cmds = ['powershell.exe', 'type', file_path]
13 if head:
14 cmds += ['|', 'select', '-first', str(head)]
15 elif tail:
16 cmds += ['|', 'select', '-last', str(tail)]
17 return check_output(cmds, shell=True).decode('utf-8')
18 else:
19 cmds = ['cat', file_path]
20 if head:
21 cmds = ['head', '-' + str(head), file_path]
22 elif tail:
23 cmds = ['tail', '-' + str(tail), file_path]
24 return check_output(cmds, shell=False).decode('utf-8')
25 else:
26 print_error('{0} does not exist!'.format(file_path))
27 exit(1)
28
29 def kill_command(pid):
30 '''kill command'''
31 if sys.platform == 'win32':
32 process = psutil.Process(pid=pid)
33 process.send_signal(signal.CTRL_BREAK_EVENT)
34 else:
35 cmds = ['kill', str(pid)]
36 call(cmds)
37
38 def install_package_command(package_name):
39 '''install python package from pip'''
40 #TODO refactor python logic
41 if sys.platform == "win32":
42 cmds = 'python -m pip install --user {0}'.format(package_name)
43 else:
44 cmds = 'python3 -m pip install --user {0}'.format(package_name)
45 call(cmds, shell=True)
46
47 def install_requirements_command(requirements_path):
48 '''install requirements.txt'''
49 cmds = 'cd ' + requirements_path + ' && {0} -m pip install --user -r requirements.txt'
50 #TODO refactor python logic
51 if sys.platform == "win32":
52 cmds = cmds.format('python')
53 else:
54 cmds = cmds.format('python3')
55 call(cmds, shell=True)
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/nni_cmd/command_utils.py b/tools/nni_cmd/command_utils.py
--- a/tools/nni_cmd/command_utils.py
+++ b/tools/nni_cmd/command_utils.py
@@ -3,10 +3,11 @@
import os
import signal
import psutil
-from .common_utils import print_error, print_normal, print_warning
+from .common_utils import print_error, print_normal, print_warning
+
def check_output_command(file_path, head=None, tail=None):
- '''call check_output command to read content from a file'''
+ """call check_output command to read content from a file"""
if os.path.exists(file_path):
if sys.platform == 'win32':
cmds = ['powershell.exe', 'type', file_path]
@@ -26,8 +27,9 @@
print_error('{0} does not exist!'.format(file_path))
exit(1)
+
def kill_command(pid):
- '''kill command'''
+ """kill command"""
if sys.platform == 'win32':
process = psutil.Process(pid=pid)
process.send_signal(signal.CTRL_BREAK_EVENT)
@@ -35,21 +37,35 @@
cmds = ['kill', str(pid)]
call(cmds)
+
def install_package_command(package_name):
- '''install python package from pip'''
- #TODO refactor python logic
- if sys.platform == "win32":
- cmds = 'python -m pip install --user {0}'.format(package_name)
- else:
- cmds = 'python3 -m pip install --user {0}'.format(package_name)
- call(cmds, shell=True)
+ """
+ Install python package from pip.
+
+ Parameters
+ ----------
+ package_name: str
+ The name of package to be installed.
+ """
+ call(_get_pip_install() + [package_name], shell=False)
+
def install_requirements_command(requirements_path):
- '''install requirements.txt'''
- cmds = 'cd ' + requirements_path + ' && {0} -m pip install --user -r requirements.txt'
- #TODO refactor python logic
- if sys.platform == "win32":
- cmds = cmds.format('python')
- else:
- cmds = cmds.format('python3')
- call(cmds, shell=True)
+ """
+ Install packages from `requirements.txt` in `requirements_path`.
+
+ Parameters
+ ----------
+ requirements_path: str
+ Path to the directory that contains `requirements.txt`.
+ """
+ call(_get_pip_install() + ["-r", os.path.join(requirements_path, "requirements.txt")], shell=False)
+
+
+def _get_pip_install():
+ python = "python" if sys.platform == "win32" else "python3"
+ ret = [python, "-m", "pip", "install"]
+ if "CONDA_DEFAULT_ENV" not in os.environ and "VIRTUAL_ENV" not in os.environ and \
+ (sys.platform != "win32" and os.getuid() != 0): # on unix and not running in root
+ ret.append("--user") # not in virtualenv or conda
+ return ret
| {"golden_diff": "diff --git a/tools/nni_cmd/command_utils.py b/tools/nni_cmd/command_utils.py\n--- a/tools/nni_cmd/command_utils.py\n+++ b/tools/nni_cmd/command_utils.py\n@@ -3,10 +3,11 @@\n import os\n import signal\n import psutil\n-from .common_utils import print_error, print_normal, print_warning\n+from .common_utils import print_error, print_normal, print_warning\n+\n \n def check_output_command(file_path, head=None, tail=None):\n- '''call check_output command to read content from a file'''\n+ \"\"\"call check_output command to read content from a file\"\"\"\n if os.path.exists(file_path):\n if sys.platform == 'win32':\n cmds = ['powershell.exe', 'type', file_path]\n@@ -26,8 +27,9 @@\n print_error('{0} does not exist!'.format(file_path))\n exit(1)\n \n+\n def kill_command(pid):\n- '''kill command'''\n+ \"\"\"kill command\"\"\"\n if sys.platform == 'win32':\n process = psutil.Process(pid=pid)\n process.send_signal(signal.CTRL_BREAK_EVENT)\n@@ -35,21 +37,35 @@\n cmds = ['kill', str(pid)]\n call(cmds)\n \n+\n def install_package_command(package_name):\n- '''install python package from pip'''\n- #TODO refactor python logic\n- if sys.platform == \"win32\":\n- cmds = 'python -m pip install --user {0}'.format(package_name)\n- else:\n- cmds = 'python3 -m pip install --user {0}'.format(package_name)\n- call(cmds, shell=True)\n+ \"\"\"\n+ Install python package from pip.\n+\n+ Parameters\n+ ----------\n+ package_name: str\n+ The name of package to be installed.\n+ \"\"\"\n+ call(_get_pip_install() + [package_name], shell=False)\n+\n \n def install_requirements_command(requirements_path):\n- '''install requirements.txt'''\n- cmds = 'cd ' + requirements_path + ' && {0} -m pip install --user -r requirements.txt'\n- #TODO refactor python logic\n- if sys.platform == \"win32\":\n- cmds = cmds.format('python')\n- else:\n- cmds = cmds.format('python3')\n- call(cmds, shell=True)\n+ \"\"\"\n+ Install packages from `requirements.txt` in `requirements_path`.\n+\n+ Parameters\n+ ----------\n+ requirements_path: str\n+ Path to the directory that contains `requirements.txt`.\n+ \"\"\"\n+ call(_get_pip_install() + [\"-r\", os.path.join(requirements_path, \"requirements.txt\")], shell=False)\n+\n+\n+def _get_pip_install():\n+ python = \"python\" if sys.platform == \"win32\" else \"python3\"\n+ ret = [python, \"-m\", \"pip\", \"install\"]\n+ if \"CONDA_DEFAULT_ENV\" not in os.environ and \"VIRTUAL_ENV\" not in os.environ and \\\n+ (sys.platform != \"win32\" and os.getuid() != 0): # on unix and not running in root\n+ ret.append(\"--user\") # not in virtualenv or conda\n+ return ret\n", "issue": "nnictl package install not working for venv\nIt seems that `nnictl package install` makes the package going to user level, which makes it not accessible from a venv.\r\n\r\nWarning:\r\n\r\n```\r\nInstalling collected packages: ConfigSpaceNNI, docutils, pynisher, pyrfr, sphinxcontrib-htmlhelp, sphinxcontrib-qthelp, sphinxcontrib-devhelp, imagesize, Jinja2, sphinxcontrib-applehelp, alabaster, sphinxcontrib-jsmath, Pygments, snowballstemmer, sphinxcontrib-serializinghtml, packaging, sphinx, sphinx-rtd-theme, smac\r\n Running setup.py install for ConfigSpaceNNI ... done\r\n The script pygmentize is installed in '/home/zhangyuge/.local/bin' which is not on PATH.\r\n Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\r\n Running setup.py install for snowballstemmer ... 
done\r\n The scripts sphinx-apidoc, sphinx-autogen, sphinx-build and sphinx-quickstart are installed in '/home/zhangyuge/.local/bin' which is not on PATH.\r\n Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\r\n Running setup.py install for smac ... done\r\n```\r\n\r\nCorresponding code:\r\n\r\nhttps://github.com/microsoft/nni/blob/958efabf066b975a5cdcde293c9de33ec1430cb2/tools/nni_cmd/command_utils.py#L49\r\n\r\nWe could consider removing this `--user`.\n", "before_files": [{"content": "from subprocess import call, check_output\nimport sys\nimport os\nimport signal\nimport psutil\nfrom .common_utils import print_error, print_normal, print_warning\n\ndef check_output_command(file_path, head=None, tail=None):\n '''call check_output command to read content from a file'''\n if os.path.exists(file_path):\n if sys.platform == 'win32':\n cmds = ['powershell.exe', 'type', file_path]\n if head:\n cmds += ['|', 'select', '-first', str(head)]\n elif tail:\n cmds += ['|', 'select', '-last', str(tail)]\n return check_output(cmds, shell=True).decode('utf-8')\n else:\n cmds = ['cat', file_path]\n if head:\n cmds = ['head', '-' + str(head), file_path]\n elif tail:\n cmds = ['tail', '-' + str(tail), file_path]\n return check_output(cmds, shell=False).decode('utf-8')\n else:\n print_error('{0} does not exist!'.format(file_path))\n exit(1)\n\ndef kill_command(pid):\n '''kill command'''\n if sys.platform == 'win32':\n process = psutil.Process(pid=pid)\n process.send_signal(signal.CTRL_BREAK_EVENT)\n else:\n cmds = ['kill', str(pid)]\n call(cmds)\n\ndef install_package_command(package_name):\n '''install python package from pip'''\n #TODO refactor python logic\n if sys.platform == \"win32\":\n cmds = 'python -m pip install --user {0}'.format(package_name)\n else:\n cmds = 'python3 -m pip install --user {0}'.format(package_name)\n call(cmds, shell=True)\n\ndef install_requirements_command(requirements_path):\n '''install requirements.txt'''\n cmds = 'cd ' + requirements_path + ' && {0} -m pip install --user -r requirements.txt'\n #TODO refactor python logic\n if sys.platform == \"win32\":\n cmds = cmds.format('python')\n else:\n cmds = cmds.format('python3')\n call(cmds, shell=True)\n", "path": "tools/nni_cmd/command_utils.py"}], "after_files": [{"content": "from subprocess import call, check_output\nimport sys\nimport os\nimport signal\nimport psutil\nfrom .common_utils import print_error, print_normal, print_warning\n\n\ndef check_output_command(file_path, head=None, tail=None):\n \"\"\"call check_output command to read content from a file\"\"\"\n if os.path.exists(file_path):\n if sys.platform == 'win32':\n cmds = ['powershell.exe', 'type', file_path]\n if head:\n cmds += ['|', 'select', '-first', str(head)]\n elif tail:\n cmds += ['|', 'select', '-last', str(tail)]\n return check_output(cmds, shell=True).decode('utf-8')\n else:\n cmds = ['cat', file_path]\n if head:\n cmds = ['head', '-' + str(head), file_path]\n elif tail:\n cmds = ['tail', '-' + str(tail), file_path]\n return check_output(cmds, shell=False).decode('utf-8')\n else:\n print_error('{0} does not exist!'.format(file_path))\n exit(1)\n\n\ndef kill_command(pid):\n \"\"\"kill command\"\"\"\n if sys.platform == 'win32':\n process = psutil.Process(pid=pid)\n process.send_signal(signal.CTRL_BREAK_EVENT)\n else:\n cmds = ['kill', str(pid)]\n call(cmds)\n\n\ndef install_package_command(package_name):\n \"\"\"\n Install python package from pip.\n\n Parameters\n ----------\n package_name: 
str\n The name of package to be installed.\n \"\"\"\n call(_get_pip_install() + [package_name], shell=False)\n\n\ndef install_requirements_command(requirements_path):\n \"\"\"\n Install packages from `requirements.txt` in `requirements_path`.\n\n Parameters\n ----------\n requirements_path: str\n Path to the directory that contains `requirements.txt`.\n \"\"\"\n call(_get_pip_install() + [\"-r\", os.path.join(requirements_path, \"requirements.txt\")], shell=False)\n\n\ndef _get_pip_install():\n python = \"python\" if sys.platform == \"win32\" else \"python3\"\n ret = [python, \"-m\", \"pip\", \"install\"]\n if \"CONDA_DEFAULT_ENV\" not in os.environ and \"VIRTUAL_ENV\" not in os.environ and \\\n (sys.platform != \"win32\" and os.getuid() != 0): # on unix and not running in root\n ret.append(\"--user\") # not in virtualenv or conda\n return ret\n", "path": "tools/nni_cmd/command_utils.py"}]} | 1,190 | 719 |
gh_patches_debug_5096 | rasdani/github-patches | git_diff | holoviz__holoviews-4937 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stacked area only shows last label
@Pill-GZ found a bug (and a solution!) for the legend of a stacked area plot only showing the last label. See https://discourse.holoviz.org/t/how-to-add-legend-for-stacked-area-plots/2193
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holoviews/element/chart.py`
Content:
```
1 import numpy as np
2 import param
3
4 from ..core import util
5 from ..core import Dimension, Dataset, Element2D, NdOverlay, Overlay
6 from ..core.dimension import process_dimensions
7 from ..core.data import GridInterface
8 from .geom import Rectangles, Points, VectorField # noqa: backward compatible import
9 from .selection import Selection1DExpr, Selection2DExpr
10
11
12 class Chart(Dataset, Element2D):
13 """
14 A Chart is an abstract baseclass for elements representing one or
15 more independent and dependent variables defining a 1D coordinate
16 system with associated values. The independent variables or key
17 dimensions map onto the x-axis while the dependent variables are
18 usually mapped to the location, height or spread along the
19 y-axis. Any number of additional value dimensions may be
20 associated with a Chart.
21
22 If a chart's independent variable (or key dimension) is numeric
23 the chart will represent a discretely sampled version of the
24 underlying continuously sampled 1D space. Therefore indexing along
25 this variable will automatically snap to the closest coordinate.
26
27 Since a Chart is a subclass of a Dataset it supports the full set
28 of data interfaces but usually each dimension of a chart represents
29 a column stored in a dictionary, array or DataFrame.
30 """
31
32 kdims = param.List(default=[Dimension('x')], bounds=(1,2), doc="""
33 The key dimension(s) of a Chart represent the independent
34 variable(s).""")
35
36 group = param.String(default='Chart', constant=True)
37
38 vdims = param.List(default=[Dimension('y')], bounds=(1, None), doc="""
39 The value dimensions of the Chart, usually corresponding to a
40 number of dependent variables.""")
41
42 # Enables adding index if 1D array like data is supplied
43 _auto_indexable_1d = True
44
45 _max_kdim_count = 1 # Remove once kdims has bounds=(1,1) instead of warning
46 __abstract = True
47
48 def __init__(self, data, kdims=None, vdims=None, **params):
49 params.update(process_dimensions(kdims, vdims))
50 if len(params.get('kdims', [])) == self._max_kdim_count + 1:
51 self.param.warning('Chart elements should only be supplied a single kdim')
52 super(Chart, self).__init__(data, **params)
53
54 def __getitem__(self, index):
55 return super(Chart, self).__getitem__(index)
56
57
58 class Scatter(Selection2DExpr, Chart):
59 """
60 Scatter is a Chart element representing a set of points in a 1D
61 coordinate system where the key dimension maps to the points
62 location along the x-axis while the first value dimension
63 represents the location of the point along the y-axis.
64 """
65
66 group = param.String(default='Scatter', constant=True)
67
68
69 class Curve(Selection1DExpr, Chart):
70 """
71 Curve is a Chart element representing a line in a 1D coordinate
72 system where the key dimension maps on the line x-coordinate and
73 the first value dimension represents the height of the line along
74 the y-axis.
75 """
76
77 group = param.String(default='Curve', constant=True)
78
79
80 class ErrorBars(Selection1DExpr, Chart):
81 """
82 ErrorBars is a Chart element representing error bars in a 1D
83 coordinate system where the key dimension corresponds to the
84 location along the x-axis and the first value dimension
85 corresponds to the location along the y-axis and one or two
86 extra value dimensions corresponding to the symmetric or
87 asymetric errors either along x-axis or y-axis. If two value
88 dimensions are given, then the last value dimension will be
89 taken as symmetric errors. If three value dimensions are given
90 then the last two value dimensions will be taken as negative and
91 positive errors. By default the errors are defined along y-axis.
92 A parameter `horizontal`, when set `True`, will define the errors
93 along the x-axis.
94 """
95
96 group = param.String(default='ErrorBars', constant=True, doc="""
97 A string describing the quantity measured by the ErrorBars
98 object.""")
99
100 vdims = param.List(default=[Dimension('y'), Dimension('yerror')],
101 bounds=(1, None), constant=True)
102
103 horizontal = param.Boolean(default=False, doc="""
104 Whether the errors are along y-axis (vertical) or x-axis.""")
105
106 def range(self, dim, data_range=True, dimension_range=True):
107 """Return the lower and upper bounds of values along dimension.
108
109 Range of the y-dimension includes the symmetric or assymetric
110 error.
111
112 Args:
113 dimension: The dimension to compute the range on.
114 data_range (bool): Compute range from data values
115 dimension_range (bool): Include Dimension ranges
116 Whether to include Dimension range and soft_range
117 in range calculation
118
119 Returns:
120 Tuple containing the lower and upper bound
121 """
122 dim_with_err = 0 if self.horizontal else 1
123 didx = self.get_dimension_index(dim)
124 dim = self.get_dimension(dim)
125 if didx == dim_with_err and data_range and len(self):
126 mean = self.dimension_values(didx)
127 neg_error = self.dimension_values(2)
128 if len(self.dimensions()) > 3:
129 pos_error = self.dimension_values(3)
130 else:
131 pos_error = neg_error
132 lower = np.nanmin(mean-neg_error)
133 upper = np.nanmax(mean+pos_error)
134 if not dimension_range:
135 return (lower, upper)
136 return util.dimension_range(lower, upper, dim.range, dim.soft_range)
137 return super(ErrorBars, self).range(dim, data_range)
138
139
140
141 class Spread(ErrorBars):
142 """
143 Spread is a Chart element representing a spread of values or
144 confidence band in a 1D coordinate system. The key dimension(s)
145 corresponds to the location along the x-axis and the value
146 dimensions define the location along the y-axis as well as the
147 symmetric or assymetric spread.
148 """
149
150 group = param.String(default='Spread', constant=True)
151
152
153
154 class Bars(Selection1DExpr, Chart):
155 """
156 Bars is a Chart element representing categorical observations
157 using the height of rectangular bars. The key dimensions represent
158 the categorical groupings of the data, but may also be used to
159 stack the bars, while the first value dimension represents the
160 height of each bar.
161 """
162
163 group = param.String(default='Bars', constant=True)
164
165 kdims = param.List(default=[Dimension('x')], bounds=(1,3))
166
167 _max_kdim_count = 3
168
169
170 class Histogram(Selection1DExpr, Chart):
171 """
172 Histogram is a Chart element representing a number of bins in a 1D
173 coordinate system. The key dimension represents the binned values,
174 which may be declared as bin edges or bin centers, while the value
175 dimensions usually defines a count, frequency or density associated
176 with each bin.
177 """
178
179 datatype = param.List(default=['grid'])
180
181 group = param.String(default='Histogram', constant=True)
182
183 kdims = param.List(default=[Dimension('x')], bounds=(1,1), doc="""
184 Dimensions on Element2Ds determine the number of indexable
185 dimensions.""")
186
187 vdims = param.List(default=[Dimension('Frequency')], bounds=(1, None))
188
189 _binned = True
190
191 def __init__(self, data, edges=None, **params):
192 if data is None:
193 data = []
194 if edges is not None:
195 self.param.warning(
196 "Histogram edges should be supplied as a tuple "
197 "along with the values, passing the edges will "
198 "be deprecated in holoviews 2.0.")
199 data = (edges, data)
200 elif isinstance(data, tuple) and len(data) == 2 and len(data[0])+1 == len(data[1]):
201 data = data[::-1]
202
203 super(Histogram, self).__init__(data, **params)
204 def __setstate__(self, state):
205 """
206 Ensures old-style Histogram types without an interface can be unpickled.
207
208 Note: Deprecate as part of 2.0
209 """
210 if 'interface' not in state:
211 self.interface = GridInterface
212 x, y = state['_kdims_param_value'][0], state['_vdims_param_value'][0]
213 state['data'] = {x.name: state['data'][1], y.name: state['data'][0]}
214 super(Dataset, self).__setstate__(state)
215
216
217 @property
218 def values(self):
219 "Property to access the Histogram values provided for backward compatibility"
220 self.param.warning('Histogram.values is deprecated in favor of '
221 'common dimension_values method.')
222 return self.dimension_values(1)
223
224
225 @property
226 def edges(self):
227 "Property to access the Histogram edges provided for backward compatibility"
228 return self.interface.coords(self, self.kdims[0], edges=True)
229
230
231 class Spikes(Selection1DExpr, Chart):
232 """
233 Spikes is a Chart element which represents a number of discrete
234 spikes, events or observations in a 1D coordinate system. The key
235 dimension therefore represents the position of each spike along
236 the x-axis while the first value dimension, if defined, controls
237 the height along the y-axis. It may therefore be used to visualize
238 the distribution of discrete events, representing a rug plot, or
239 to draw the strength some signal.
240 """
241
242 group = param.String(default='Spikes', constant=True)
243
244 kdims = param.List(default=[Dimension('x')], bounds=(1, 1))
245
246 vdims = param.List(default=[])
247
248 _auto_indexable_1d = False
249
250
251
252 class Area(Curve):
253 """
254 Area is a Chart element representing the area under a curve or
255 between two curves in a 1D coordinate system. The key dimension
256 represents the location of each coordinate along the x-axis, while
257 the value dimension(s) represent the height of the area or the
258 lower and upper bounds of the area between curves.
259
260 Multiple areas may be stacked by overlaying them an passing them
261 to the stack method.
262 """
263
264 group = param.String(default='Area', constant=True)
265
266 @classmethod
267 def stack(cls, areas, baseline_name='Baseline'):
268 """
269 Stacks an (Nd)Overlay of Area or Curve Elements by offsetting
270 their baselines. To stack a HoloMap or DynamicMap use the map
271 method.
272 """
273 if not len(areas):
274 return areas
275 is_overlay = isinstance(areas, Overlay)
276 if is_overlay:
277 areas = NdOverlay({i: el for i, el in enumerate(areas)})
278 df = areas.dframe(multi_index=True)
279 levels = list(range(areas.ndims))
280 vdim = areas.last.vdims[0]
281 vdims = [vdim, baseline_name]
282 baseline = None
283 stacked = areas.clone(shared_data=False)
284 for key, sdf in df.groupby(level=levels):
285 sdf = sdf.droplevel(levels).reindex(index=df.index.levels[-1], fill_value=0)
286 if baseline is None:
287 sdf[baseline_name] = 0
288 else:
289 sdf[vdim.name] = sdf[vdim.name] + baseline
290 sdf[baseline_name] = baseline
291 baseline = sdf[vdim.name]
292 stacked[key] = areas.last.clone(sdf, vdims=vdims)
293 return Overlay(stacked.values()) if is_overlay else stacked
294
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/holoviews/element/chart.py b/holoviews/element/chart.py
--- a/holoviews/element/chart.py
+++ b/holoviews/element/chart.py
@@ -289,5 +289,5 @@
sdf[vdim.name] = sdf[vdim.name] + baseline
sdf[baseline_name] = baseline
baseline = sdf[vdim.name]
- stacked[key] = areas.last.clone(sdf, vdims=vdims)
+ stacked[key] = areas[key].clone(sdf, vdims=vdims)
return Overlay(stacked.values()) if is_overlay else stacked
| {"golden_diff": "diff --git a/holoviews/element/chart.py b/holoviews/element/chart.py\n--- a/holoviews/element/chart.py\n+++ b/holoviews/element/chart.py\n@@ -289,5 +289,5 @@\n sdf[vdim.name] = sdf[vdim.name] + baseline\n sdf[baseline_name] = baseline\n baseline = sdf[vdim.name]\n- stacked[key] = areas.last.clone(sdf, vdims=vdims)\n+ stacked[key] = areas[key].clone(sdf, vdims=vdims)\n return Overlay(stacked.values()) if is_overlay else stacked\n", "issue": "Stacked area only show last label\n@Pill-GZ found a bug (and a solution!) for the legend of a stacked area plot only showing the last label. See https://discourse.holoviz.org/t/how-to-add-legend-for-stacked-area-plots/2193\r\n\n", "before_files": [{"content": "import numpy as np\nimport param\n\nfrom ..core import util\nfrom ..core import Dimension, Dataset, Element2D, NdOverlay, Overlay\nfrom ..core.dimension import process_dimensions\nfrom ..core.data import GridInterface\nfrom .geom import Rectangles, Points, VectorField # noqa: backward compatible import\nfrom .selection import Selection1DExpr, Selection2DExpr\n\n\nclass Chart(Dataset, Element2D):\n \"\"\"\n A Chart is an abstract baseclass for elements representing one or\n more independent and dependent variables defining a 1D coordinate\n system with associated values. The independent variables or key\n dimensions map onto the x-axis while the dependent variables are\n usually mapped to the location, height or spread along the\n y-axis. Any number of additional value dimensions may be\n associated with a Chart.\n\n If a chart's independent variable (or key dimension) is numeric\n the chart will represent a discretely sampled version of the\n underlying continuously sampled 1D space. Therefore indexing along\n this variable will automatically snap to the closest coordinate.\n\n Since a Chart is a subclass of a Dataset it supports the full set\n of data interfaces but usually each dimension of a chart represents\n a column stored in a dictionary, array or DataFrame.\n \"\"\"\n\n kdims = param.List(default=[Dimension('x')], bounds=(1,2), doc=\"\"\"\n The key dimension(s) of a Chart represent the independent\n variable(s).\"\"\")\n\n group = param.String(default='Chart', constant=True)\n\n vdims = param.List(default=[Dimension('y')], bounds=(1, None), doc=\"\"\"\n The value dimensions of the Chart, usually corresponding to a\n number of dependent variables.\"\"\")\n\n # Enables adding index if 1D array like data is supplied\n _auto_indexable_1d = True\n\n _max_kdim_count = 1 # Remove once kdims has bounds=(1,1) instead of warning\n __abstract = True\n\n def __init__(self, data, kdims=None, vdims=None, **params):\n params.update(process_dimensions(kdims, vdims))\n if len(params.get('kdims', [])) == self._max_kdim_count + 1:\n self.param.warning('Chart elements should only be supplied a single kdim')\n super(Chart, self).__init__(data, **params)\n\n def __getitem__(self, index):\n return super(Chart, self).__getitem__(index)\n\n\nclass Scatter(Selection2DExpr, Chart):\n \"\"\"\n Scatter is a Chart element representing a set of points in a 1D\n coordinate system where the key dimension maps to the points\n location along the x-axis while the first value dimension\n represents the location of the point along the y-axis.\n \"\"\"\n\n group = param.String(default='Scatter', constant=True)\n\n\nclass Curve(Selection1DExpr, Chart):\n \"\"\"\n Curve is a Chart element representing a line in a 1D coordinate\n system where the key dimension maps on the line x-coordinate and\n the first value 
dimension represents the height of the line along\n the y-axis.\n \"\"\"\n\n group = param.String(default='Curve', constant=True)\n\n\nclass ErrorBars(Selection1DExpr, Chart):\n \"\"\"\n ErrorBars is a Chart element representing error bars in a 1D\n coordinate system where the key dimension corresponds to the\n location along the x-axis and the first value dimension\n corresponds to the location along the y-axis and one or two\n extra value dimensions corresponding to the symmetric or\n asymetric errors either along x-axis or y-axis. If two value\n dimensions are given, then the last value dimension will be\n taken as symmetric errors. If three value dimensions are given\n then the last two value dimensions will be taken as negative and\n positive errors. By default the errors are defined along y-axis.\n A parameter `horizontal`, when set `True`, will define the errors\n along the x-axis.\n \"\"\"\n\n group = param.String(default='ErrorBars', constant=True, doc=\"\"\"\n A string describing the quantity measured by the ErrorBars\n object.\"\"\")\n\n vdims = param.List(default=[Dimension('y'), Dimension('yerror')],\n bounds=(1, None), constant=True)\n\n horizontal = param.Boolean(default=False, doc=\"\"\"\n Whether the errors are along y-axis (vertical) or x-axis.\"\"\")\n\n def range(self, dim, data_range=True, dimension_range=True):\n \"\"\"Return the lower and upper bounds of values along dimension.\n\n Range of the y-dimension includes the symmetric or assymetric\n error.\n\n Args:\n dimension: The dimension to compute the range on.\n data_range (bool): Compute range from data values\n dimension_range (bool): Include Dimension ranges\n Whether to include Dimension range and soft_range\n in range calculation\n\n Returns:\n Tuple containing the lower and upper bound\n \"\"\"\n dim_with_err = 0 if self.horizontal else 1\n didx = self.get_dimension_index(dim)\n dim = self.get_dimension(dim)\n if didx == dim_with_err and data_range and len(self):\n mean = self.dimension_values(didx)\n neg_error = self.dimension_values(2)\n if len(self.dimensions()) > 3:\n pos_error = self.dimension_values(3)\n else:\n pos_error = neg_error\n lower = np.nanmin(mean-neg_error)\n upper = np.nanmax(mean+pos_error)\n if not dimension_range:\n return (lower, upper)\n return util.dimension_range(lower, upper, dim.range, dim.soft_range)\n return super(ErrorBars, self).range(dim, data_range)\n\n\n\nclass Spread(ErrorBars):\n \"\"\"\n Spread is a Chart element representing a spread of values or\n confidence band in a 1D coordinate system. The key dimension(s)\n corresponds to the location along the x-axis and the value\n dimensions define the location along the y-axis as well as the\n symmetric or assymetric spread.\n \"\"\"\n\n group = param.String(default='Spread', constant=True)\n\n\n\nclass Bars(Selection1DExpr, Chart):\n \"\"\"\n Bars is a Chart element representing categorical observations\n using the height of rectangular bars. The key dimensions represent\n the categorical groupings of the data, but may also be used to\n stack the bars, while the first value dimension represents the\n height of each bar.\n \"\"\"\n\n group = param.String(default='Bars', constant=True)\n\n kdims = param.List(default=[Dimension('x')], bounds=(1,3))\n\n _max_kdim_count = 3\n\n\nclass Histogram(Selection1DExpr, Chart):\n \"\"\"\n Histogram is a Chart element representing a number of bins in a 1D\n coordinate system. 
The key dimension represents the binned values,\n which may be declared as bin edges or bin centers, while the value\n dimensions usually defines a count, frequency or density associated\n with each bin.\n \"\"\"\n\n datatype = param.List(default=['grid'])\n\n group = param.String(default='Histogram', constant=True)\n\n kdims = param.List(default=[Dimension('x')], bounds=(1,1), doc=\"\"\"\n Dimensions on Element2Ds determine the number of indexable\n dimensions.\"\"\")\n\n vdims = param.List(default=[Dimension('Frequency')], bounds=(1, None))\n\n _binned = True\n\n def __init__(self, data, edges=None, **params):\n if data is None:\n data = []\n if edges is not None:\n self.param.warning(\n \"Histogram edges should be supplied as a tuple \"\n \"along with the values, passing the edges will \"\n \"be deprecated in holoviews 2.0.\")\n data = (edges, data)\n elif isinstance(data, tuple) and len(data) == 2 and len(data[0])+1 == len(data[1]):\n data = data[::-1]\n\n super(Histogram, self).__init__(data, **params)\n def __setstate__(self, state):\n \"\"\"\n Ensures old-style Histogram types without an interface can be unpickled.\n\n Note: Deprecate as part of 2.0\n \"\"\"\n if 'interface' not in state:\n self.interface = GridInterface\n x, y = state['_kdims_param_value'][0], state['_vdims_param_value'][0]\n state['data'] = {x.name: state['data'][1], y.name: state['data'][0]}\n super(Dataset, self).__setstate__(state)\n\n\n @property\n def values(self):\n \"Property to access the Histogram values provided for backward compatibility\"\n self.param.warning('Histogram.values is deprecated in favor of '\n 'common dimension_values method.')\n return self.dimension_values(1)\n\n\n @property\n def edges(self):\n \"Property to access the Histogram edges provided for backward compatibility\"\n return self.interface.coords(self, self.kdims[0], edges=True)\n\n\nclass Spikes(Selection1DExpr, Chart):\n \"\"\"\n Spikes is a Chart element which represents a number of discrete\n spikes, events or observations in a 1D coordinate system. The key\n dimension therefore represents the position of each spike along\n the x-axis while the first value dimension, if defined, controls\n the height along the y-axis. It may therefore be used to visualize\n the distribution of discrete events, representing a rug plot, or\n to draw the strength some signal.\n \"\"\"\n\n group = param.String(default='Spikes', constant=True)\n\n kdims = param.List(default=[Dimension('x')], bounds=(1, 1))\n\n vdims = param.List(default=[])\n\n _auto_indexable_1d = False\n\n\n\nclass Area(Curve):\n \"\"\"\n Area is a Chart element representing the area under a curve or\n between two curves in a 1D coordinate system. The key dimension\n represents the location of each coordinate along the x-axis, while\n the value dimension(s) represent the height of the area or the\n lower and upper bounds of the area between curves.\n\n Multiple areas may be stacked by overlaying them an passing them\n to the stack method.\n \"\"\"\n\n group = param.String(default='Area', constant=True)\n\n @classmethod\n def stack(cls, areas, baseline_name='Baseline'):\n \"\"\"\n Stacks an (Nd)Overlay of Area or Curve Elements by offsetting\n their baselines. 
To stack a HoloMap or DynamicMap use the map\n method.\n \"\"\"\n if not len(areas):\n return areas\n is_overlay = isinstance(areas, Overlay)\n if is_overlay:\n areas = NdOverlay({i: el for i, el in enumerate(areas)})\n df = areas.dframe(multi_index=True)\n levels = list(range(areas.ndims))\n vdim = areas.last.vdims[0]\n vdims = [vdim, baseline_name]\n baseline = None\n stacked = areas.clone(shared_data=False)\n for key, sdf in df.groupby(level=levels):\n sdf = sdf.droplevel(levels).reindex(index=df.index.levels[-1], fill_value=0)\n if baseline is None:\n sdf[baseline_name] = 0\n else:\n sdf[vdim.name] = sdf[vdim.name] + baseline\n sdf[baseline_name] = baseline\n baseline = sdf[vdim.name]\n stacked[key] = areas.last.clone(sdf, vdims=vdims)\n return Overlay(stacked.values()) if is_overlay else stacked\n", "path": "holoviews/element/chart.py"}], "after_files": [{"content": "import numpy as np\nimport param\n\nfrom ..core import util\nfrom ..core import Dimension, Dataset, Element2D, NdOverlay, Overlay\nfrom ..core.dimension import process_dimensions\nfrom ..core.data import GridInterface\nfrom .geom import Rectangles, Points, VectorField # noqa: backward compatible import\nfrom .selection import Selection1DExpr, Selection2DExpr\n\n\nclass Chart(Dataset, Element2D):\n \"\"\"\n A Chart is an abstract baseclass for elements representing one or\n more independent and dependent variables defining a 1D coordinate\n system with associated values. The independent variables or key\n dimensions map onto the x-axis while the dependent variables are\n usually mapped to the location, height or spread along the\n y-axis. Any number of additional value dimensions may be\n associated with a Chart.\n\n If a chart's independent variable (or key dimension) is numeric\n the chart will represent a discretely sampled version of the\n underlying continuously sampled 1D space. 
Therefore indexing along\n this variable will automatically snap to the closest coordinate.\n\n Since a Chart is a subclass of a Dataset it supports the full set\n of data interfaces but usually each dimension of a chart represents\n a column stored in a dictionary, array or DataFrame.\n \"\"\"\n\n kdims = param.List(default=[Dimension('x')], bounds=(1,2), doc=\"\"\"\n The key dimension(s) of a Chart represent the independent\n variable(s).\"\"\")\n\n group = param.String(default='Chart', constant=True)\n\n vdims = param.List(default=[Dimension('y')], bounds=(1, None), doc=\"\"\"\n The value dimensions of the Chart, usually corresponding to a\n number of dependent variables.\"\"\")\n\n # Enables adding index if 1D array like data is supplied\n _auto_indexable_1d = True\n\n _max_kdim_count = 1 # Remove once kdims has bounds=(1,1) instead of warning\n __abstract = True\n\n def __init__(self, data, kdims=None, vdims=None, **params):\n params.update(process_dimensions(kdims, vdims))\n if len(params.get('kdims', [])) == self._max_kdim_count + 1:\n self.param.warning('Chart elements should only be supplied a single kdim')\n super(Chart, self).__init__(data, **params)\n\n def __getitem__(self, index):\n return super(Chart, self).__getitem__(index)\n\n\nclass Scatter(Selection2DExpr, Chart):\n \"\"\"\n Scatter is a Chart element representing a set of points in a 1D\n coordinate system where the key dimension maps to the points\n location along the x-axis while the first value dimension\n represents the location of the point along the y-axis.\n \"\"\"\n\n group = param.String(default='Scatter', constant=True)\n\n\nclass Curve(Selection1DExpr, Chart):\n \"\"\"\n Curve is a Chart element representing a line in a 1D coordinate\n system where the key dimension maps on the line x-coordinate and\n the first value dimension represents the height of the line along\n the y-axis.\n \"\"\"\n\n group = param.String(default='Curve', constant=True)\n\n\nclass ErrorBars(Selection1DExpr, Chart):\n \"\"\"\n ErrorBars is a Chart element representing error bars in a 1D\n coordinate system where the key dimension corresponds to the\n location along the x-axis and the first value dimension\n corresponds to the location along the y-axis and one or two\n extra value dimensions corresponding to the symmetric or\n asymetric errors either along x-axis or y-axis. If two value\n dimensions are given, then the last value dimension will be\n taken as symmetric errors. If three value dimensions are given\n then the last two value dimensions will be taken as negative and\n positive errors. 
By default the errors are defined along y-axis.\n A parameter `horizontal`, when set `True`, will define the errors\n along the x-axis.\n \"\"\"\n\n group = param.String(default='ErrorBars', constant=True, doc=\"\"\"\n A string describing the quantity measured by the ErrorBars\n object.\"\"\")\n\n vdims = param.List(default=[Dimension('y'), Dimension('yerror')],\n bounds=(1, None), constant=True)\n\n horizontal = param.Boolean(default=False, doc=\"\"\"\n Whether the errors are along y-axis (vertical) or x-axis.\"\"\")\n\n def range(self, dim, data_range=True, dimension_range=True):\n \"\"\"Return the lower and upper bounds of values along dimension.\n\n Range of the y-dimension includes the symmetric or assymetric\n error.\n\n Args:\n dimension: The dimension to compute the range on.\n data_range (bool): Compute range from data values\n dimension_range (bool): Include Dimension ranges\n Whether to include Dimension range and soft_range\n in range calculation\n\n Returns:\n Tuple containing the lower and upper bound\n \"\"\"\n dim_with_err = 0 if self.horizontal else 1\n didx = self.get_dimension_index(dim)\n dim = self.get_dimension(dim)\n if didx == dim_with_err and data_range and len(self):\n mean = self.dimension_values(didx)\n neg_error = self.dimension_values(2)\n if len(self.dimensions()) > 3:\n pos_error = self.dimension_values(3)\n else:\n pos_error = neg_error\n lower = np.nanmin(mean-neg_error)\n upper = np.nanmax(mean+pos_error)\n if not dimension_range:\n return (lower, upper)\n return util.dimension_range(lower, upper, dim.range, dim.soft_range)\n return super(ErrorBars, self).range(dim, data_range)\n\n\n\nclass Spread(ErrorBars):\n \"\"\"\n Spread is a Chart element representing a spread of values or\n confidence band in a 1D coordinate system. The key dimension(s)\n corresponds to the location along the x-axis and the value\n dimensions define the location along the y-axis as well as the\n symmetric or assymetric spread.\n \"\"\"\n\n group = param.String(default='Spread', constant=True)\n\n\n\nclass Bars(Selection1DExpr, Chart):\n \"\"\"\n Bars is a Chart element representing categorical observations\n using the height of rectangular bars. The key dimensions represent\n the categorical groupings of the data, but may also be used to\n stack the bars, while the first value dimension represents the\n height of each bar.\n \"\"\"\n\n group = param.String(default='Bars', constant=True)\n\n kdims = param.List(default=[Dimension('x')], bounds=(1,3))\n\n _max_kdim_count = 3\n\n\nclass Histogram(Selection1DExpr, Chart):\n \"\"\"\n Histogram is a Chart element representing a number of bins in a 1D\n coordinate system. 
The key dimension represents the binned values,\n which may be declared as bin edges or bin centers, while the value\n dimensions usually defines a count, frequency or density associated\n with each bin.\n \"\"\"\n\n datatype = param.List(default=['grid'])\n\n group = param.String(default='Histogram', constant=True)\n\n kdims = param.List(default=[Dimension('x')], bounds=(1,1), doc=\"\"\"\n Dimensions on Element2Ds determine the number of indexable\n dimensions.\"\"\")\n\n vdims = param.List(default=[Dimension('Frequency')], bounds=(1, None))\n\n _binned = True\n\n def __init__(self, data, edges=None, **params):\n if data is None:\n data = []\n if edges is not None:\n self.param.warning(\n \"Histogram edges should be supplied as a tuple \"\n \"along with the values, passing the edges will \"\n \"be deprecated in holoviews 2.0.\")\n data = (edges, data)\n elif isinstance(data, tuple) and len(data) == 2 and len(data[0])+1 == len(data[1]):\n data = data[::-1]\n\n super(Histogram, self).__init__(data, **params)\n def __setstate__(self, state):\n \"\"\"\n Ensures old-style Histogram types without an interface can be unpickled.\n\n Note: Deprecate as part of 2.0\n \"\"\"\n if 'interface' not in state:\n self.interface = GridInterface\n x, y = state['_kdims_param_value'][0], state['_vdims_param_value'][0]\n state['data'] = {x.name: state['data'][1], y.name: state['data'][0]}\n super(Dataset, self).__setstate__(state)\n\n\n @property\n def values(self):\n \"Property to access the Histogram values provided for backward compatibility\"\n self.param.warning('Histogram.values is deprecated in favor of '\n 'common dimension_values method.')\n return self.dimension_values(1)\n\n\n @property\n def edges(self):\n \"Property to access the Histogram edges provided for backward compatibility\"\n return self.interface.coords(self, self.kdims[0], edges=True)\n\n\nclass Spikes(Selection1DExpr, Chart):\n \"\"\"\n Spikes is a Chart element which represents a number of discrete\n spikes, events or observations in a 1D coordinate system. The key\n dimension therefore represents the position of each spike along\n the x-axis while the first value dimension, if defined, controls\n the height along the y-axis. It may therefore be used to visualize\n the distribution of discrete events, representing a rug plot, or\n to draw the strength some signal.\n \"\"\"\n\n group = param.String(default='Spikes', constant=True)\n\n kdims = param.List(default=[Dimension('x')], bounds=(1, 1))\n\n vdims = param.List(default=[])\n\n _auto_indexable_1d = False\n\n\n\nclass Area(Curve):\n \"\"\"\n Area is a Chart element representing the area under a curve or\n between two curves in a 1D coordinate system. The key dimension\n represents the location of each coordinate along the x-axis, while\n the value dimension(s) represent the height of the area or the\n lower and upper bounds of the area between curves.\n\n Multiple areas may be stacked by overlaying them an passing them\n to the stack method.\n \"\"\"\n\n group = param.String(default='Area', constant=True)\n\n @classmethod\n def stack(cls, areas, baseline_name='Baseline'):\n \"\"\"\n Stacks an (Nd)Overlay of Area or Curve Elements by offsetting\n their baselines. 
To stack a HoloMap or DynamicMap use the map\n method.\n \"\"\"\n if not len(areas):\n return areas\n is_overlay = isinstance(areas, Overlay)\n if is_overlay:\n areas = NdOverlay({i: el for i, el in enumerate(areas)})\n df = areas.dframe(multi_index=True)\n levels = list(range(areas.ndims))\n vdim = areas.last.vdims[0]\n vdims = [vdim, baseline_name]\n baseline = None\n stacked = areas.clone(shared_data=False)\n for key, sdf in df.groupby(level=levels):\n sdf = sdf.droplevel(levels).reindex(index=df.index.levels[-1], fill_value=0)\n if baseline is None:\n sdf[baseline_name] = 0\n else:\n sdf[vdim.name] = sdf[vdim.name] + baseline\n sdf[baseline_name] = baseline\n baseline = sdf[vdim.name]\n stacked[key] = areas[key].clone(sdf, vdims=vdims)\n return Overlay(stacked.values()) if is_overlay else stacked\n", "path": "holoviews/element/chart.py"}]} | 3,617 | 144 |
gh_patches_debug_10219 | rasdani/github-patches | git_diff | docker__docker-py-1736 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
exec_start does not return command output
I encountered this problem in my work and managed to write an isolated test case, attached to this issue, that exposes it.
This program works well on Linux Mint 18.3 and Docker version 17.05.0-ce, build 89658be
but fails on Centos 7.3 with Docker version 1.13.1, build b303bf6/1.13.1
I investigated this issue more deeply and wrote an equivalent program using the Docker REST API and the requests_unixsocket library. That program, together with strace, shows that the body of the reply from the REST endpoint associated with exec_start is interpreted in the wrong way.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/utils/socket.py`
Content:
```
1 import errno
2 import os
3 import select
4 import struct
5
6 import six
7
8 try:
9 from ..transport import NpipeSocket
10 except ImportError:
11 NpipeSocket = type(None)
12
13
14 class SocketError(Exception):
15 pass
16
17
18 def read(socket, n=4096):
19 """
20 Reads at most n bytes from socket
21 """
22
23 recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
24
25 # wait for data to become available
26 if not isinstance(socket, NpipeSocket):
27 select.select([socket], [], [])
28
29 try:
30 if hasattr(socket, 'recv'):
31 return socket.recv(n)
32 return os.read(socket.fileno(), n)
33 except EnvironmentError as e:
34 if e.errno not in recoverable_errors:
35 raise
36
37
38 def read_exactly(socket, n):
39 """
40 Reads exactly n bytes from socket
41 Raises SocketError if there isn't enough data
42 """
43 data = six.binary_type()
44 while len(data) < n:
45 next_data = read(socket, n - len(data))
46 if not next_data:
47 raise SocketError("Unexpected EOF")
48 data += next_data
49 return data
50
51
52 def next_frame_size(socket):
53 """
54 Returns the size of the next frame of data waiting to be read from socket,
55 according to the protocol defined here:
56
57 https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
58 """
59 try:
60 data = read_exactly(socket, 8)
61 except SocketError:
62 return 0
63
64 _, actual = struct.unpack('>BxxxL', data)
65 return actual
66
67
68 def frames_iter(socket):
69 """
70 Returns a generator of frames read from socket
71 """
72 while True:
73 n = next_frame_size(socket)
74 if n == 0:
75 break
76 while n > 0:
77 result = read(socket, n)
78 if result is None:
79 continue
80 data_length = len(result)
81 if data_length == 0:
82 # We have reached EOF
83 return
84 n -= data_length
85 yield result
86
87
88 def socket_raw_iter(socket):
89 """
90 Returns a generator of data read from the socket.
91 This is used for non-multiplexed streams.
92 """
93 while True:
94 result = read(socket)
95 if len(result) == 0:
96 # We have reached EOF
97 return
98 yield result
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/utils/socket.py b/docker/utils/socket.py
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -59,7 +59,7 @@
try:
data = read_exactly(socket, 8)
except SocketError:
- return 0
+ return -1
_, actual = struct.unpack('>BxxxL', data)
return actual
@@ -71,7 +71,7 @@
"""
while True:
n = next_frame_size(socket)
- if n == 0:
+ if n < 0:
break
while n > 0:
result = read(socket, n)
| {"golden_diff": "diff --git a/docker/utils/socket.py b/docker/utils/socket.py\n--- a/docker/utils/socket.py\n+++ b/docker/utils/socket.py\n@@ -59,7 +59,7 @@\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n- return 0\n+ return -1\n \n _, actual = struct.unpack('>BxxxL', data)\n return actual\n@@ -71,7 +71,7 @@\n \"\"\"\n while True:\n n = next_frame_size(socket)\n- if n == 0:\n+ if n < 0:\n break\n while n > 0:\n result = read(socket, n)\n", "issue": "exec_start does not return command output\nI encountered this problem in my work and managed to write an isolated exposing test case attached to this issue.\r\n\r\nThis program works well on Linux Mint 18.3 and Docker version 17.05.0-ce, build 89658be\r\nbut fails on Centos 7.3 with Docker version 1.13.1, build b303bf6/1.13.1\r\n\r\nI investigated this issue deeper and wrote an equivalent program using docker REST API and requests_unixsocket library. The program + strace shows that the body in the reply for REST endpoint associated to exec_start if interpreted in the wrong way.\n", "before_files": [{"content": "import errno\nimport os\nimport select\nimport struct\n\nimport six\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nclass SocketError(Exception):\n pass\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n # wait for data to become available\n if not isinstance(socket, NpipeSocket):\n select.select([socket], [], [])\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n return os.read(socket.fileno(), n)\n except EnvironmentError as e:\n if e.errno not in recoverable_errors:\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = six.binary_type()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_size(socket):\n \"\"\"\n Returns the size of the next frame of data waiting to be read from socket,\n according to the protocol defined here:\n\n https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return 0\n\n _, actual = struct.unpack('>BxxxL', data)\n return actual\n\n\ndef frames_iter(socket):\n \"\"\"\n Returns a generator of frames read from socket\n \"\"\"\n while True:\n n = next_frame_size(socket)\n if n == 0:\n break\n while n > 0:\n result = read(socket, n)\n if result is None:\n continue\n data_length = len(result)\n if data_length == 0:\n # We have reached EOF\n return\n n -= data_length\n yield result\n\n\ndef socket_raw_iter(socket):\n \"\"\"\n Returns a generator of data read from the socket.\n This is used for non-multiplexed streams.\n \"\"\"\n while True:\n result = read(socket)\n if len(result) == 0:\n # We have reached EOF\n return\n yield result\n", "path": "docker/utils/socket.py"}], "after_files": [{"content": "import errno\nimport os\nimport select\nimport struct\n\nimport six\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nclass SocketError(Exception):\n pass\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n # wait for data to become 
available\n if not isinstance(socket, NpipeSocket):\n select.select([socket], [], [])\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n return os.read(socket.fileno(), n)\n except EnvironmentError as e:\n if e.errno not in recoverable_errors:\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = six.binary_type()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_size(socket):\n \"\"\"\n Returns the size of the next frame of data waiting to be read from socket,\n according to the protocol defined here:\n\n https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return -1\n\n _, actual = struct.unpack('>BxxxL', data)\n return actual\n\n\ndef frames_iter(socket):\n \"\"\"\n Returns a generator of frames read from socket\n \"\"\"\n while True:\n n = next_frame_size(socket)\n if n < 0:\n break\n while n > 0:\n result = read(socket, n)\n if result is None:\n continue\n data_length = len(result)\n if data_length == 0:\n # We have reached EOF\n return\n n -= data_length\n yield result\n\n\ndef socket_raw_iter(socket):\n \"\"\"\n Returns a generator of data read from the socket.\n This is used for non-multiplexed streams.\n \"\"\"\n while True:\n result = read(socket)\n if len(result) == 0:\n # We have reached EOF\n return\n yield result\n", "path": "docker/utils/socket.py"}]} | 1,125 | 148 |
gh_patches_debug_8919 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-522 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Discovery checks for removed oauth2client dependency
In 9eb303e10cc9d6aa09137d1d68a0673e5aa5b4b0, 3 days ago, the dependency on `oauth2client` was dropped (yay!), but it seems that the Discovery API client is still checking for oauth2client in [apiclient/\_\_init\_\_.py](https://github.com/google/google-api-python-client/blob/081d2a336393a5d497339e462875294cfd8c81ea/apiclient/__init__.py#L7-L16) and claiming I need to upgrade my installation of `google-api-python-client`. Running `pip install -I google-api-python-client` doesn't clear the error message.
```
$ python3 -m venv test-gapi
epall@Calypso:/private/tmp
$ . test-gapi/bin/activate
(test-gapi) epall@Calypso:/private/tmp
$ pip freeze
You are using pip version 9.0.3, however version 10.0.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
(test-gapi) epall@Calypso:/private/tmp
$ pip install google-api-python-client
Collecting google-api-python-client
Using cached https://files.pythonhosted.org/packages/77/24/6a493cff23e60d882598e92b03d9f2c3c77299a9578b4411636616c039f4/google_api_python_client-1.7.1-py2.py3-none-any.whl
Collecting uritemplate<4dev,>=3.0.0 (from google-api-python-client)
Using cached https://files.pythonhosted.org/packages/e5/7d/9d5a640c4f8bf2c8b1afc015e9a9d8de32e13c9016dcc4b0ec03481fb396/uritemplate-3.0.0-py2.py3-none-any.whl
Collecting httplib2<1dev,>=0.9.2 (from google-api-python-client)
Collecting google-auth>=1.4.1 (from google-api-python-client)
Using cached https://files.pythonhosted.org/packages/53/06/6e6d5bfa4d23ee40efd772d6b681a7afecd859a9176e564b8c329382370f/google_auth-1.5.0-py2.py3-none-any.whl
Collecting google-auth-httplib2>=0.0.3 (from google-api-python-client)
Using cached https://files.pythonhosted.org/packages/33/49/c814d6d438b823441552198f096fcd0377fd6c88714dbed34f1d3c8c4389/google_auth_httplib2-0.0.3-py2.py3-none-any.whl
Collecting six<2dev,>=1.6.1 (from google-api-python-client)
Using cached https://files.pythonhosted.org/packages/67/4b/141a581104b1f6397bfa78ac9d43d8ad29a7ca43ea90a2d863fe3056e86a/six-1.11.0-py2.py3-none-any.whl
Collecting cachetools>=2.0.0 (from google-auth>=1.4.1->google-api-python-client)
Using cached https://files.pythonhosted.org/packages/0a/58/cbee863250b31d80f47401d04f34038db6766f95dea1cc909ea099c7e571/cachetools-2.1.0-py2.py3-none-any.whl
Collecting rsa>=3.1.4 (from google-auth>=1.4.1->google-api-python-client)
Using cached https://files.pythonhosted.org/packages/e1/ae/baedc9cb175552e95f3395c43055a6a5e125ae4d48a1d7a924baca83e92e/rsa-3.4.2-py2.py3-none-any.whl
Collecting pyasn1-modules>=0.2.1 (from google-auth>=1.4.1->google-api-python-client)
Using cached https://files.pythonhosted.org/packages/e9/51/bcd96bf6231d4b2cc5e023c511bee86637ba375c44a6f9d1b4b7ad1ce4b9/pyasn1_modules-0.2.1-py2.py3-none-any.whl
Collecting pyasn1>=0.1.3 (from rsa>=3.1.4->google-auth>=1.4.1->google-api-python-client)
Using cached https://files.pythonhosted.org/packages/a0/70/2c27740f08e477499ce19eefe05dbcae6f19fdc49e9e82ce4768be0643b9/pyasn1-0.4.3-py2.py3-none-any.whl
Installing collected packages: uritemplate, httplib2, cachetools, pyasn1, rsa, pyasn1-modules, six, google-auth, google-auth-httplib2, google-api-python-client
Successfully installed cachetools-2.1.0 google-api-python-client-1.7.1 google-auth-1.5.0 google-auth-httplib2-0.0.3 httplib2-0.11.3 pyasn1-0.4.3 pyasn1-modules-0.2.1 rsa-3.4.2 six-1.11.0 uritemplate-3.0.0
You are using pip version 9.0.3, however version 10.0.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
(test-gapi) epall@Calypso:/private/tmp
$ python
Python 3.6.5 (default, Apr 10 2018, 11:19:37)
[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import apiclient.discovery
Traceback (most recent call last):
File "/private/tmp/test-gapi/lib/python3.6/site-packages/apiclient/__init__.py", line 8, in <module>
import oauth2client
ModuleNotFoundError: No module named 'oauth2client'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/private/tmp/test-gapi/lib/python3.6/site-packages/apiclient/__init__.py", line 11, in <module>
'Previous version of google-api-python-client detected; due to a '
RuntimeError: Previous version of google-api-python-client detected; due to a packaging issue, we cannot perform an in-place upgrade. To repair, remove and reinstall this package, along with oauth2client and uritemplate. One can do this with pip via
pip install -I google-api-python-client
>>>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apiclient/__init__.py`
Content:
```
1 """Retain apiclient as an alias for googleapiclient."""
2
3 from six import iteritems
4
5 import googleapiclient
6
7 try:
8 import oauth2client
9 except ImportError:
10 raise RuntimeError(
11 'Previous version of google-api-python-client detected; due to a '
12 'packaging issue, we cannot perform an in-place upgrade. To repair, '
13 'remove and reinstall this package, along with oauth2client and '
14 'uritemplate. One can do this with pip via\n'
15 ' pip install -I google-api-python-client'
16 )
17
18 from googleapiclient import channel
19 from googleapiclient import discovery
20 from googleapiclient import errors
21 from googleapiclient import http
22 from googleapiclient import mimeparse
23 from googleapiclient import model
24 from googleapiclient import sample_tools
25 from googleapiclient import schema
26
27 __version__ = googleapiclient.__version__
28
29 _SUBMODULES = {
30 'channel': channel,
31 'discovery': discovery,
32 'errors': errors,
33 'http': http,
34 'mimeparse': mimeparse,
35 'model': model,
36 'sample_tools': sample_tools,
37 'schema': schema,
38 }
39
40 import sys
41 for module_name, module in iteritems(_SUBMODULES):
42 sys.modules['apiclient.%s' % module_name] = module
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apiclient/__init__.py b/apiclient/__init__.py
--- a/apiclient/__init__.py
+++ b/apiclient/__init__.py
@@ -4,17 +4,6 @@
import googleapiclient
-try:
- import oauth2client
-except ImportError:
- raise RuntimeError(
- 'Previous version of google-api-python-client detected; due to a '
- 'packaging issue, we cannot perform an in-place upgrade. To repair, '
- 'remove and reinstall this package, along with oauth2client and '
- 'uritemplate. One can do this with pip via\n'
- ' pip install -I google-api-python-client'
- )
-
from googleapiclient import channel
from googleapiclient import discovery
from googleapiclient import errors
| {"golden_diff": "diff --git a/apiclient/__init__.py b/apiclient/__init__.py\n--- a/apiclient/__init__.py\n+++ b/apiclient/__init__.py\n@@ -4,17 +4,6 @@\n \n import googleapiclient\n \n-try:\n- import oauth2client\n-except ImportError:\n- raise RuntimeError(\n- 'Previous version of google-api-python-client detected; due to a '\n- 'packaging issue, we cannot perform an in-place upgrade. To repair, '\n- 'remove and reinstall this package, along with oauth2client and '\n- 'uritemplate. One can do this with pip via\\n'\n- ' pip install -I google-api-python-client'\n- )\n-\n from googleapiclient import channel\n from googleapiclient import discovery\n from googleapiclient import errors\n", "issue": "Discovery checks for removed oauth2client dependency \nIn 9eb303e10cc9d6aa09137d1d68a0673e5aa5b4b0, 3 days ago, the dependency on `oauth2client` was dropped (yay!), but it seems that the Discovery API client is still checking for oauth2client in [apiclient/\\_\\_init\\_\\_.py](https://github.com/google/google-api-python-client/blob/081d2a336393a5d497339e462875294cfd8c81ea/apiclient/__init__.py#L7-L16) and claiming I need to upgrade my installation of `google-api-python-client`. Running `pip install -I google-api-python-client` doesn't clear the error message.\r\n\r\n```\r\n$ python3 -m venv test-gapi\r\nepall@Calypso:/private/tmp\r\n$ . test-gapi/bin/activate\r\n(test-gapi) epall@Calypso:/private/tmp\r\n$ pip freeze\r\nYou are using pip version 9.0.3, however version 10.0.1 is available.\r\nYou should consider upgrading via the 'pip install --upgrade pip' command.\r\n(test-gapi) epall@Calypso:/private/tmp\r\n$ pip install google-api-python-client\r\nCollecting google-api-python-client\r\n Using cached https://files.pythonhosted.org/packages/77/24/6a493cff23e60d882598e92b03d9f2c3c77299a9578b4411636616c039f4/google_api_python_client-1.7.1-py2.py3-none-any.whl\r\nCollecting uritemplate<4dev,>=3.0.0 (from google-api-python-client)\r\n Using cached https://files.pythonhosted.org/packages/e5/7d/9d5a640c4f8bf2c8b1afc015e9a9d8de32e13c9016dcc4b0ec03481fb396/uritemplate-3.0.0-py2.py3-none-any.whl\r\nCollecting httplib2<1dev,>=0.9.2 (from google-api-python-client)\r\nCollecting google-auth>=1.4.1 (from google-api-python-client)\r\n Using cached https://files.pythonhosted.org/packages/53/06/6e6d5bfa4d23ee40efd772d6b681a7afecd859a9176e564b8c329382370f/google_auth-1.5.0-py2.py3-none-any.whl\r\nCollecting google-auth-httplib2>=0.0.3 (from google-api-python-client)\r\n Using cached https://files.pythonhosted.org/packages/33/49/c814d6d438b823441552198f096fcd0377fd6c88714dbed34f1d3c8c4389/google_auth_httplib2-0.0.3-py2.py3-none-any.whl\r\nCollecting six<2dev,>=1.6.1 (from google-api-python-client)\r\n Using cached https://files.pythonhosted.org/packages/67/4b/141a581104b1f6397bfa78ac9d43d8ad29a7ca43ea90a2d863fe3056e86a/six-1.11.0-py2.py3-none-any.whl\r\nCollecting cachetools>=2.0.0 (from google-auth>=1.4.1->google-api-python-client)\r\n Using cached https://files.pythonhosted.org/packages/0a/58/cbee863250b31d80f47401d04f34038db6766f95dea1cc909ea099c7e571/cachetools-2.1.0-py2.py3-none-any.whl\r\nCollecting rsa>=3.1.4 (from google-auth>=1.4.1->google-api-python-client)\r\n Using cached https://files.pythonhosted.org/packages/e1/ae/baedc9cb175552e95f3395c43055a6a5e125ae4d48a1d7a924baca83e92e/rsa-3.4.2-py2.py3-none-any.whl\r\nCollecting pyasn1-modules>=0.2.1 (from google-auth>=1.4.1->google-api-python-client)\r\n Using cached 
https://files.pythonhosted.org/packages/e9/51/bcd96bf6231d4b2cc5e023c511bee86637ba375c44a6f9d1b4b7ad1ce4b9/pyasn1_modules-0.2.1-py2.py3-none-any.whl\r\nCollecting pyasn1>=0.1.3 (from rsa>=3.1.4->google-auth>=1.4.1->google-api-python-client)\r\n Using cached https://files.pythonhosted.org/packages/a0/70/2c27740f08e477499ce19eefe05dbcae6f19fdc49e9e82ce4768be0643b9/pyasn1-0.4.3-py2.py3-none-any.whl\r\nInstalling collected packages: uritemplate, httplib2, cachetools, pyasn1, rsa, pyasn1-modules, six, google-auth, google-auth-httplib2, google-api-python-client\r\nSuccessfully installed cachetools-2.1.0 google-api-python-client-1.7.1 google-auth-1.5.0 google-auth-httplib2-0.0.3 httplib2-0.11.3 pyasn1-0.4.3 pyasn1-modules-0.2.1 rsa-3.4.2 six-1.11.0 uritemplate-3.0.0\r\nYou are using pip version 9.0.3, however version 10.0.1 is available.\r\nYou should consider upgrading via the 'pip install --upgrade pip' command.\r\n(test-gapi) epall@Calypso:/private/tmp\r\n$ python\r\nPython 3.6.5 (default, Apr 10 2018, 11:19:37)\r\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)] on darwin\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import apiclient.discovery\r\nTraceback (most recent call last):\r\n File \"/private/tmp/test-gapi/lib/python3.6/site-packages/apiclient/__init__.py\", line 8, in <module>\r\n import oauth2client\r\nModuleNotFoundError: No module named 'oauth2client'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/private/tmp/test-gapi/lib/python3.6/site-packages/apiclient/__init__.py\", line 11, in <module>\r\n 'Previous version of google-api-python-client detected; due to a '\r\nRuntimeError: Previous version of google-api-python-client detected; due to a packaging issue, we cannot perform an in-place upgrade. To repair, remove and reinstall this package, along with oauth2client and uritemplate. One can do this with pip via\r\n pip install -I google-api-python-client\r\n>>>\r\n```\n", "before_files": [{"content": "\"\"\"Retain apiclient as an alias for googleapiclient.\"\"\"\n\nfrom six import iteritems\n\nimport googleapiclient\n\ntry:\n import oauth2client\nexcept ImportError:\n raise RuntimeError(\n 'Previous version of google-api-python-client detected; due to a '\n 'packaging issue, we cannot perform an in-place upgrade. To repair, '\n 'remove and reinstall this package, along with oauth2client and '\n 'uritemplate. 
One can do this with pip via\\n'\n ' pip install -I google-api-python-client'\n )\n\nfrom googleapiclient import channel\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\nfrom googleapiclient import http\nfrom googleapiclient import mimeparse\nfrom googleapiclient import model\nfrom googleapiclient import sample_tools\nfrom googleapiclient import schema\n\n__version__ = googleapiclient.__version__\n\n_SUBMODULES = {\n 'channel': channel,\n 'discovery': discovery,\n 'errors': errors,\n 'http': http,\n 'mimeparse': mimeparse,\n 'model': model,\n 'sample_tools': sample_tools,\n 'schema': schema,\n}\n\nimport sys\nfor module_name, module in iteritems(_SUBMODULES):\n sys.modules['apiclient.%s' % module_name] = module\n", "path": "apiclient/__init__.py"}], "after_files": [{"content": "\"\"\"Retain apiclient as an alias for googleapiclient.\"\"\"\n\nfrom six import iteritems\n\nimport googleapiclient\n\nfrom googleapiclient import channel\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\nfrom googleapiclient import http\nfrom googleapiclient import mimeparse\nfrom googleapiclient import model\nfrom googleapiclient import sample_tools\nfrom googleapiclient import schema\n\n__version__ = googleapiclient.__version__\n\n_SUBMODULES = {\n 'channel': channel,\n 'discovery': discovery,\n 'errors': errors,\n 'http': http,\n 'mimeparse': mimeparse,\n 'model': model,\n 'sample_tools': sample_tools,\n 'schema': schema,\n}\n\nimport sys\nfor module_name, module in iteritems(_SUBMODULES):\n sys.modules['apiclient.%s' % module_name] = module\n", "path": "apiclient/__init__.py"}]} | 2,459 | 185 |
gh_patches_debug_226 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-2128 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When adding multiple authors to one book, only the first is added
**Describe the bug**
I would like to add multiple authors to a book (it's an anthology). When I add multiple authors to the book via the "Add Another Author" button, only the first one is added.
**To Reproduce**
1. Edit book
2. Fill the info for one author (Charlie Jane Anders) in the input
3. Click "Add Another Author"
4. Fill the info for the new author in the second input
5. Click "Add Another Author"
6. Fill the info for that new author in that third input
7. Save book
8. The interface shows the message _Is "Charlie Jane Anders" one of these authors?_
9. Select one of the choices
10. Only Charlie Jane Anders has been added, the other authors don't show up.
**Expected behavior**
I would like to see all authors added, and the interface for selecting the right author (i.e. _Is "Charlie Jane Anders" one of these authors?_) should show the choices for all the authors I add.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Instance**
boitam.eu (I expect this problem to be global)
--- END ISSUE ---
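For context, each "Add Another Author" input submits one more `add_author` form field, and the view reads them all back with `request.POST.getlist("add_author")`. A minimal stdlib sketch of that mechanism (illustration only, not BookWyrm code; the field name is taken from the view shown below):

```python
# Repeated "add_author" fields arrive as multiple values under one key,
# analogous to Django's request.POST.getlist("add_author").
from urllib.parse import parse_qs

post = parse_qs("add_author=Charlie+Jane+Anders&add_author=Author+Two&add_author=Author+Three")
add_author = [name for name in post.get("add_author", []) if name]
print(add_author)  # ['Charlie Jane Anders', 'Author Two', 'Author Three']
# The reported bug is that only the first of these names ends up attached to the book.
```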
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/books/edit_book.py`
Content:
```
1 """ the good stuff! the books! """
2 from re import sub, findall
3 from django.contrib.auth.decorators import login_required, permission_required
4 from django.contrib.postgres.search import SearchRank, SearchVector
5 from django.db import transaction
6 from django.http import HttpResponseBadRequest
7 from django.shortcuts import get_object_or_404, redirect
8 from django.template.response import TemplateResponse
9 from django.utils.decorators import method_decorator
10 from django.views.decorators.http import require_POST
11 from django.views import View
12
13 from bookwyrm import book_search, forms, models
14
15 # from bookwyrm.activitypub.base_activity import ActivityObject
16 from bookwyrm.utils.isni import (
17 find_authors_by_name,
18 build_author_from_isni,
19 augment_author_metadata,
20 )
21 from bookwyrm.views.helpers import get_edition
22 from .books import set_cover_from_url
23
24 # pylint: disable=no-self-use
25 @method_decorator(login_required, name="dispatch")
26 @method_decorator(
27 permission_required("bookwyrm.edit_book", raise_exception=True), name="dispatch"
28 )
29 class EditBook(View):
30 """edit a book"""
31
32 def get(self, request, book_id):
33 """info about a book"""
34 book = get_edition(book_id)
35 if not book.description:
36 book.description = book.parent_work.description
37 data = {"book": book, "form": forms.EditionForm(instance=book)}
38 return TemplateResponse(request, "book/edit/edit_book.html", data)
39
40 def post(self, request, book_id):
41 """edit a book cool"""
42 book = get_object_or_404(models.Edition, id=book_id)
43 form = forms.EditionForm(request.POST, request.FILES, instance=book)
44
45 data = {"book": book, "form": form}
46 if not form.is_valid():
47 return TemplateResponse(request, "book/edit/edit_book.html", data)
48
49 data = add_authors(request, data)
50
51 # either of the above cases requires additional confirmation
52 if data.get("add_author"):
53 return TemplateResponse(request, "book/edit/edit_book.html", data)
54
55 remove_authors = request.POST.getlist("remove_authors")
56 for author_id in remove_authors:
57 book.authors.remove(author_id)
58
59 book = form.save(commit=False)
60
61 url = request.POST.get("cover-url")
62 if url:
63 image = set_cover_from_url(url)
64 if image:
65 book.cover.save(*image, save=False)
66
67 book.save()
68 return redirect(f"/book/{book.id}")
69
70
71 @method_decorator(login_required, name="dispatch")
72 @method_decorator(
73 permission_required("bookwyrm.edit_book", raise_exception=True), name="dispatch"
74 )
75 class CreateBook(View):
76 """brand new book"""
77
78 def get(self, request):
79 """info about a book"""
80 data = {"form": forms.EditionForm()}
81 return TemplateResponse(request, "book/edit/edit_book.html", data)
82
83 # pylint: disable=too-many-locals
84 def post(self, request):
85 """create a new book"""
86 # returns None if no match is found
87 form = forms.EditionForm(request.POST, request.FILES)
88 data = {"form": form}
89
90 # collect data provided by the work or import item
91 parent_work_id = request.POST.get("parent_work")
92 authors = None
93 if request.POST.get("authors"):
94 author_ids = findall(r"\d+", request.POST["authors"])
95 authors = models.Author.objects.filter(id__in=author_ids)
96
97 # fake book in case we need to keep editing
98 if parent_work_id:
99 data["book"] = {
100 "parent_work": {"id": parent_work_id},
101 "authors": authors,
102 }
103
104 if not form.is_valid():
105 return TemplateResponse(request, "book/edit/edit_book.html", data)
106
107 data = add_authors(request, data)
108
109 # check if this is an edition of an existing work
110 author_text = ", ".join(data.get("add_author", []))
111 data["book_matches"] = book_search.search(
112 f'{form.cleaned_data.get("title")} {author_text}',
113 min_confidence=0.1,
114 )[:5]
115
116 # go to confirm mode
117 if not parent_work_id or data.get("add_author"):
118 return TemplateResponse(request, "book/edit/edit_book.html", data)
119
120 with transaction.atomic():
121 book = form.save()
122 parent_work = get_object_or_404(models.Work, id=parent_work_id)
123 book.parent_work = parent_work
124
125 if authors:
126 book.authors.add(*authors)
127
128 url = request.POST.get("cover-url")
129 if url:
130 image = set_cover_from_url(url)
131 if image:
132 book.cover.save(*image, save=False)
133
134 book.save()
135 return redirect(f"/book/{book.id}")
136
137
138 def add_authors(request, data):
139 """helper for adding authors"""
140 add_author = [author for author in request.POST.getlist("add_author") if author]
141 if not add_author:
142 return data
143
144 data["add_author"] = add_author
145 data["author_matches"] = []
146 data["isni_matches"] = []
147
148 # creting a book or adding an author to a book needs another step
149 data["confirm_mode"] = True
150 # this isn't preserved because it isn't part of the form obj
151 data["remove_authors"] = request.POST.getlist("remove_authors")
152 data["cover_url"] = request.POST.get("cover-url")
153
154 for author in add_author:
155 # filter out empty author fields
156 if not author:
157 continue
158 # check for existing authors
159 vector = SearchVector("name", weight="A") + SearchVector("aliases", weight="B")
160
161 author_matches = (
162 models.Author.objects.annotate(search=vector)
163 .annotate(rank=SearchRank(vector, author))
164 .filter(rank__gt=0.4)
165 .order_by("-rank")[:5]
166 )
167
168 isni_authors = find_authors_by_name(
169 author, description=True
170 ) # find matches from ISNI API
171
172 # dedupe isni authors we already have in the DB
173 exists = [
174 i
175 for i in isni_authors
176 for a in author_matches
177 if sub(r"\D", "", str(i.isni)) == sub(r"\D", "", str(a.isni))
178 ]
179
180 # pylint: disable=cell-var-from-loop
181 matches = list(filter(lambda x: x not in exists, isni_authors))
182 # combine existing and isni authors
183 matches.extend(author_matches)
184
185 data["author_matches"].append(
186 {
187 "name": author.strip(),
188 "matches": matches,
189 "existing_isnis": exists,
190 }
191 )
192 return data
193
194
195 @require_POST
196 @permission_required("bookwyrm.edit_book", raise_exception=True)
197 def create_book_from_data(request):
198 """create a book with starter data"""
199 author_ids = findall(r"\d+", request.POST.get("authors"))
200 book = {
201 "parent_work": {"id": request.POST.get("parent_work")},
202 "authors": models.Author.objects.filter(id__in=author_ids).all(),
203 "subjects": request.POST.getlist("subjects"),
204 }
205
206 data = {"book": book, "form": forms.EditionForm(request.POST)}
207 return TemplateResponse(request, "book/edit/edit_book.html", data)
208
209
210 @method_decorator(login_required, name="dispatch")
211 @method_decorator(
212 permission_required("bookwyrm.edit_book", raise_exception=True), name="dispatch"
213 )
214 class ConfirmEditBook(View):
215 """confirm edits to a book"""
216
217 # pylint: disable=too-many-locals
218 # pylint: disable=too-many-branches
219 def post(self, request, book_id=None):
220 """edit a book cool"""
221 # returns None if no match is found
222 book = models.Edition.objects.filter(id=book_id).first()
223 form = forms.EditionForm(request.POST, request.FILES, instance=book)
224
225 data = {"book": book, "form": form}
226 if not form.is_valid():
227 return TemplateResponse(request, "book/edit/edit_book.html", data)
228
229 with transaction.atomic():
230 # save book
231 book = form.save()
232
233 # add known authors
234 authors = None
235 if request.POST.get("authors"):
236 author_ids = findall(r"\d+", request.POST["authors"])
237 authors = models.Author.objects.filter(id__in=author_ids)
238 book.authors.add(*authors)
239
240 # get or create author as needed
241 for i in range(int(request.POST.get("author-match-count", 0))):
242 match = request.POST.get(f"author_match-{i}")
243 if not match:
244 return HttpResponseBadRequest()
245 try:
246 # if it's an int, it's an ID
247 match = int(match)
248 author = get_object_or_404(
249 models.Author, id=request.POST[f"author_match-{i}"]
250 )
251 # update author metadata if the ISNI record is more complete
252 isni = request.POST.get(f"isni-for-{match}", None)
253 if isni is not None:
254 augment_author_metadata(author, isni)
255 except ValueError:
256 # otherwise it's a new author
257 isni_match = request.POST.get(f"author_match-{i}")
258 author_object = build_author_from_isni(isni_match)
259 # with author data class from isni id
260 if "author" in author_object:
261 skeleton = models.Author.objects.create(
262 name=author_object["author"].name
263 )
264 author = author_object["author"].to_model(
265 model=models.Author, overwrite=True, instance=skeleton
266 )
267 else:
268 # or it's just a name
269 author = models.Author.objects.create(name=match)
270 book.authors.add(author)
271
272 # create work, if needed
273 if not book.parent_work:
274 work_match = request.POST.get("parent_work")
275 if work_match and work_match != "0":
276 work = get_object_or_404(models.Work, id=work_match)
277 else:
278 work = models.Work.objects.create(title=form.cleaned_data["title"])
279 work.authors.set(book.authors.all())
280 book.parent_work = work
281
282 for author_id in request.POST.getlist("remove_authors"):
283 book.authors.remove(author_id)
284
285 # import cover, if requested
286 url = request.POST.get("cover-url")
287 if url:
288 image = set_cover_from_url(url)
289 if image:
290 book.cover.save(*image, save=False)
291
292 # we don't tell the world when creating a book
293 book.save(broadcast=False)
294
295 return redirect(f"/book/{book.id}")
296
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
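The patch that follows changes only the indentation of the final `return data` in `add_authors`, moving it out of the per-author loop. A tiny standalone illustration of why that one-line dedent matters (hypothetical helpers, not BookWyrm code):

```python
# Returning inside the loop processes only the first item; returning after it
# processes every item.
def collect_inside(names):
    out = []
    for name in names:
        out.append(name.strip())
        return out  # runs on the first iteration

def collect_after(names):
    out = []
    for name in names:
        out.append(name.strip())
    return out  # runs once the loop has finished

names = ["Charlie Jane Anders", "Author Two"]
print(collect_inside(names))  # ['Charlie Jane Anders']
print(collect_after(names))   # ['Charlie Jane Anders', 'Author Two']
```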
| diff --git a/bookwyrm/views/books/edit_book.py b/bookwyrm/views/books/edit_book.py
--- a/bookwyrm/views/books/edit_book.py
+++ b/bookwyrm/views/books/edit_book.py
@@ -189,7 +189,7 @@
"existing_isnis": exists,
}
)
- return data
+ return data
@require_POST
| {"golden_diff": "diff --git a/bookwyrm/views/books/edit_book.py b/bookwyrm/views/books/edit_book.py\n--- a/bookwyrm/views/books/edit_book.py\n+++ b/bookwyrm/views/books/edit_book.py\n@@ -189,7 +189,7 @@\n \"existing_isnis\": exists,\n }\n )\n- return data\n+ return data\n \n \n @require_POST\n", "issue": "When adding multiple authors to one book, only the first is added\n**Describe the bug**\r\n\r\nI would like to add multiple authors to a book (it's an anthology). When I add multiple authors to the book, via the \"Add Another Author\" button, only the first one is added.\r\n\r\n**To Reproduce**\r\n\r\n1. Edit book\r\n2. Fill the info for one author (Charlie Jane Anders) in the input\r\n3. Click \"Add Another Author\"\r\n4. Fill the info for the new author in the second input\r\n5. Click \"Add Another Author\"\r\n6. Fill the info for that new author in that third input\r\n7. Save book\r\n8. The interface shows the message _Is \"Charlie Jane Anders\" one of these authors?_\r\n9. Select one of the choices\r\n10. Only Charlie Jane Anders has been added, the other authors don't show up.\r\n\r\n**Expected behavior**\r\n\r\nI would like to see all authors added, and the interface for selecting the right author (ie. _Is \"Charlie Jane Anders\" one of these authors?_) should show the choices for all the authors I add.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Instance**\r\n\r\nboitam.eu (I expect this problem to be global)\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\" the good stuff! the books! \"\"\"\nfrom re import sub, findall\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.postgres.search import SearchRank, SearchVector\nfrom django.db import transaction\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.http import require_POST\nfrom django.views import View\n\nfrom bookwyrm import book_search, forms, models\n\n# from bookwyrm.activitypub.base_activity import ActivityObject\nfrom bookwyrm.utils.isni import (\n find_authors_by_name,\n build_author_from_isni,\n augment_author_metadata,\n)\nfrom bookwyrm.views.helpers import get_edition\nfrom .books import set_cover_from_url\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass EditBook(View):\n \"\"\"edit a book\"\"\"\n\n def get(self, request, book_id):\n \"\"\"info about a book\"\"\"\n book = get_edition(book_id)\n if not book.description:\n book.description = book.parent_work.description\n data = {\"book\": book, \"form\": forms.EditionForm(instance=book)}\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n def post(self, request, book_id):\n \"\"\"edit a book cool\"\"\"\n book = get_object_or_404(models.Edition, id=book_id)\n form = forms.EditionForm(request.POST, request.FILES, instance=book)\n\n data = {\"book\": book, \"form\": form}\n if not form.is_valid():\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n data = add_authors(request, data)\n\n # either of the above cases requires additional confirmation\n if data.get(\"add_author\"):\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n remove_authors = 
request.POST.getlist(\"remove_authors\")\n for author_id in remove_authors:\n book.authors.remove(author_id)\n\n book = form.save(commit=False)\n\n url = request.POST.get(\"cover-url\")\n if url:\n image = set_cover_from_url(url)\n if image:\n book.cover.save(*image, save=False)\n\n book.save()\n return redirect(f\"/book/{book.id}\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass CreateBook(View):\n \"\"\"brand new book\"\"\"\n\n def get(self, request):\n \"\"\"info about a book\"\"\"\n data = {\"form\": forms.EditionForm()}\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n # pylint: disable=too-many-locals\n def post(self, request):\n \"\"\"create a new book\"\"\"\n # returns None if no match is found\n form = forms.EditionForm(request.POST, request.FILES)\n data = {\"form\": form}\n\n # collect data provided by the work or import item\n parent_work_id = request.POST.get(\"parent_work\")\n authors = None\n if request.POST.get(\"authors\"):\n author_ids = findall(r\"\\d+\", request.POST[\"authors\"])\n authors = models.Author.objects.filter(id__in=author_ids)\n\n # fake book in case we need to keep editing\n if parent_work_id:\n data[\"book\"] = {\n \"parent_work\": {\"id\": parent_work_id},\n \"authors\": authors,\n }\n\n if not form.is_valid():\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n data = add_authors(request, data)\n\n # check if this is an edition of an existing work\n author_text = \", \".join(data.get(\"add_author\", []))\n data[\"book_matches\"] = book_search.search(\n f'{form.cleaned_data.get(\"title\")} {author_text}',\n min_confidence=0.1,\n )[:5]\n\n # go to confirm mode\n if not parent_work_id or data.get(\"add_author\"):\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n with transaction.atomic():\n book = form.save()\n parent_work = get_object_or_404(models.Work, id=parent_work_id)\n book.parent_work = parent_work\n\n if authors:\n book.authors.add(*authors)\n\n url = request.POST.get(\"cover-url\")\n if url:\n image = set_cover_from_url(url)\n if image:\n book.cover.save(*image, save=False)\n\n book.save()\n return redirect(f\"/book/{book.id}\")\n\n\ndef add_authors(request, data):\n \"\"\"helper for adding authors\"\"\"\n add_author = [author for author in request.POST.getlist(\"add_author\") if author]\n if not add_author:\n return data\n\n data[\"add_author\"] = add_author\n data[\"author_matches\"] = []\n data[\"isni_matches\"] = []\n\n # creting a book or adding an author to a book needs another step\n data[\"confirm_mode\"] = True\n # this isn't preserved because it isn't part of the form obj\n data[\"remove_authors\"] = request.POST.getlist(\"remove_authors\")\n data[\"cover_url\"] = request.POST.get(\"cover-url\")\n\n for author in add_author:\n # filter out empty author fields\n if not author:\n continue\n # check for existing authors\n vector = SearchVector(\"name\", weight=\"A\") + SearchVector(\"aliases\", weight=\"B\")\n\n author_matches = (\n models.Author.objects.annotate(search=vector)\n .annotate(rank=SearchRank(vector, author))\n .filter(rank__gt=0.4)\n .order_by(\"-rank\")[:5]\n )\n\n isni_authors = find_authors_by_name(\n author, description=True\n ) # find matches from ISNI API\n\n # dedupe isni authors we already have in the DB\n exists = [\n i\n for i in isni_authors\n for a in author_matches\n if sub(r\"\\D\", \"\", str(i.isni)) == 
sub(r\"\\D\", \"\", str(a.isni))\n ]\n\n # pylint: disable=cell-var-from-loop\n matches = list(filter(lambda x: x not in exists, isni_authors))\n # combine existing and isni authors\n matches.extend(author_matches)\n\n data[\"author_matches\"].append(\n {\n \"name\": author.strip(),\n \"matches\": matches,\n \"existing_isnis\": exists,\n }\n )\n return data\n\n\n@require_POST\n@permission_required(\"bookwyrm.edit_book\", raise_exception=True)\ndef create_book_from_data(request):\n \"\"\"create a book with starter data\"\"\"\n author_ids = findall(r\"\\d+\", request.POST.get(\"authors\"))\n book = {\n \"parent_work\": {\"id\": request.POST.get(\"parent_work\")},\n \"authors\": models.Author.objects.filter(id__in=author_ids).all(),\n \"subjects\": request.POST.getlist(\"subjects\"),\n }\n\n data = {\"book\": book, \"form\": forms.EditionForm(request.POST)}\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass ConfirmEditBook(View):\n \"\"\"confirm edits to a book\"\"\"\n\n # pylint: disable=too-many-locals\n # pylint: disable=too-many-branches\n def post(self, request, book_id=None):\n \"\"\"edit a book cool\"\"\"\n # returns None if no match is found\n book = models.Edition.objects.filter(id=book_id).first()\n form = forms.EditionForm(request.POST, request.FILES, instance=book)\n\n data = {\"book\": book, \"form\": form}\n if not form.is_valid():\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n with transaction.atomic():\n # save book\n book = form.save()\n\n # add known authors\n authors = None\n if request.POST.get(\"authors\"):\n author_ids = findall(r\"\\d+\", request.POST[\"authors\"])\n authors = models.Author.objects.filter(id__in=author_ids)\n book.authors.add(*authors)\n\n # get or create author as needed\n for i in range(int(request.POST.get(\"author-match-count\", 0))):\n match = request.POST.get(f\"author_match-{i}\")\n if not match:\n return HttpResponseBadRequest()\n try:\n # if it's an int, it's an ID\n match = int(match)\n author = get_object_or_404(\n models.Author, id=request.POST[f\"author_match-{i}\"]\n )\n # update author metadata if the ISNI record is more complete\n isni = request.POST.get(f\"isni-for-{match}\", None)\n if isni is not None:\n augment_author_metadata(author, isni)\n except ValueError:\n # otherwise it's a new author\n isni_match = request.POST.get(f\"author_match-{i}\")\n author_object = build_author_from_isni(isni_match)\n # with author data class from isni id\n if \"author\" in author_object:\n skeleton = models.Author.objects.create(\n name=author_object[\"author\"].name\n )\n author = author_object[\"author\"].to_model(\n model=models.Author, overwrite=True, instance=skeleton\n )\n else:\n # or it's just a name\n author = models.Author.objects.create(name=match)\n book.authors.add(author)\n\n # create work, if needed\n if not book.parent_work:\n work_match = request.POST.get(\"parent_work\")\n if work_match and work_match != \"0\":\n work = get_object_or_404(models.Work, id=work_match)\n else:\n work = models.Work.objects.create(title=form.cleaned_data[\"title\"])\n work.authors.set(book.authors.all())\n book.parent_work = work\n\n for author_id in request.POST.getlist(\"remove_authors\"):\n book.authors.remove(author_id)\n\n # import cover, if requested\n url = request.POST.get(\"cover-url\")\n if url:\n image = 
set_cover_from_url(url)\n if image:\n book.cover.save(*image, save=False)\n\n # we don't tell the world when creating a book\n book.save(broadcast=False)\n\n return redirect(f\"/book/{book.id}\")\n", "path": "bookwyrm/views/books/edit_book.py"}], "after_files": [{"content": "\"\"\" the good stuff! the books! \"\"\"\nfrom re import sub, findall\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.postgres.search import SearchRank, SearchVector\nfrom django.db import transaction\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.http import require_POST\nfrom django.views import View\n\nfrom bookwyrm import book_search, forms, models\n\n# from bookwyrm.activitypub.base_activity import ActivityObject\nfrom bookwyrm.utils.isni import (\n find_authors_by_name,\n build_author_from_isni,\n augment_author_metadata,\n)\nfrom bookwyrm.views.helpers import get_edition\nfrom .books import set_cover_from_url\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass EditBook(View):\n \"\"\"edit a book\"\"\"\n\n def get(self, request, book_id):\n \"\"\"info about a book\"\"\"\n book = get_edition(book_id)\n if not book.description:\n book.description = book.parent_work.description\n data = {\"book\": book, \"form\": forms.EditionForm(instance=book)}\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n def post(self, request, book_id):\n \"\"\"edit a book cool\"\"\"\n book = get_object_or_404(models.Edition, id=book_id)\n form = forms.EditionForm(request.POST, request.FILES, instance=book)\n\n data = {\"book\": book, \"form\": form}\n if not form.is_valid():\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n data = add_authors(request, data)\n\n # either of the above cases requires additional confirmation\n if data.get(\"add_author\"):\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n remove_authors = request.POST.getlist(\"remove_authors\")\n for author_id in remove_authors:\n book.authors.remove(author_id)\n\n book = form.save(commit=False)\n\n url = request.POST.get(\"cover-url\")\n if url:\n image = set_cover_from_url(url)\n if image:\n book.cover.save(*image, save=False)\n\n book.save()\n return redirect(f\"/book/{book.id}\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass CreateBook(View):\n \"\"\"brand new book\"\"\"\n\n def get(self, request):\n \"\"\"info about a book\"\"\"\n data = {\"form\": forms.EditionForm()}\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n # pylint: disable=too-many-locals\n def post(self, request):\n \"\"\"create a new book\"\"\"\n # returns None if no match is found\n form = forms.EditionForm(request.POST, request.FILES)\n data = {\"form\": form}\n\n # collect data provided by the work or import item\n parent_work_id = request.POST.get(\"parent_work\")\n authors = None\n if request.POST.get(\"authors\"):\n author_ids = findall(r\"\\d+\", request.POST[\"authors\"])\n authors = models.Author.objects.filter(id__in=author_ids)\n\n # fake book in case we need to keep editing\n 
if parent_work_id:\n data[\"book\"] = {\n \"parent_work\": {\"id\": parent_work_id},\n \"authors\": authors,\n }\n\n if not form.is_valid():\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n data = add_authors(request, data)\n\n # check if this is an edition of an existing work\n author_text = \", \".join(data.get(\"add_author\", []))\n data[\"book_matches\"] = book_search.search(\n f'{form.cleaned_data.get(\"title\")} {author_text}',\n min_confidence=0.1,\n )[:5]\n\n # go to confirm mode\n if not parent_work_id or data.get(\"add_author\"):\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n with transaction.atomic():\n book = form.save()\n parent_work = get_object_or_404(models.Work, id=parent_work_id)\n book.parent_work = parent_work\n\n if authors:\n book.authors.add(*authors)\n\n url = request.POST.get(\"cover-url\")\n if url:\n image = set_cover_from_url(url)\n if image:\n book.cover.save(*image, save=False)\n\n book.save()\n return redirect(f\"/book/{book.id}\")\n\n\ndef add_authors(request, data):\n \"\"\"helper for adding authors\"\"\"\n add_author = [author for author in request.POST.getlist(\"add_author\") if author]\n if not add_author:\n return data\n\n data[\"add_author\"] = add_author\n data[\"author_matches\"] = []\n data[\"isni_matches\"] = []\n\n # creting a book or adding an author to a book needs another step\n data[\"confirm_mode\"] = True\n # this isn't preserved because it isn't part of the form obj\n data[\"remove_authors\"] = request.POST.getlist(\"remove_authors\")\n data[\"cover_url\"] = request.POST.get(\"cover-url\")\n\n for author in add_author:\n # filter out empty author fields\n if not author:\n continue\n # check for existing authors\n vector = SearchVector(\"name\", weight=\"A\") + SearchVector(\"aliases\", weight=\"B\")\n\n author_matches = (\n models.Author.objects.annotate(search=vector)\n .annotate(rank=SearchRank(vector, author))\n .filter(rank__gt=0.4)\n .order_by(\"-rank\")[:5]\n )\n\n isni_authors = find_authors_by_name(\n author, description=True\n ) # find matches from ISNI API\n\n # dedupe isni authors we already have in the DB\n exists = [\n i\n for i in isni_authors\n for a in author_matches\n if sub(r\"\\D\", \"\", str(i.isni)) == sub(r\"\\D\", \"\", str(a.isni))\n ]\n\n # pylint: disable=cell-var-from-loop\n matches = list(filter(lambda x: x not in exists, isni_authors))\n # combine existing and isni authors\n matches.extend(author_matches)\n\n data[\"author_matches\"].append(\n {\n \"name\": author.strip(),\n \"matches\": matches,\n \"existing_isnis\": exists,\n }\n )\n return data\n\n\n@require_POST\n@permission_required(\"bookwyrm.edit_book\", raise_exception=True)\ndef create_book_from_data(request):\n \"\"\"create a book with starter data\"\"\"\n author_ids = findall(r\"\\d+\", request.POST.get(\"authors\"))\n book = {\n \"parent_work\": {\"id\": request.POST.get(\"parent_work\")},\n \"authors\": models.Author.objects.filter(id__in=author_ids).all(),\n \"subjects\": request.POST.getlist(\"subjects\"),\n }\n\n data = {\"book\": book, \"form\": forms.EditionForm(request.POST)}\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass ConfirmEditBook(View):\n \"\"\"confirm edits to a book\"\"\"\n\n # pylint: disable=too-many-locals\n # pylint: disable=too-many-branches\n def post(self, request, 
book_id=None):\n \"\"\"edit a book cool\"\"\"\n # returns None if no match is found\n book = models.Edition.objects.filter(id=book_id).first()\n form = forms.EditionForm(request.POST, request.FILES, instance=book)\n\n data = {\"book\": book, \"form\": form}\n if not form.is_valid():\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n with transaction.atomic():\n # save book\n book = form.save()\n\n # add known authors\n authors = None\n if request.POST.get(\"authors\"):\n author_ids = findall(r\"\\d+\", request.POST[\"authors\"])\n authors = models.Author.objects.filter(id__in=author_ids)\n book.authors.add(*authors)\n\n # get or create author as needed\n for i in range(int(request.POST.get(\"author-match-count\", 0))):\n match = request.POST.get(f\"author_match-{i}\")\n if not match:\n return HttpResponseBadRequest()\n try:\n # if it's an int, it's an ID\n match = int(match)\n author = get_object_or_404(\n models.Author, id=request.POST[f\"author_match-{i}\"]\n )\n # update author metadata if the ISNI record is more complete\n isni = request.POST.get(f\"isni-for-{match}\", None)\n if isni is not None:\n augment_author_metadata(author, isni)\n except ValueError:\n # otherwise it's a new author\n isni_match = request.POST.get(f\"author_match-{i}\")\n author_object = build_author_from_isni(isni_match)\n # with author data class from isni id\n if \"author\" in author_object:\n skeleton = models.Author.objects.create(\n name=author_object[\"author\"].name\n )\n author = author_object[\"author\"].to_model(\n model=models.Author, overwrite=True, instance=skeleton\n )\n else:\n # or it's just a name\n author = models.Author.objects.create(name=match)\n book.authors.add(author)\n\n # create work, if needed\n if not book.parent_work:\n work_match = request.POST.get(\"parent_work\")\n if work_match and work_match != \"0\":\n work = get_object_or_404(models.Work, id=work_match)\n else:\n work = models.Work.objects.create(title=form.cleaned_data[\"title\"])\n work.authors.set(book.authors.all())\n book.parent_work = work\n\n for author_id in request.POST.getlist(\"remove_authors\"):\n book.authors.remove(author_id)\n\n # import cover, if requested\n url = request.POST.get(\"cover-url\")\n if url:\n image = set_cover_from_url(url)\n if image:\n book.cover.save(*image, save=False)\n\n # we don't tell the world when creating a book\n book.save(broadcast=False)\n\n return redirect(f\"/book/{book.id}\")\n", "path": "bookwyrm/views/books/edit_book.py"}]} | 3,628 | 85 |
gh_patches_debug_36428 | rasdani/github-patches | git_diff | googleapis__python-bigquery-258 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve guidance for streaming inserts
Suggesting we update the insert_rows.py code to showcase how to explicitly define a schema to avoid excessive calls to get_table().
--- END ISSUE ---
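One way to illustrate the suggestion (a sketch only, with a placeholder table ID; not the final sample code): give the client the schema explicitly, or send the rows as JSON, so no `get_table()` call is needed before streaming.

```python
from google.cloud import bigquery

client = bigquery.Client()
table_id = "your-project.your_dataset.your_table"  # placeholder

# Option A: define the schema explicitly and pass it as selected_fields,
# so tuple rows can be serialized without fetching the table first.
schema = [
    bigquery.SchemaField("full_name", "STRING"),
    bigquery.SchemaField("age", "INTEGER"),
]
rows = [("Phred Phlyntstone", 32), ("Wylma Phlyntstone", 29)]
errors = client.insert_rows(table_id, rows, selected_fields=schema)

# Option B: send JSON rows, which require no schema lookup at all.
json_rows = [{"full_name": "Phred Phlyntstone", "age": 32}]
errors = client.insert_rows_json(table_id, json_rows)
print(errors)  # [] on success
```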
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `samples/table_insert_rows.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 def table_insert_rows(table_id):
17
18 # [START bigquery_table_insert_rows]
19
20 from google.cloud import bigquery
21
22 # Construct a BigQuery client object.
23 client = bigquery.Client()
24
25 # TODO(developer): Set table_id to the ID of the model to fetch.
26 # table_id = "your-project.your_dataset.your_table"
27
28 table = client.get_table(table_id) # Make an API request.
29 rows_to_insert = [(u"Phred Phlyntstone", 32), (u"Wylma Phlyntstone", 29)]
30
31 errors = client.insert_rows(table, rows_to_insert) # Make an API request.
32 if errors == []:
33 print("New rows have been added.")
34 # [END bigquery_table_insert_rows]
35
```
Path: `samples/table_insert_rows_explicit_none_insert_ids.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 def table_insert_rows_explicit_none_insert_ids(table_id):
17
18 # [START bigquery_table_insert_rows_explicit_none_insert_ids]
19
20 from google.cloud import bigquery
21
22 # Construct a BigQuery client object.
23 client = bigquery.Client()
24
25 # TODO(developer): Set table_id to the ID of the model to fetch.
26 # table_id = "your-project.your_dataset.your_table"
27
28 table = client.get_table(table_id) # Make an API request.
29 rows_to_insert = [(u"Phred Phlyntstone", 32), (u"Wylma Phlyntstone", 29)]
30
31 errors = client.insert_rows(
32 table, rows_to_insert, row_ids=[None] * len(rows_to_insert)
33 ) # Make an API request.
34 if errors == []:
35 print("New rows have been added.")
36 # [END bigquery_table_insert_rows_explicit_none_insert_ids]
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/samples/table_insert_rows.py b/samples/table_insert_rows.py
--- a/samples/table_insert_rows.py
+++ b/samples/table_insert_rows.py
@@ -16,19 +16,22 @@
def table_insert_rows(table_id):
# [START bigquery_table_insert_rows]
-
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client()
- # TODO(developer): Set table_id to the ID of the model to fetch.
+ # TODO(developer): Set table_id to the ID of table to append to.
# table_id = "your-project.your_dataset.your_table"
- table = client.get_table(table_id) # Make an API request.
- rows_to_insert = [(u"Phred Phlyntstone", 32), (u"Wylma Phlyntstone", 29)]
+ rows_to_insert = [
+ {u"full_name": u"Phred Phlyntstone", u"age": 32},
+ {u"full_name": u"Wylma Phlyntstone", u"age": 29},
+ ]
- errors = client.insert_rows(table, rows_to_insert) # Make an API request.
+ errors = client.insert_rows_json(table_id, rows_to_insert) # Make an API request.
if errors == []:
print("New rows have been added.")
+ else:
+ print("Encountered errors while inserting rows: {}".format(errors))
# [END bigquery_table_insert_rows]
diff --git a/samples/table_insert_rows_explicit_none_insert_ids.py b/samples/table_insert_rows_explicit_none_insert_ids.py
--- a/samples/table_insert_rows_explicit_none_insert_ids.py
+++ b/samples/table_insert_rows_explicit_none_insert_ids.py
@@ -16,21 +16,24 @@
def table_insert_rows_explicit_none_insert_ids(table_id):
# [START bigquery_table_insert_rows_explicit_none_insert_ids]
-
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client()
- # TODO(developer): Set table_id to the ID of the model to fetch.
+ # TODO(developer): Set table_id to the ID of table to append to.
# table_id = "your-project.your_dataset.your_table"
- table = client.get_table(table_id) # Make an API request.
- rows_to_insert = [(u"Phred Phlyntstone", 32), (u"Wylma Phlyntstone", 29)]
+ rows_to_insert = [
+ {u"full_name": u"Phred Phlyntstone", u"age": 32},
+ {u"full_name": u"Wylma Phlyntstone", u"age": 29},
+ ]
- errors = client.insert_rows(
- table, rows_to_insert, row_ids=[None] * len(rows_to_insert)
+ errors = client.insert_rows_json(
+ table_id, rows_to_insert, row_ids=[None] * len(rows_to_insert)
) # Make an API request.
if errors == []:
print("New rows have been added.")
+ else:
+ print("Encountered errors while inserting rows: {}".format(errors))
# [END bigquery_table_insert_rows_explicit_none_insert_ids]
| {"golden_diff": "diff --git a/samples/table_insert_rows.py b/samples/table_insert_rows.py\n--- a/samples/table_insert_rows.py\n+++ b/samples/table_insert_rows.py\n@@ -16,19 +16,22 @@\n def table_insert_rows(table_id):\n \n # [START bigquery_table_insert_rows]\n-\n from google.cloud import bigquery\n \n # Construct a BigQuery client object.\n client = bigquery.Client()\n \n- # TODO(developer): Set table_id to the ID of the model to fetch.\n+ # TODO(developer): Set table_id to the ID of table to append to.\n # table_id = \"your-project.your_dataset.your_table\"\n \n- table = client.get_table(table_id) # Make an API request.\n- rows_to_insert = [(u\"Phred Phlyntstone\", 32), (u\"Wylma Phlyntstone\", 29)]\n+ rows_to_insert = [\n+ {u\"full_name\": u\"Phred Phlyntstone\", u\"age\": 32},\n+ {u\"full_name\": u\"Wylma Phlyntstone\", u\"age\": 29},\n+ ]\n \n- errors = client.insert_rows(table, rows_to_insert) # Make an API request.\n+ errors = client.insert_rows_json(table_id, rows_to_insert) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n+ else:\n+ print(\"Encountered errors while inserting rows: {}\".format(errors))\n # [END bigquery_table_insert_rows]\ndiff --git a/samples/table_insert_rows_explicit_none_insert_ids.py b/samples/table_insert_rows_explicit_none_insert_ids.py\n--- a/samples/table_insert_rows_explicit_none_insert_ids.py\n+++ b/samples/table_insert_rows_explicit_none_insert_ids.py\n@@ -16,21 +16,24 @@\n def table_insert_rows_explicit_none_insert_ids(table_id):\n \n # [START bigquery_table_insert_rows_explicit_none_insert_ids]\n-\n from google.cloud import bigquery\n \n # Construct a BigQuery client object.\n client = bigquery.Client()\n \n- # TODO(developer): Set table_id to the ID of the model to fetch.\n+ # TODO(developer): Set table_id to the ID of table to append to.\n # table_id = \"your-project.your_dataset.your_table\"\n \n- table = client.get_table(table_id) # Make an API request.\n- rows_to_insert = [(u\"Phred Phlyntstone\", 32), (u\"Wylma Phlyntstone\", 29)]\n+ rows_to_insert = [\n+ {u\"full_name\": u\"Phred Phlyntstone\", u\"age\": 32},\n+ {u\"full_name\": u\"Wylma Phlyntstone\", u\"age\": 29},\n+ ]\n \n- errors = client.insert_rows(\n- table, rows_to_insert, row_ids=[None] * len(rows_to_insert)\n+ errors = client.insert_rows_json(\n+ table_id, rows_to_insert, row_ids=[None] * len(rows_to_insert)\n ) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n+ else:\n+ print(\"Encountered errors while inserting rows: {}\".format(errors))\n # [END bigquery_table_insert_rows_explicit_none_insert_ids]\n", "issue": "Improve guidance for streaming inserts\nSuggesting we update the insert_rows.py code to showcase how to explicitly define a schema to avoid excessive calls to get_table().\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef table_insert_rows(table_id):\n\n # [START bigquery_table_insert_rows]\n\n from google.cloud import bigquery\n\n # 
Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set table_id to the ID of the model to fetch.\n # table_id = \"your-project.your_dataset.your_table\"\n\n table = client.get_table(table_id) # Make an API request.\n rows_to_insert = [(u\"Phred Phlyntstone\", 32), (u\"Wylma Phlyntstone\", 29)]\n\n errors = client.insert_rows(table, rows_to_insert) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n # [END bigquery_table_insert_rows]\n", "path": "samples/table_insert_rows.py"}, {"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef table_insert_rows_explicit_none_insert_ids(table_id):\n\n # [START bigquery_table_insert_rows_explicit_none_insert_ids]\n\n from google.cloud import bigquery\n\n # Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set table_id to the ID of the model to fetch.\n # table_id = \"your-project.your_dataset.your_table\"\n\n table = client.get_table(table_id) # Make an API request.\n rows_to_insert = [(u\"Phred Phlyntstone\", 32), (u\"Wylma Phlyntstone\", 29)]\n\n errors = client.insert_rows(\n table, rows_to_insert, row_ids=[None] * len(rows_to_insert)\n ) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n # [END bigquery_table_insert_rows_explicit_none_insert_ids]\n", "path": "samples/table_insert_rows_explicit_none_insert_ids.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef table_insert_rows(table_id):\n\n # [START bigquery_table_insert_rows]\n from google.cloud import bigquery\n\n # Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set table_id to the ID of table to append to.\n # table_id = \"your-project.your_dataset.your_table\"\n\n rows_to_insert = [\n {u\"full_name\": u\"Phred Phlyntstone\", u\"age\": 32},\n {u\"full_name\": u\"Wylma Phlyntstone\", u\"age\": 29},\n ]\n\n errors = client.insert_rows_json(table_id, rows_to_insert) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n else:\n print(\"Encountered errors while inserting rows: {}\".format(errors))\n # [END bigquery_table_insert_rows]\n", "path": "samples/table_insert_rows.py"}, {"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of 
the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef table_insert_rows_explicit_none_insert_ids(table_id):\n\n # [START bigquery_table_insert_rows_explicit_none_insert_ids]\n from google.cloud import bigquery\n\n # Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set table_id to the ID of table to append to.\n # table_id = \"your-project.your_dataset.your_table\"\n\n rows_to_insert = [\n {u\"full_name\": u\"Phred Phlyntstone\", u\"age\": 32},\n {u\"full_name\": u\"Wylma Phlyntstone\", u\"age\": 29},\n ]\n\n errors = client.insert_rows_json(\n table_id, rows_to_insert, row_ids=[None] * len(rows_to_insert)\n ) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n else:\n print(\"Encountered errors while inserting rows: {}\".format(errors))\n # [END bigquery_table_insert_rows_explicit_none_insert_ids]\n", "path": "samples/table_insert_rows_explicit_none_insert_ids.py"}]} | 1,078 | 750 |
gh_patches_debug_23845 | rasdani/github-patches | git_diff | xorbitsai__inference-349 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: chatglm max_length doesn't work
### Describe the bug
The gen config has `max_tokens`, but chatglm requires `max_length`.
### To Reproduce
To help us to reproduce this bug, please provide information below:
1. Your Python version.
2. The version of xinference you use.
3. Versions of crucial packages.
4. Full stack of the error.
5. Minimized code to reproduce the error.
### Expected behavior
A clear and concise description of what you expected to happen.
### Additional context
Add any other context about the problem here.
--- END ISSUE ---
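In other words, the OpenAI-style `max_tokens` key never reaches chatglm_cpp, which expects a `max_length` argument instead. A minimal sketch of the renaming step (standalone illustration, not the actual patch; the real call sites are in the file below):

```python
# Translate "max_tokens" from the generate config into the "max_length"
# keyword understood by chatglm.cpp, leaving other options untouched.
def to_chatglm_kwargs(generate_config: dict) -> dict:
    params = dict(generate_config)
    if "max_tokens" in params:
        params["max_length"] = params.pop("max_tokens")
    params.pop("stream", None)  # streaming is handled separately by the caller
    return params

print(to_chatglm_kwargs({"max_tokens": 512, "top_p": 0.8, "stream": True}))
# {'top_p': 0.8, 'max_length': 512}
```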
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xinference/model/llm/ggml/chatglm.py`
Content:
```
1 # Copyright 2022-2023 XProbe Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 import os
17 import time
18 import uuid
19 from pathlib import Path
20 from typing import TYPE_CHECKING, Iterator, List, Optional, TypedDict, Union
21
22 from ....types import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
23 from .. import LLMFamilyV1, LLMSpecV1
24 from ..core import LLM
25
26 if TYPE_CHECKING:
27 from chatglm_cpp import Pipeline
28
29
30 logger = logging.getLogger(__name__)
31
32
33 class ChatglmCppModelConfig(TypedDict, total=False):
34 pass
35
36
37 class ChatglmCppGenerateConfig(TypedDict, total=False):
38 max_tokens: int
39 top_p: float
40 temperature: float
41 stream: bool
42
43
44 class ChatglmCppChatModel(LLM):
45 def __init__(
46 self,
47 model_uid: str,
48 model_family: "LLMFamilyV1",
49 model_spec: "LLMSpecV1",
50 quantization: str,
51 model_path: str,
52 model_config: Optional[ChatglmCppModelConfig] = None,
53 ):
54 super().__init__(model_uid, model_family, model_spec, quantization, model_path)
55 self._llm: Optional["Pipeline"] = None
56
57 # just a placeholder for now as the chatglm_cpp repo doesn't support model config.
58 self._model_config = model_config
59
60 @classmethod
61 def _sanitize_generate_config(
62 cls,
63 chatglmcpp_generate_config: Optional[ChatglmCppGenerateConfig],
64 ) -> ChatglmCppGenerateConfig:
65 if chatglmcpp_generate_config is None:
66 chatglmcpp_generate_config = ChatglmCppGenerateConfig()
67 chatglmcpp_generate_config.setdefault("stream", False)
68 return chatglmcpp_generate_config
69
70 def load(self):
71 try:
72 import chatglm_cpp
73 except ImportError:
74 error_message = "Failed to import module 'chatglm_cpp'"
75 installation_guide = [
76 "Please make sure 'chatglm_cpp' is installed. ",
77 "You can install it by running the following command in the terminal:\n",
78 "pip install git+https://github.com/li-plus/chatglm.cpp.git@main\n\n",
79 "Or visit the original git repo if the above command fails:\n",
80 "https://github.com/li-plus/chatglm.cpp",
81 ]
82
83 raise ImportError(f"{error_message}\n\n{''.join(installation_guide)}")
84
85 model_file_path = os.path.join(
86 self.model_path,
87 self.model_spec.model_file_name_template.format(
88 quantization=self.quantization
89 ),
90 )
91
92 # handle legacy cache.
93 legacy_model_file_path = os.path.join(self.model_path, "model.bin")
94 if os.path.exists(legacy_model_file_path):
95 model_file_path = legacy_model_file_path
96
97 self._llm = chatglm_cpp.Pipeline(Path(model_file_path))
98
99 @classmethod
100 def match(cls, llm_family: "LLMFamilyV1", llm_spec: "LLMSpecV1") -> bool:
101 if llm_spec.model_format != "ggmlv3":
102 return False
103 if "chatglm" not in llm_family.model_name:
104 return False
105 if "chat" not in llm_family.model_ability:
106 return False
107 return True
108
109 @staticmethod
110 def _convert_raw_text_chunks_to_chat(
111 tokens: Iterator[str], model_name: str
112 ) -> Iterator[ChatCompletionChunk]:
113 yield {
114 "id": "chat" + f"cmpl-{str(uuid.uuid4())}",
115 "model": model_name,
116 "object": "chat.completion.chunk",
117 "created": int(time.time()),
118 "choices": [
119 {
120 "index": 0,
121 "delta": {
122 "role": "assistant",
123 },
124 "finish_reason": None,
125 }
126 ],
127 }
128 for token in enumerate(tokens):
129 yield {
130 "id": "chat" + f"cmpl-{str(uuid.uuid4())}",
131 "model": model_name,
132 "object": "chat.completion.chunk",
133 "created": int(time.time()),
134 "choices": [
135 {
136 "index": 0,
137 "delta": {
138 "content": token[1],
139 },
140 "finish_reason": None,
141 }
142 ],
143 }
144
145 @staticmethod
146 def _convert_raw_text_completion_to_chat(
147 text: str, model_name: str
148 ) -> ChatCompletion:
149 return {
150 "id": "chat" + f"cmpl-{str(uuid.uuid4())}",
151 "model": model_name,
152 "object": "chat.completion",
153 "created": int(time.time()),
154 "choices": [
155 {
156 "index": 0,
157 "message": {
158 "role": "assistant",
159 "content": text,
160 },
161 "finish_reason": None,
162 }
163 ],
164 "usage": {
165 "prompt_tokens": -1,
166 "completion_tokens": -1,
167 "total_tokens": -1,
168 },
169 }
170
171 def chat(
172 self,
173 prompt: str,
174 chat_history: Optional[List[ChatCompletionMessage]] = None,
175 generate_config: Optional[ChatglmCppGenerateConfig] = None,
176 ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
177 if chat_history is not None:
178 chat_history_list = [message["content"] for message in chat_history]
179 else:
180 chat_history_list = []
181
182 chat_history_list.append(prompt)
183 logger.debug("Full conversation history:\n%s", str(chat_history_list))
184
185 generate_config = self._sanitize_generate_config(generate_config)
186
187 assert self._llm is not None
188
189 if generate_config.get("stream", False):
190 it = self._llm.stream_chat(
191 chat_history_list,
192 )
193 assert not isinstance(it, str)
194 return self._convert_raw_text_chunks_to_chat(it, self.model_uid)
195 else:
196 c = self._llm.chat(
197 chat_history_list,
198 )
199 assert not isinstance(c, Iterator)
200 return self._convert_raw_text_completion_to_chat(c, self.model_uid)
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xinference/model/llm/ggml/chatglm.py b/xinference/model/llm/ggml/chatglm.py
--- a/xinference/model/llm/ggml/chatglm.py
+++ b/xinference/model/llm/ggml/chatglm.py
@@ -183,18 +183,23 @@
logger.debug("Full conversation history:\n%s", str(chat_history_list))
generate_config = self._sanitize_generate_config(generate_config)
+ params = dict(generate_config)
+ if "max_tokens" in params:
+ params["max_length"] = params.pop("max_tokens")
assert self._llm is not None
- if generate_config.get("stream", False):
+ if params.pop("stream", False):
it = self._llm.stream_chat(
chat_history_list,
+ **params,
)
assert not isinstance(it, str)
return self._convert_raw_text_chunks_to_chat(it, self.model_uid)
else:
c = self._llm.chat(
chat_history_list,
+ **params,
)
assert not isinstance(c, Iterator)
return self._convert_raw_text_completion_to_chat(c, self.model_uid)
| {"golden_diff": "diff --git a/xinference/model/llm/ggml/chatglm.py b/xinference/model/llm/ggml/chatglm.py\n--- a/xinference/model/llm/ggml/chatglm.py\n+++ b/xinference/model/llm/ggml/chatglm.py\n@@ -183,18 +183,23 @@\n logger.debug(\"Full conversation history:\\n%s\", str(chat_history_list))\n \n generate_config = self._sanitize_generate_config(generate_config)\n+ params = dict(generate_config)\n+ if \"max_tokens\" in params:\n+ params[\"max_length\"] = params.pop(\"max_tokens\")\n \n assert self._llm is not None\n \n- if generate_config.get(\"stream\", False):\n+ if params.pop(\"stream\", False):\n it = self._llm.stream_chat(\n chat_history_list,\n+ **params,\n )\n assert not isinstance(it, str)\n return self._convert_raw_text_chunks_to_chat(it, self.model_uid)\n else:\n c = self._llm.chat(\n chat_history_list,\n+ **params,\n )\n assert not isinstance(c, Iterator)\n return self._convert_raw_text_completion_to_chat(c, self.model_uid)\n", "issue": "BUG: chatglm max_length doesn't work\n### Describe the bug\r\nThe gen config has `max_tokens`, but chatglm requires `max_length`.\r\n\r\n### To Reproduce\r\nTo help us to reproduce this bug, please provide information below:\r\n\r\n1. Your Python version.\r\n2. The version of xinference you use.\r\n3. Versions of crucial packages.\r\n4. Full stack of the error.\r\n5. Minimized code to reproduce the error.\r\n\r\n### Expected behavior\r\nA clear and concise description of what you expected to happen.\r\n\r\n### Additional context\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport time\nimport uuid\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Iterator, List, Optional, TypedDict, Union\n\nfrom ....types import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage\nfrom .. 
import LLMFamilyV1, LLMSpecV1\nfrom ..core import LLM\n\nif TYPE_CHECKING:\n from chatglm_cpp import Pipeline\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ChatglmCppModelConfig(TypedDict, total=False):\n pass\n\n\nclass ChatglmCppGenerateConfig(TypedDict, total=False):\n max_tokens: int\n top_p: float\n temperature: float\n stream: bool\n\n\nclass ChatglmCppChatModel(LLM):\n def __init__(\n self,\n model_uid: str,\n model_family: \"LLMFamilyV1\",\n model_spec: \"LLMSpecV1\",\n quantization: str,\n model_path: str,\n model_config: Optional[ChatglmCppModelConfig] = None,\n ):\n super().__init__(model_uid, model_family, model_spec, quantization, model_path)\n self._llm: Optional[\"Pipeline\"] = None\n\n # just a placeholder for now as the chatglm_cpp repo doesn't support model config.\n self._model_config = model_config\n\n @classmethod\n def _sanitize_generate_config(\n cls,\n chatglmcpp_generate_config: Optional[ChatglmCppGenerateConfig],\n ) -> ChatglmCppGenerateConfig:\n if chatglmcpp_generate_config is None:\n chatglmcpp_generate_config = ChatglmCppGenerateConfig()\n chatglmcpp_generate_config.setdefault(\"stream\", False)\n return chatglmcpp_generate_config\n\n def load(self):\n try:\n import chatglm_cpp\n except ImportError:\n error_message = \"Failed to import module 'chatglm_cpp'\"\n installation_guide = [\n \"Please make sure 'chatglm_cpp' is installed. \",\n \"You can install it by running the following command in the terminal:\\n\",\n \"pip install git+https://github.com/li-plus/chatglm.cpp.git@main\\n\\n\",\n \"Or visit the original git repo if the above command fails:\\n\",\n \"https://github.com/li-plus/chatglm.cpp\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n model_file_path = os.path.join(\n self.model_path,\n self.model_spec.model_file_name_template.format(\n quantization=self.quantization\n ),\n )\n\n # handle legacy cache.\n legacy_model_file_path = os.path.join(self.model_path, \"model.bin\")\n if os.path.exists(legacy_model_file_path):\n model_file_path = legacy_model_file_path\n\n self._llm = chatglm_cpp.Pipeline(Path(model_file_path))\n\n @classmethod\n def match(cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\") -> bool:\n if llm_spec.model_format != \"ggmlv3\":\n return False\n if \"chatglm\" not in llm_family.model_name:\n return False\n if \"chat\" not in llm_family.model_ability:\n return False\n return True\n\n @staticmethod\n def _convert_raw_text_chunks_to_chat(\n tokens: Iterator[str], model_name: str\n ) -> Iterator[ChatCompletionChunk]:\n yield {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion.chunk\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\n \"role\": \"assistant\",\n },\n \"finish_reason\": None,\n }\n ],\n }\n for token in enumerate(tokens):\n yield {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion.chunk\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\n \"content\": token[1],\n },\n \"finish_reason\": None,\n }\n ],\n }\n\n @staticmethod\n def _convert_raw_text_completion_to_chat(\n text: str, model_name: str\n ) -> ChatCompletion:\n return {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": text,\n },\n 
\"finish_reason\": None,\n }\n ],\n \"usage\": {\n \"prompt_tokens\": -1,\n \"completion_tokens\": -1,\n \"total_tokens\": -1,\n },\n }\n\n def chat(\n self,\n prompt: str,\n chat_history: Optional[List[ChatCompletionMessage]] = None,\n generate_config: Optional[ChatglmCppGenerateConfig] = None,\n ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:\n if chat_history is not None:\n chat_history_list = [message[\"content\"] for message in chat_history]\n else:\n chat_history_list = []\n\n chat_history_list.append(prompt)\n logger.debug(\"Full conversation history:\\n%s\", str(chat_history_list))\n\n generate_config = self._sanitize_generate_config(generate_config)\n\n assert self._llm is not None\n\n if generate_config.get(\"stream\", False):\n it = self._llm.stream_chat(\n chat_history_list,\n )\n assert not isinstance(it, str)\n return self._convert_raw_text_chunks_to_chat(it, self.model_uid)\n else:\n c = self._llm.chat(\n chat_history_list,\n )\n assert not isinstance(c, Iterator)\n return self._convert_raw_text_completion_to_chat(c, self.model_uid)\n", "path": "xinference/model/llm/ggml/chatglm.py"}], "after_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport time\nimport uuid\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Iterator, List, Optional, TypedDict, Union\n\nfrom ....types import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage\nfrom .. import LLMFamilyV1, LLMSpecV1\nfrom ..core import LLM\n\nif TYPE_CHECKING:\n from chatglm_cpp import Pipeline\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ChatglmCppModelConfig(TypedDict, total=False):\n pass\n\n\nclass ChatglmCppGenerateConfig(TypedDict, total=False):\n max_tokens: int\n top_p: float\n temperature: float\n stream: bool\n\n\nclass ChatglmCppChatModel(LLM):\n def __init__(\n self,\n model_uid: str,\n model_family: \"LLMFamilyV1\",\n model_spec: \"LLMSpecV1\",\n quantization: str,\n model_path: str,\n model_config: Optional[ChatglmCppModelConfig] = None,\n ):\n super().__init__(model_uid, model_family, model_spec, quantization, model_path)\n self._llm: Optional[\"Pipeline\"] = None\n\n # just a placeholder for now as the chatglm_cpp repo doesn't support model config.\n self._model_config = model_config\n\n @classmethod\n def _sanitize_generate_config(\n cls,\n chatglmcpp_generate_config: Optional[ChatglmCppGenerateConfig],\n ) -> ChatglmCppGenerateConfig:\n if chatglmcpp_generate_config is None:\n chatglmcpp_generate_config = ChatglmCppGenerateConfig()\n chatglmcpp_generate_config.setdefault(\"stream\", False)\n return chatglmcpp_generate_config\n\n def load(self):\n try:\n import chatglm_cpp\n except ImportError:\n error_message = \"Failed to import module 'chatglm_cpp'\"\n installation_guide = [\n \"Please make sure 'chatglm_cpp' is installed. 
\",\n \"You can install it by running the following command in the terminal:\\n\",\n \"pip install git+https://github.com/li-plus/chatglm.cpp.git@main\\n\\n\",\n \"Or visit the original git repo if the above command fails:\\n\",\n \"https://github.com/li-plus/chatglm.cpp\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n model_file_path = os.path.join(\n self.model_path,\n self.model_spec.model_file_name_template.format(\n quantization=self.quantization\n ),\n )\n\n # handle legacy cache.\n legacy_model_file_path = os.path.join(self.model_path, \"model.bin\")\n if os.path.exists(legacy_model_file_path):\n model_file_path = legacy_model_file_path\n\n self._llm = chatglm_cpp.Pipeline(Path(model_file_path))\n\n @classmethod\n def match(cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\") -> bool:\n if llm_spec.model_format != \"ggmlv3\":\n return False\n if \"chatglm\" not in llm_family.model_name:\n return False\n if \"chat\" not in llm_family.model_ability:\n return False\n return True\n\n @staticmethod\n def _convert_raw_text_chunks_to_chat(\n tokens: Iterator[str], model_name: str\n ) -> Iterator[ChatCompletionChunk]:\n yield {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion.chunk\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\n \"role\": \"assistant\",\n },\n \"finish_reason\": None,\n }\n ],\n }\n for token in enumerate(tokens):\n yield {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion.chunk\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\n \"content\": token[1],\n },\n \"finish_reason\": None,\n }\n ],\n }\n\n @staticmethod\n def _convert_raw_text_completion_to_chat(\n text: str, model_name: str\n ) -> ChatCompletion:\n return {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": text,\n },\n \"finish_reason\": None,\n }\n ],\n \"usage\": {\n \"prompt_tokens\": -1,\n \"completion_tokens\": -1,\n \"total_tokens\": -1,\n },\n }\n\n def chat(\n self,\n prompt: str,\n chat_history: Optional[List[ChatCompletionMessage]] = None,\n generate_config: Optional[ChatglmCppGenerateConfig] = None,\n ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:\n if chat_history is not None:\n chat_history_list = [message[\"content\"] for message in chat_history]\n else:\n chat_history_list = []\n\n chat_history_list.append(prompt)\n logger.debug(\"Full conversation history:\\n%s\", str(chat_history_list))\n\n generate_config = self._sanitize_generate_config(generate_config)\n params = dict(generate_config)\n if \"max_tokens\" in params:\n params[\"max_length\"] = params.pop(\"max_tokens\")\n\n assert self._llm is not None\n\n if params.pop(\"stream\", False):\n it = self._llm.stream_chat(\n chat_history_list,\n **params,\n )\n assert not isinstance(it, str)\n return self._convert_raw_text_chunks_to_chat(it, self.model_uid)\n else:\n c = self._llm.chat(\n chat_history_list,\n **params,\n )\n assert not isinstance(c, Iterator)\n return self._convert_raw_text_completion_to_chat(c, self.model_uid)\n", "path": "xinference/model/llm/ggml/chatglm.py"}]} | 2,329 | 269 |
gh_patches_debug_3376 | rasdani/github-patches | git_diff | getsentry__sentry-python-554 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Why is tornado.application logger ignored?
It appears Sentry does not report any logs from the tornado.application logger
https://github.com/getsentry/sentry-python/blob/master/sentry_sdk/integrations/tornado.py#L53
ignore_logger("tornado.application")
However, it seems to be the default logger used by Tornado to report unhandled exceptions:
https://github.com/tornadoweb/tornado/blob/682ff8dec2d095b339b62d875252be932ae4a5f2/tornado/web.py#L1762
def log_exception(
self,
typ: "Optional[Type[BaseException]]",
value: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
"""Override to customize logging of uncaught exceptions.
By default logs instances of `HTTPError` as warnings without
stack traces (on the ``tornado.general`` logger), and all
other exceptions as errors with stack traces (on the
``tornado.application`` logger).
.. versionadded:: 3.1
"""
if isinstance(value, HTTPError):
if value.log_message:
format = "%d %s: " + value.log_message
args = [value.status_code, self._request_summary()] + list(value.args)
gen_log.warning(format, *args)
else:
app_log.error( # type: ignore
"Uncaught exception %s\n%r",
self._request_summary(),
self.request,
exc_info=(typ, value, tb),
)
How are we supposed to propagate uncaught exceptions in a Tornado based application to Sentry with Tornado integration?
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/tornado.py`
Content:
```
1 import weakref
2 from inspect import iscoroutinefunction
3
4 from sentry_sdk.hub import Hub, _should_send_default_pii
5 from sentry_sdk.utils import (
6 HAS_REAL_CONTEXTVARS,
7 event_from_exception,
8 capture_internal_exceptions,
9 transaction_from_function,
10 )
11 from sentry_sdk.integrations import Integration
12 from sentry_sdk.integrations._wsgi_common import (
13 RequestExtractor,
14 _filter_headers,
15 _is_json_content_type,
16 )
17 from sentry_sdk.integrations.logging import ignore_logger
18 from sentry_sdk._compat import iteritems
19
20 from tornado.web import RequestHandler, HTTPError
21 from tornado.gen import coroutine
22
23 from sentry_sdk._types import MYPY
24
25 if MYPY:
26 from typing import Any
27 from typing import Optional
28 from typing import Dict
29 from typing import Callable
30
31 from sentry_sdk._types import EventProcessor
32
33
34 class TornadoIntegration(Integration):
35 identifier = "tornado"
36
37 @staticmethod
38 def setup_once():
39 # type: () -> None
40 import tornado
41
42 tornado_version = getattr(tornado, "version_info", None)
43 if tornado_version is None or tornado_version < (5, 0):
44 raise RuntimeError("Tornado 5+ required")
45
46 if not HAS_REAL_CONTEXTVARS:
47 # Tornado is async. We better have contextvars or we're going to leak
48 # state between requests.
49 raise RuntimeError(
50 "The tornado integration for Sentry requires Python 3.6+ or the aiocontextvars package"
51 )
52
53 ignore_logger("tornado.application")
54 ignore_logger("tornado.access")
55
56 old_execute = RequestHandler._execute
57
58 awaitable = iscoroutinefunction(old_execute)
59
60 if awaitable:
61 # Starting Tornado 6 RequestHandler._execute method is a standard Python coroutine (async/await)
62 # In that case our method should be a coroutine function too
63 async def sentry_execute_request_handler(self, *args, **kwargs):
64 # type: (Any, *Any, **Any) -> Any
65 hub = Hub.current
66 integration = hub.get_integration(TornadoIntegration)
67 if integration is None:
68 return await old_execute(self, *args, **kwargs)
69
70 weak_handler = weakref.ref(self)
71
72 with Hub(hub) as hub:
73 with hub.configure_scope() as scope:
74 scope.clear_breadcrumbs()
75 scope.add_event_processor(_make_event_processor(weak_handler))
76 return await old_execute(self, *args, **kwargs)
77
78 else:
79
80 @coroutine # type: ignore
81 def sentry_execute_request_handler(self, *args, **kwargs):
82 # type: (RequestHandler, *Any, **Any) -> Any
83 hub = Hub.current
84 integration = hub.get_integration(TornadoIntegration)
85 if integration is None:
86 return old_execute(self, *args, **kwargs)
87
88 weak_handler = weakref.ref(self)
89
90 with Hub(hub) as hub:
91 with hub.configure_scope() as scope:
92 scope.add_event_processor(_make_event_processor(weak_handler))
93 result = yield from old_execute(self, *args, **kwargs)
94 return result
95
96 RequestHandler._execute = sentry_execute_request_handler
97
98 old_log_exception = RequestHandler.log_exception
99
100 def sentry_log_exception(self, ty, value, tb, *args, **kwargs):
101 # type: (Any, type, BaseException, Any, *Any, **Any) -> Optional[Any]
102 _capture_exception(ty, value, tb)
103 return old_log_exception(self, ty, value, tb, *args, **kwargs)
104
105 RequestHandler.log_exception = sentry_log_exception
106
107
108 def _capture_exception(ty, value, tb):
109 # type: (type, BaseException, Any) -> None
110 hub = Hub.current
111 if hub.get_integration(TornadoIntegration) is None:
112 return
113 if isinstance(value, HTTPError):
114 return
115
116 # If an integration is there, a client has to be there.
117 client = hub.client # type: Any
118
119 event, hint = event_from_exception(
120 (ty, value, tb),
121 client_options=client.options,
122 mechanism={"type": "tornado", "handled": False},
123 )
124
125 hub.capture_event(event, hint=hint)
126
127
128 def _make_event_processor(weak_handler):
129 # type: (Callable[[], RequestHandler]) -> EventProcessor
130 def tornado_processor(event, hint):
131 # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
132 handler = weak_handler()
133 if handler is None:
134 return event
135
136 request = handler.request
137
138 with capture_internal_exceptions():
139 method = getattr(handler, handler.request.method.lower())
140 event["transaction"] = transaction_from_function(method)
141
142 with capture_internal_exceptions():
143 extractor = TornadoRequestExtractor(request)
144 extractor.extract_into_event(event)
145
146 request_info = event["request"]
147
148 request_info["url"] = "%s://%s%s" % (
149 request.protocol,
150 request.host,
151 request.path,
152 )
153
154 request_info["query_string"] = request.query
155 request_info["method"] = request.method
156 request_info["env"] = {"REMOTE_ADDR": request.remote_ip}
157 request_info["headers"] = _filter_headers(dict(request.headers))
158
159 with capture_internal_exceptions():
160 if handler.current_user and _should_send_default_pii():
161 event.setdefault("user", {})["is_authenticated"] = True
162
163 return event
164
165 return tornado_processor
166
167
168 class TornadoRequestExtractor(RequestExtractor):
169 def content_length(self):
170 # type: () -> int
171 if self.request.body is None:
172 return 0
173 return len(self.request.body)
174
175 def cookies(self):
176 # type: () -> Dict[str, str]
177 return {k: v.value for k, v in iteritems(self.request.cookies)}
178
179 def raw_data(self):
180 # type: () -> bytes
181 return self.request.body
182
183 def form(self):
184 # type: () -> Dict[str, Any]
185 return {
186 k: [v.decode("latin1", "replace") for v in vs]
187 for k, vs in iteritems(self.request.body_arguments)
188 }
189
190 def is_json(self):
191 # type: () -> bool
192 return _is_json_content_type(self.request.headers.get("content-type"))
193
194 def files(self):
195 # type: () -> Dict[str, Any]
196 return {k: v[0] for k, v in iteritems(self.request.files) if v}
197
198 def size_of_file(self, file):
199 # type: (Any) -> int
200 return len(file.body or ())
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/integrations/tornado.py b/sentry_sdk/integrations/tornado.py
--- a/sentry_sdk/integrations/tornado.py
+++ b/sentry_sdk/integrations/tornado.py
@@ -50,7 +50,6 @@
"The tornado integration for Sentry requires Python 3.6+ or the aiocontextvars package"
)
- ignore_logger("tornado.application")
ignore_logger("tornado.access")
old_execute = RequestHandler._execute
| {"golden_diff": "diff --git a/sentry_sdk/integrations/tornado.py b/sentry_sdk/integrations/tornado.py\n--- a/sentry_sdk/integrations/tornado.py\n+++ b/sentry_sdk/integrations/tornado.py\n@@ -50,7 +50,6 @@\n \"The tornado integration for Sentry requires Python 3.6+ or the aiocontextvars package\"\n )\n \n- ignore_logger(\"tornado.application\")\n ignore_logger(\"tornado.access\")\n \n old_execute = RequestHandler._execute\n", "issue": "Why is tornado.application logger ignored?\nIt appears Sentry do not report any logs from tornado.application logger\r\n\r\nhttps://github.com/getsentry/sentry-python/blob/master/sentry_sdk/integrations/tornado.py#L53\r\n ignore_logger(\"tornado.application\")\r\n\r\nHowever, it seems the default logger used by Tornado to report unhandled exceptions:\r\n\r\nhttps://github.com/tornadoweb/tornado/blob/682ff8dec2d095b339b62d875252be932ae4a5f2/tornado/web.py#L1762\r\n\r\n def log_exception(\r\n self,\r\n typ: \"Optional[Type[BaseException]]\",\r\n value: Optional[BaseException],\r\n tb: Optional[TracebackType],\r\n ) -> None:\r\n \"\"\"Override to customize logging of uncaught exceptions.\r\n By default logs instances of `HTTPError` as warnings without\r\n stack traces (on the ``tornado.general`` logger), and all\r\n other exceptions as errors with stack traces (on the\r\n ``tornado.application`` logger).\r\n .. versionadded:: 3.1\r\n \"\"\"\r\n if isinstance(value, HTTPError):\r\n if value.log_message:\r\n format = \"%d %s: \" + value.log_message\r\n args = [value.status_code, self._request_summary()] + list(value.args)\r\n gen_log.warning(format, *args)\r\n else:\r\n app_log.error( # type: ignore\r\n \"Uncaught exception %s\\n%r\",\r\n self._request_summary(),\r\n self.request,\r\n exc_info=(typ, value, tb),\r\n )\r\n\r\nHow are we supposed to propagate uncaught exceptions in a Tornado based application to Sentry with Tornado integration?\r\n\r\nThanks\n", "before_files": [{"content": "import weakref\nfrom inspect import iscoroutinefunction\n\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.utils import (\n HAS_REAL_CONTEXTVARS,\n event_from_exception,\n capture_internal_exceptions,\n transaction_from_function,\n)\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations._wsgi_common import (\n RequestExtractor,\n _filter_headers,\n _is_json_content_type,\n)\nfrom sentry_sdk.integrations.logging import ignore_logger\nfrom sentry_sdk._compat import iteritems\n\nfrom tornado.web import RequestHandler, HTTPError\nfrom tornado.gen import coroutine\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Optional\n from typing import Dict\n from typing import Callable\n\n from sentry_sdk._types import EventProcessor\n\n\nclass TornadoIntegration(Integration):\n identifier = \"tornado\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n import tornado\n\n tornado_version = getattr(tornado, \"version_info\", None)\n if tornado_version is None or tornado_version < (5, 0):\n raise RuntimeError(\"Tornado 5+ required\")\n\n if not HAS_REAL_CONTEXTVARS:\n # Tornado is async. 
We better have contextvars or we're going to leak\n # state between requests.\n raise RuntimeError(\n \"The tornado integration for Sentry requires Python 3.6+ or the aiocontextvars package\"\n )\n\n ignore_logger(\"tornado.application\")\n ignore_logger(\"tornado.access\")\n\n old_execute = RequestHandler._execute\n\n awaitable = iscoroutinefunction(old_execute)\n\n if awaitable:\n # Starting Tornado 6 RequestHandler._execute method is a standard Python coroutine (async/await)\n # In that case our method should be a coroutine function too\n async def sentry_execute_request_handler(self, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(TornadoIntegration)\n if integration is None:\n return await old_execute(self, *args, **kwargs)\n\n weak_handler = weakref.ref(self)\n\n with Hub(hub) as hub:\n with hub.configure_scope() as scope:\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_event_processor(weak_handler))\n return await old_execute(self, *args, **kwargs)\n\n else:\n\n @coroutine # type: ignore\n def sentry_execute_request_handler(self, *args, **kwargs):\n # type: (RequestHandler, *Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(TornadoIntegration)\n if integration is None:\n return old_execute(self, *args, **kwargs)\n\n weak_handler = weakref.ref(self)\n\n with Hub(hub) as hub:\n with hub.configure_scope() as scope:\n scope.add_event_processor(_make_event_processor(weak_handler))\n result = yield from old_execute(self, *args, **kwargs)\n return result\n\n RequestHandler._execute = sentry_execute_request_handler\n\n old_log_exception = RequestHandler.log_exception\n\n def sentry_log_exception(self, ty, value, tb, *args, **kwargs):\n # type: (Any, type, BaseException, Any, *Any, **Any) -> Optional[Any]\n _capture_exception(ty, value, tb)\n return old_log_exception(self, ty, value, tb, *args, **kwargs)\n\n RequestHandler.log_exception = sentry_log_exception\n\n\ndef _capture_exception(ty, value, tb):\n # type: (type, BaseException, Any) -> None\n hub = Hub.current\n if hub.get_integration(TornadoIntegration) is None:\n return\n if isinstance(value, HTTPError):\n return\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = event_from_exception(\n (ty, value, tb),\n client_options=client.options,\n mechanism={\"type\": \"tornado\", \"handled\": False},\n )\n\n hub.capture_event(event, hint=hint)\n\n\ndef _make_event_processor(weak_handler):\n # type: (Callable[[], RequestHandler]) -> EventProcessor\n def tornado_processor(event, hint):\n # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]\n handler = weak_handler()\n if handler is None:\n return event\n\n request = handler.request\n\n with capture_internal_exceptions():\n method = getattr(handler, handler.request.method.lower())\n event[\"transaction\"] = transaction_from_function(method)\n\n with capture_internal_exceptions():\n extractor = TornadoRequestExtractor(request)\n extractor.extract_into_event(event)\n\n request_info = event[\"request\"]\n\n request_info[\"url\"] = \"%s://%s%s\" % (\n request.protocol,\n request.host,\n request.path,\n )\n\n request_info[\"query_string\"] = request.query\n request_info[\"method\"] = request.method\n request_info[\"env\"] = {\"REMOTE_ADDR\": request.remote_ip}\n request_info[\"headers\"] = _filter_headers(dict(request.headers))\n\n with capture_internal_exceptions():\n if handler.current_user and _should_send_default_pii():\n 
event.setdefault(\"user\", {})[\"is_authenticated\"] = True\n\n return event\n\n return tornado_processor\n\n\nclass TornadoRequestExtractor(RequestExtractor):\n def content_length(self):\n # type: () -> int\n if self.request.body is None:\n return 0\n return len(self.request.body)\n\n def cookies(self):\n # type: () -> Dict[str, str]\n return {k: v.value for k, v in iteritems(self.request.cookies)}\n\n def raw_data(self):\n # type: () -> bytes\n return self.request.body\n\n def form(self):\n # type: () -> Dict[str, Any]\n return {\n k: [v.decode(\"latin1\", \"replace\") for v in vs]\n for k, vs in iteritems(self.request.body_arguments)\n }\n\n def is_json(self):\n # type: () -> bool\n return _is_json_content_type(self.request.headers.get(\"content-type\"))\n\n def files(self):\n # type: () -> Dict[str, Any]\n return {k: v[0] for k, v in iteritems(self.request.files) if v}\n\n def size_of_file(self, file):\n # type: (Any) -> int\n return len(file.body or ())\n", "path": "sentry_sdk/integrations/tornado.py"}], "after_files": [{"content": "import weakref\nfrom inspect import iscoroutinefunction\n\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.utils import (\n HAS_REAL_CONTEXTVARS,\n event_from_exception,\n capture_internal_exceptions,\n transaction_from_function,\n)\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations._wsgi_common import (\n RequestExtractor,\n _filter_headers,\n _is_json_content_type,\n)\nfrom sentry_sdk.integrations.logging import ignore_logger\nfrom sentry_sdk._compat import iteritems\n\nfrom tornado.web import RequestHandler, HTTPError\nfrom tornado.gen import coroutine\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Optional\n from typing import Dict\n from typing import Callable\n\n from sentry_sdk._types import EventProcessor\n\n\nclass TornadoIntegration(Integration):\n identifier = \"tornado\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n import tornado\n\n tornado_version = getattr(tornado, \"version_info\", None)\n if tornado_version is None or tornado_version < (5, 0):\n raise RuntimeError(\"Tornado 5+ required\")\n\n if not HAS_REAL_CONTEXTVARS:\n # Tornado is async. 
We better have contextvars or we're going to leak\n # state between requests.\n raise RuntimeError(\n \"The tornado integration for Sentry requires Python 3.6+ or the aiocontextvars package\"\n )\n\n ignore_logger(\"tornado.access\")\n\n old_execute = RequestHandler._execute\n\n awaitable = iscoroutinefunction(old_execute)\n\n if awaitable:\n # Starting Tornado 6 RequestHandler._execute method is a standard Python coroutine (async/await)\n # In that case our method should be a coroutine function too\n async def sentry_execute_request_handler(self, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(TornadoIntegration)\n if integration is None:\n return await old_execute(self, *args, **kwargs)\n\n weak_handler = weakref.ref(self)\n\n with Hub(hub) as hub:\n with hub.configure_scope() as scope:\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_event_processor(weak_handler))\n return await old_execute(self, *args, **kwargs)\n\n else:\n\n @coroutine # type: ignore\n def sentry_execute_request_handler(self, *args, **kwargs):\n # type: (RequestHandler, *Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(TornadoIntegration)\n if integration is None:\n return old_execute(self, *args, **kwargs)\n\n weak_handler = weakref.ref(self)\n\n with Hub(hub) as hub:\n with hub.configure_scope() as scope:\n scope.add_event_processor(_make_event_processor(weak_handler))\n result = yield from old_execute(self, *args, **kwargs)\n return result\n\n RequestHandler._execute = sentry_execute_request_handler\n\n old_log_exception = RequestHandler.log_exception\n\n def sentry_log_exception(self, ty, value, tb, *args, **kwargs):\n # type: (Any, type, BaseException, Any, *Any, **Any) -> Optional[Any]\n _capture_exception(ty, value, tb)\n return old_log_exception(self, ty, value, tb, *args, **kwargs)\n\n RequestHandler.log_exception = sentry_log_exception\n\n\ndef _capture_exception(ty, value, tb):\n # type: (type, BaseException, Any) -> None\n hub = Hub.current\n if hub.get_integration(TornadoIntegration) is None:\n return\n if isinstance(value, HTTPError):\n return\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = event_from_exception(\n (ty, value, tb),\n client_options=client.options,\n mechanism={\"type\": \"tornado\", \"handled\": False},\n )\n\n hub.capture_event(event, hint=hint)\n\n\ndef _make_event_processor(weak_handler):\n # type: (Callable[[], RequestHandler]) -> EventProcessor\n def tornado_processor(event, hint):\n # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]\n handler = weak_handler()\n if handler is None:\n return event\n\n request = handler.request\n\n with capture_internal_exceptions():\n method = getattr(handler, handler.request.method.lower())\n event[\"transaction\"] = transaction_from_function(method)\n\n with capture_internal_exceptions():\n extractor = TornadoRequestExtractor(request)\n extractor.extract_into_event(event)\n\n request_info = event[\"request\"]\n\n request_info[\"url\"] = \"%s://%s%s\" % (\n request.protocol,\n request.host,\n request.path,\n )\n\n request_info[\"query_string\"] = request.query\n request_info[\"method\"] = request.method\n request_info[\"env\"] = {\"REMOTE_ADDR\": request.remote_ip}\n request_info[\"headers\"] = _filter_headers(dict(request.headers))\n\n with capture_internal_exceptions():\n if handler.current_user and _should_send_default_pii():\n event.setdefault(\"user\", 
{})[\"is_authenticated\"] = True\n\n return event\n\n return tornado_processor\n\n\nclass TornadoRequestExtractor(RequestExtractor):\n def content_length(self):\n # type: () -> int\n if self.request.body is None:\n return 0\n return len(self.request.body)\n\n def cookies(self):\n # type: () -> Dict[str, str]\n return {k: v.value for k, v in iteritems(self.request.cookies)}\n\n def raw_data(self):\n # type: () -> bytes\n return self.request.body\n\n def form(self):\n # type: () -> Dict[str, Any]\n return {\n k: [v.decode(\"latin1\", \"replace\") for v in vs]\n for k, vs in iteritems(self.request.body_arguments)\n }\n\n def is_json(self):\n # type: () -> bool\n return _is_json_content_type(self.request.headers.get(\"content-type\"))\n\n def files(self):\n # type: () -> Dict[str, Any]\n return {k: v[0] for k, v in iteritems(self.request.files) if v}\n\n def size_of_file(self, file):\n # type: (Any) -> int\n return len(file.body or ())\n", "path": "sentry_sdk/integrations/tornado.py"}]} | 2,579 | 111 |
gh_patches_debug_15309 | rasdani/github-patches | git_diff | InstaPy__InstaPy-3257 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Last update has a bug: local variable 'send_security_code_button' referenced before assignment
@timgrossmann @uluQulu @sionking
Traceback (most recent call last):
File "/root/InstaPy/5bcd8f2445908600133a63b6.py", line 18, in <module>
session.login()
File "/root/InstaPy/instapy/instapy.py", line 396, in login
self.bypass_with_mobile):
File "/root/InstaPy/instapy/login_util.py", line 268, in login_user
bypass_suspicious_login(browser, bypass_with_mobile)
File "/root/InstaPy/instapy/login_util.py", line 90, in bypass_suspicious_login
.move_to_element(send_security_code_button)
UnboundLocalError: local variable 'send_security_code_button' referenced before assignment
--- END ISSUE ---
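The traceback quoted above is the textbook symptom of a local name that is only assigned on one branch of a function. A minimal, self-contained reproduction of the same error class (the names here are illustrative and unrelated to InstaPy's code):

```python
def demo(flag: bool) -> str:
    if flag:
        message = "assigned only on this branch"
    # When flag is False, `message` was never bound in this scope,
    # so returning it raises UnboundLocalError.
    return message

try:
    demo(False)
except UnboundLocalError as err:
    print(err)  # local variable 'message' referenced before assignment
```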
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instapy/login_util.py`
Content:
```
1 """Module only used for the login part of the script"""
2 import time
3 import pickle
4 from selenium.webdriver.common.action_chains import ActionChains
5
6 from .time_util import sleep
7 from .util import update_activity
8 from .util import web_address_navigator
9 from .util import explicit_wait
10 from .util import click_element
11
12 from selenium.common.exceptions import NoSuchElementException
13 from selenium.common.exceptions import WebDriverException
14
15
16
17
18 def bypass_suspicious_login(browser, bypass_with_mobile):
19 """Bypass suspicious loggin attempt verification. This should be only enabled
20 when there isn't available cookie for the username, otherwise it will and
21 shows "Unable to locate email or phone button" message, folollowed by
22 CRITICAL - Wrong login data!"""
23 # close sign up Instagram modal if available
24 try:
25 close_button = browser.find_element_by_xpath("[text()='Close']")
26
27 (ActionChains(browser)
28 .move_to_element(close_button)
29 .click()
30 .perform())
31
32 # update server calls
33 update_activity()
34
35 except NoSuchElementException:
36 pass
37
38 try:
39 # click on "This was me" button if challenge page was called
40 this_was_me_button = browser.find_element_by_xpath(
41 "//button[@name='choice'][text()='This Was Me']")
42
43 (ActionChains(browser)
44 .move_to_element(this_was_me_button)
45 .click()
46 .perform())
47
48 # update server calls
49 update_activity()
50
51 except NoSuchElementException:
52 # no verification needed
53 pass
54
55 try:
56 choice = browser.find_element_by_xpath(
57 "//label[@for='choice_1']").text
58
59 except NoSuchElementException:
60 try:
61 choice = browser.find_element_by_xpath(
62 "//label[@class='_q0nt5']").text
63
64 except:
65 try:
66 choice = browser.find_element_by_xpath(
67 "//label[@class='_q0nt5 _a7z3k']").text
68
69 except:
70 print("Unable to locate email or phone button, maybe "
71 "bypass_suspicious_login=True isn't needed anymore.")
72 return False
73
74 if bypass_with_mobile:
75 choice = browser.find_element_by_xpath(
76 "//label[@for='choice_0']").text
77
78 send_security_code_button = browser.find_element_by_xpath(
79 "//button[text()='Send Security Code']")
80
81 mobile_button = browser.find_element_by_xpath(
82 "//label[@for='choice_0']")
83
84 (ActionChains(browser)
85 .move_to_element(mobile_button)
86 .click()
87 .perform())
88
89 sleep(5)
90
91 (ActionChains(browser)
92 .move_to_element(send_security_code_button)
93 .click()
94 .perform())
95
96 # update server calls
97 update_activity()
98
99 print('Instagram detected an unusual login attempt')
100 print('A security code was sent to your {}'.format(choice))
101 security_code = input('Type the security code here: ')
102
103 security_code_field = browser.find_element_by_xpath((
104 "//input[@id='security_code']"))
105
106 (ActionChains(browser)
107 .move_to_element(security_code_field)
108 .click()
109 .send_keys(security_code)
110 .perform())
111
112 # update server calls for both 'click' and 'send_keys' actions
113 for i in range(2):
114 update_activity()
115
116 submit_security_code_button = browser.find_element_by_xpath(
117 "//button[text()='Submit']")
118
119 (ActionChains(browser)
120 .move_to_element(submit_security_code_button)
121 .click()
122 .perform())
123
124 # update server calls
125 update_activity()
126
127 try:
128 sleep(5)
129 # locate wrong security code message
130 wrong_login = browser.find_element_by_xpath((
131 "//p[text()='Please check the code we sent you and try "
132 "again.']"))
133
134 if wrong_login is not None:
135 print(('Wrong security code! Please check the code Instagram'
136 'sent you and try again.'))
137
138 except NoSuchElementException:
139 # correct security code
140 pass
141
142
143
144 def login_user(browser,
145 username,
146 password,
147 logger,
148 logfolder,
149 switch_language=True,
150 bypass_suspicious_attempt=False,
151 bypass_with_mobile=False
152 ):
153 """Logins the user with the given username and password"""
154 assert username, 'Username not provided'
155 assert password, 'Password not provided'
156
157 ig_homepage = "https://www.instagram.com"
158 web_address_navigator(browser, ig_homepage)
159 cookie_loaded = False
160
161 # try to load cookie from username
162 try:
163 googledotcom = "https://www.google.com"
164 web_address_navigator(browser, googledotcom)
165 for cookie in pickle.load(open('{0}{1}_cookie.pkl'
166 .format(logfolder,username), 'rb')):
167 browser.add_cookie(cookie)
168 cookie_loaded = True
169 except (WebDriverException, OSError, IOError):
170 print("Cookie file not found, creating cookie...")
171
172 # include time.sleep(1) to prevent getting stuck on google.com
173 time.sleep(1)
174
175 web_address_navigator(browser, ig_homepage)
176
177 # Cookie has been loaded, user should be logged in. Ensurue this is true
178 login_elem = browser.find_elements_by_xpath(
179 "//*[contains(text(), 'Log in')]")
180 # Login text is not found, user logged in
181 # If not, issue with cookie, create new cookie
182 if len(login_elem) == 0:
183 dismiss_notification_offer(browser, logger)
184 return True
185
186 # If not, issue with cookie, create new cookie
187 if cookie_loaded:
188 print("Issue with cookie for user " + username
189 + ". Creating new cookie...")
190
191 # Changes instagram language to english, to ensure no errors ensue from
192 # having the site on a different language
193 # Might cause problems if the OS language is english
194 if switch_language:
195 language_element_ENG = browser.find_element_by_xpath(
196 "//select[@class='hztqj']/option[text()='English']")
197 click_element(browser, language_element_ENG)
198
199 # Check if the first div is 'Create an Account' or 'Log In'
200 login_elem = browser.find_element_by_xpath(
201 "//article//a[text()='Log in']")
202
203 if login_elem is not None:
204 (ActionChains(browser)
205 .move_to_element(login_elem)
206 .click()
207 .perform())
208
209 # update server calls
210 update_activity()
211
212 # Enter username and password and logs the user in
213 # Sometimes the element name isn't 'Username' and 'Password'
214 # (valid for placeholder too)
215
216 # wait until it navigates to the login page
217 login_page_title = "Login"
218 explicit_wait(browser, "TC", login_page_title, logger)
219
220 # wait until the 'username' input element is located and visible
221 input_username_XP = "//input[@name='username']"
222 explicit_wait(browser, "VOEL", [input_username_XP, "XPath"], logger)
223
224 input_username = browser.find_element_by_xpath(input_username_XP)
225
226 (ActionChains(browser)
227 .move_to_element(input_username)
228 .click()
229 .send_keys(username)
230 .perform())
231
232 # update server calls for both 'click' and 'send_keys' actions
233 for i in range(2):
234 update_activity()
235
236 sleep(1)
237
238 # password
239 input_password = browser.find_elements_by_xpath(
240 "//input[@name='password']")
241
242 if not isinstance(password, str):
243 password = str(password)
244
245 (ActionChains(browser)
246 .move_to_element(input_password[0])
247 .click()
248 .send_keys(password)
249 .perform())
250
251 # update server calls for both 'click' and 'send_keys' actions
252 for i in range(2):
253 update_activity()
254
255 login_button = browser.find_element_by_xpath(
256 "//button[text()='Log in']")
257
258 (ActionChains(browser)
259 .move_to_element(login_button)
260 .click()
261 .perform())
262
263 # update server calls
264 update_activity()
265
266 dismiss_get_app_offer(browser, logger)
267 dismiss_notification_offer(browser, logger)
268
269 if bypass_suspicious_attempt is True:
270 bypass_suspicious_login(browser, bypass_with_mobile)
271
272 sleep(5)
273
274 # Check if user is logged-in (If there's two 'nav' elements)
275 nav = browser.find_elements_by_xpath('//nav')
276 if len(nav) == 2:
277 # create cookie for username
278 pickle.dump(browser.get_cookies(),
279 open('{0}{1}_cookie.pkl'.format(logfolder,username), 'wb'))
280 return True
281 else:
282 return False
283
284
285
286 def dismiss_get_app_offer(browser, logger):
287 """ Dismiss 'Get the Instagram App' page after a fresh login """
288 offer_elem = "//*[contains(text(), 'Get App')]"
289 dismiss_elem = "//*[contains(text(), 'Not Now')]"
290
291 # wait a bit and see if the 'Get App' offer rises up
292 offer_loaded = explicit_wait(browser, "VOEL", [offer_elem, "XPath"], logger, 5, False)
293
294 if offer_loaded:
295 dismiss_elem = browser.find_element_by_xpath(dismiss_elem)
296 click_element(browser, dismiss_elem)
297
298
299
300 def dismiss_notification_offer(browser, logger):
301 """ Dismiss 'Turn on Notifications' offer on session start """
302 offer_elem_loc = "//div/h2[text()='Turn on Notifications']"
303 dismiss_elem_loc = "//button[text()='Not Now']"
304
305 # wait a bit and see if the 'Turn on Notifications' offer rises up
306 offer_loaded = explicit_wait(browser, "VOEL", [offer_elem_loc, "XPath"], logger, 4, False)
307
308 if offer_loaded:
309 dismiss_elem = browser.find_element_by_xpath(dismiss_elem_loc)
310 click_element(browser, dismiss_elem)
311
312
313
314
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/instapy/login_util.py b/instapy/login_util.py
--- a/instapy/login_util.py
+++ b/instapy/login_util.py
@@ -75,9 +75,6 @@
choice = browser.find_element_by_xpath(
"//label[@for='choice_0']").text
- send_security_code_button = browser.find_element_by_xpath(
- "//button[text()='Send Security Code']")
-
mobile_button = browser.find_element_by_xpath(
"//label[@for='choice_0']")
@@ -88,6 +85,9 @@
sleep(5)
+ send_security_code_button = browser.find_element_by_xpath(
+ "//button[text()='Send Security Code']")
+
(ActionChains(browser)
.move_to_element(send_security_code_button)
.click()
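The diff above is purely a reordering: the "Send Security Code" button is located only after the mobile option has been clicked and the page has had time to update, so the name is bound before the `ActionChains` call that uses it. A rough sketch of that ordering as a standalone helper — the XPaths come from the patch, while the function name and the fixed sleep are assumptions for illustration:

```python
import time

from selenium.webdriver.common.action_chains import ActionChains

def request_security_code_via_mobile(browser) -> None:
    # 1) Choose the mobile option first.
    mobile_button = browser.find_element_by_xpath("//label[@for='choice_0']")
    ActionChains(browser).move_to_element(mobile_button).click().perform()

    # 2) Give the page a moment to react before locating the button it reveals.
    time.sleep(5)

    # 3) Only now look up and press "Send Security Code"; by this point the
    #    variable is always bound before it is used.
    send_button = browser.find_element_by_xpath(
        "//button[text()='Send Security Code']")
    ActionChains(browser).move_to_element(send_button).click().perform()
```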
| {"golden_diff": "diff --git a/instapy/login_util.py b/instapy/login_util.py\n--- a/instapy/login_util.py\n+++ b/instapy/login_util.py\n@@ -75,9 +75,6 @@\n choice = browser.find_element_by_xpath(\n \"//label[@for='choice_0']\").text\n \n- send_security_code_button = browser.find_element_by_xpath(\n- \"//button[text()='Send Security Code']\")\n-\n mobile_button = browser.find_element_by_xpath(\n \"//label[@for='choice_0']\")\n \n@@ -88,6 +85,9 @@\n \n sleep(5)\n \n+ send_security_code_button = browser.find_element_by_xpath(\n+ \"//button[text()='Send Security Code']\")\n+\n (ActionChains(browser)\n .move_to_element(send_security_code_button)\n .click()\n", "issue": "Last update ha a bug: local variable 'send_security_code_button' referenced before assignment\n@timgrossmann @uluQulu @sionking \r\n\r\nTraceback (most recent call last):\r\n File \"/root/InstaPy/5bcd8f2445908600133a63b6.py\", line 18, in <module>\r\n session.login()\r\n File \"/root/InstaPy/instapy/instapy.py\", line 396, in login\r\n self.bypass_with_mobile):\r\n File \"/root/InstaPy/instapy/login_util.py\", line 268, in login_user\r\n bypass_suspicious_login(browser, bypass_with_mobile)\r\n File \"/root/InstaPy/instapy/login_util.py\", line 90, in bypass_suspicious_login\r\n .move_to_element(send_security_code_button)\r\nUnboundLocalError: local variable 'send_security_code_button' referenced before assignment\r\n\r\n\n", "before_files": [{"content": "\"\"\"Module only used for the login part of the script\"\"\"\nimport time\nimport pickle\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nfrom .time_util import sleep\nfrom .util import update_activity\nfrom .util import web_address_navigator\nfrom .util import explicit_wait\nfrom .util import click_element\n\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import WebDriverException\n\n\n\n\ndef bypass_suspicious_login(browser, bypass_with_mobile):\n \"\"\"Bypass suspicious loggin attempt verification. 
This should be only enabled\n when there isn't available cookie for the username, otherwise it will and\n shows \"Unable to locate email or phone button\" message, folollowed by\n CRITICAL - Wrong login data!\"\"\"\n # close sign up Instagram modal if available\n try:\n close_button = browser.find_element_by_xpath(\"[text()='Close']\")\n\n (ActionChains(browser)\n .move_to_element(close_button)\n .click()\n .perform())\n\n # update server calls\n update_activity()\n\n except NoSuchElementException:\n pass\n\n try:\n # click on \"This was me\" button if challenge page was called\n this_was_me_button = browser.find_element_by_xpath(\n \"//button[@name='choice'][text()='This Was Me']\")\n\n (ActionChains(browser)\n .move_to_element(this_was_me_button)\n .click()\n .perform())\n\n # update server calls\n update_activity()\n\n except NoSuchElementException:\n # no verification needed\n pass\n\n try:\n choice = browser.find_element_by_xpath(\n \"//label[@for='choice_1']\").text\n\n except NoSuchElementException:\n try:\n choice = browser.find_element_by_xpath(\n \"//label[@class='_q0nt5']\").text\n\n except:\n try:\n choice = browser.find_element_by_xpath(\n \"//label[@class='_q0nt5 _a7z3k']\").text\n\n except:\n print(\"Unable to locate email or phone button, maybe \"\n \"bypass_suspicious_login=True isn't needed anymore.\")\n return False\n\n if bypass_with_mobile:\n choice = browser.find_element_by_xpath(\n \"//label[@for='choice_0']\").text\n\n send_security_code_button = browser.find_element_by_xpath(\n \"//button[text()='Send Security Code']\")\n\n mobile_button = browser.find_element_by_xpath(\n \"//label[@for='choice_0']\")\n\n (ActionChains(browser)\n .move_to_element(mobile_button)\n .click()\n .perform())\n \n sleep(5)\n\n (ActionChains(browser)\n .move_to_element(send_security_code_button)\n .click()\n .perform())\n\n # update server calls\n update_activity()\n\n print('Instagram detected an unusual login attempt')\n print('A security code was sent to your {}'.format(choice))\n security_code = input('Type the security code here: ')\n\n security_code_field = browser.find_element_by_xpath((\n \"//input[@id='security_code']\"))\n\n (ActionChains(browser)\n .move_to_element(security_code_field)\n .click()\n .send_keys(security_code)\n .perform())\n\n # update server calls for both 'click' and 'send_keys' actions\n for i in range(2):\n update_activity()\n\n submit_security_code_button = browser.find_element_by_xpath(\n \"//button[text()='Submit']\")\n\n (ActionChains(browser)\n .move_to_element(submit_security_code_button)\n .click()\n .perform())\n\n # update server calls\n update_activity()\n\n try:\n sleep(5)\n # locate wrong security code message\n wrong_login = browser.find_element_by_xpath((\n \"//p[text()='Please check the code we sent you and try \"\n \"again.']\"))\n\n if wrong_login is not None:\n print(('Wrong security code! 
Please check the code Instagram'\n 'sent you and try again.'))\n\n except NoSuchElementException:\n # correct security code\n pass\n\n\n\ndef login_user(browser,\n username,\n password,\n logger,\n logfolder,\n switch_language=True,\n bypass_suspicious_attempt=False,\n bypass_with_mobile=False\n ):\n \"\"\"Logins the user with the given username and password\"\"\"\n assert username, 'Username not provided'\n assert password, 'Password not provided'\n\n ig_homepage = \"https://www.instagram.com\"\n web_address_navigator(browser, ig_homepage)\n cookie_loaded = False\n\n # try to load cookie from username\n try:\n googledotcom = \"https://www.google.com\"\n web_address_navigator(browser, googledotcom)\n for cookie in pickle.load(open('{0}{1}_cookie.pkl'\n .format(logfolder,username), 'rb')):\n browser.add_cookie(cookie)\n cookie_loaded = True\n except (WebDriverException, OSError, IOError):\n print(\"Cookie file not found, creating cookie...\")\n\n # include time.sleep(1) to prevent getting stuck on google.com\n time.sleep(1)\n \n web_address_navigator(browser, ig_homepage)\n\n # Cookie has been loaded, user should be logged in. Ensurue this is true\n login_elem = browser.find_elements_by_xpath(\n \"//*[contains(text(), 'Log in')]\")\n # Login text is not found, user logged in\n # If not, issue with cookie, create new cookie\n if len(login_elem) == 0:\n dismiss_notification_offer(browser, logger)\n return True\n\n # If not, issue with cookie, create new cookie\n if cookie_loaded:\n print(\"Issue with cookie for user \" + username\n + \". Creating new cookie...\")\n\n # Changes instagram language to english, to ensure no errors ensue from\n # having the site on a different language\n # Might cause problems if the OS language is english\n if switch_language:\n language_element_ENG = browser.find_element_by_xpath(\n \"//select[@class='hztqj']/option[text()='English']\")\n click_element(browser, language_element_ENG)\n\n # Check if the first div is 'Create an Account' or 'Log In'\n login_elem = browser.find_element_by_xpath(\n \"//article//a[text()='Log in']\")\n\n if login_elem is not None:\n (ActionChains(browser)\n .move_to_element(login_elem)\n .click()\n .perform())\n\n # update server calls\n update_activity()\n\n # Enter username and password and logs the user in\n # Sometimes the element name isn't 'Username' and 'Password'\n # (valid for placeholder too)\n\n # wait until it navigates to the login page\n login_page_title = \"Login\"\n explicit_wait(browser, \"TC\", login_page_title, logger)\n\n # wait until the 'username' input element is located and visible\n input_username_XP = \"//input[@name='username']\"\n explicit_wait(browser, \"VOEL\", [input_username_XP, \"XPath\"], logger)\n\n input_username = browser.find_element_by_xpath(input_username_XP)\n\n (ActionChains(browser)\n .move_to_element(input_username)\n .click()\n .send_keys(username)\n .perform())\n\n # update server calls for both 'click' and 'send_keys' actions\n for i in range(2):\n update_activity()\n\n sleep(1)\n\n # password\n input_password = browser.find_elements_by_xpath(\n \"//input[@name='password']\")\n\n if not isinstance(password, str):\n password = str(password)\n\n (ActionChains(browser)\n .move_to_element(input_password[0])\n .click()\n .send_keys(password)\n .perform())\n\n # update server calls for both 'click' and 'send_keys' actions\n for i in range(2):\n update_activity()\n\n login_button = browser.find_element_by_xpath(\n \"//button[text()='Log in']\")\n\n (ActionChains(browser)\n 
.move_to_element(login_button)\n .click()\n .perform())\n\n # update server calls\n update_activity()\n\n dismiss_get_app_offer(browser, logger)\n dismiss_notification_offer(browser, logger)\n\n if bypass_suspicious_attempt is True:\n bypass_suspicious_login(browser, bypass_with_mobile)\n\n sleep(5)\n\n # Check if user is logged-in (If there's two 'nav' elements)\n nav = browser.find_elements_by_xpath('//nav')\n if len(nav) == 2:\n # create cookie for username\n pickle.dump(browser.get_cookies(),\n open('{0}{1}_cookie.pkl'.format(logfolder,username), 'wb'))\n return True\n else:\n return False\n\n\n\ndef dismiss_get_app_offer(browser, logger):\n \"\"\" Dismiss 'Get the Instagram App' page after a fresh login \"\"\"\n offer_elem = \"//*[contains(text(), 'Get App')]\"\n dismiss_elem = \"//*[contains(text(), 'Not Now')]\"\n\n # wait a bit and see if the 'Get App' offer rises up\n offer_loaded = explicit_wait(browser, \"VOEL\", [offer_elem, \"XPath\"], logger, 5, False)\n\n if offer_loaded:\n dismiss_elem = browser.find_element_by_xpath(dismiss_elem)\n click_element(browser, dismiss_elem)\n\n\n\ndef dismiss_notification_offer(browser, logger):\n \"\"\" Dismiss 'Turn on Notifications' offer on session start \"\"\"\n offer_elem_loc = \"//div/h2[text()='Turn on Notifications']\"\n dismiss_elem_loc = \"//button[text()='Not Now']\"\n\n # wait a bit and see if the 'Turn on Notifications' offer rises up\n offer_loaded = explicit_wait(browser, \"VOEL\", [offer_elem_loc, \"XPath\"], logger, 4, False)\n\n if offer_loaded:\n dismiss_elem = browser.find_element_by_xpath(dismiss_elem_loc)\n click_element(browser, dismiss_elem)\n\n\n\n", "path": "instapy/login_util.py"}], "after_files": [{"content": "\"\"\"Module only used for the login part of the script\"\"\"\nimport time\nimport pickle\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nfrom .time_util import sleep\nfrom .util import update_activity\nfrom .util import web_address_navigator\nfrom .util import explicit_wait\nfrom .util import click_element\n\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import WebDriverException\n\n\n\n\ndef bypass_suspicious_login(browser, bypass_with_mobile):\n \"\"\"Bypass suspicious loggin attempt verification. 
This should be only enabled\n when there isn't available cookie for the username, otherwise it will and\n shows \"Unable to locate email or phone button\" message, folollowed by\n CRITICAL - Wrong login data!\"\"\"\n # close sign up Instagram modal if available\n try:\n close_button = browser.find_element_by_xpath(\"[text()='Close']\")\n\n (ActionChains(browser)\n .move_to_element(close_button)\n .click()\n .perform())\n\n # update server calls\n update_activity()\n\n except NoSuchElementException:\n pass\n\n try:\n # click on \"This was me\" button if challenge page was called\n this_was_me_button = browser.find_element_by_xpath(\n \"//button[@name='choice'][text()='This Was Me']\")\n\n (ActionChains(browser)\n .move_to_element(this_was_me_button)\n .click()\n .perform())\n\n # update server calls\n update_activity()\n\n except NoSuchElementException:\n # no verification needed\n pass\n\n try:\n choice = browser.find_element_by_xpath(\n \"//label[@for='choice_1']\").text\n\n except NoSuchElementException:\n try:\n choice = browser.find_element_by_xpath(\n \"//label[@class='_q0nt5']\").text\n\n except:\n try:\n choice = browser.find_element_by_xpath(\n \"//label[@class='_q0nt5 _a7z3k']\").text\n\n except:\n print(\"Unable to locate email or phone button, maybe \"\n \"bypass_suspicious_login=True isn't needed anymore.\")\n return False\n\n if bypass_with_mobile:\n choice = browser.find_element_by_xpath(\n \"//label[@for='choice_0']\").text\n\n mobile_button = browser.find_element_by_xpath(\n \"//label[@for='choice_0']\")\n\n (ActionChains(browser)\n .move_to_element(mobile_button)\n .click()\n .perform())\n \n sleep(5)\n\n send_security_code_button = browser.find_element_by_xpath(\n \"//button[text()='Send Security Code']\")\n\n (ActionChains(browser)\n .move_to_element(send_security_code_button)\n .click()\n .perform())\n\n # update server calls\n update_activity()\n\n print('Instagram detected an unusual login attempt')\n print('A security code was sent to your {}'.format(choice))\n security_code = input('Type the security code here: ')\n\n security_code_field = browser.find_element_by_xpath((\n \"//input[@id='security_code']\"))\n\n (ActionChains(browser)\n .move_to_element(security_code_field)\n .click()\n .send_keys(security_code)\n .perform())\n\n # update server calls for both 'click' and 'send_keys' actions\n for i in range(2):\n update_activity()\n\n submit_security_code_button = browser.find_element_by_xpath(\n \"//button[text()='Submit']\")\n\n (ActionChains(browser)\n .move_to_element(submit_security_code_button)\n .click()\n .perform())\n\n # update server calls\n update_activity()\n\n try:\n sleep(5)\n # locate wrong security code message\n wrong_login = browser.find_element_by_xpath((\n \"//p[text()='Please check the code we sent you and try \"\n \"again.']\"))\n\n if wrong_login is not None:\n print(('Wrong security code! 
Please check the code Instagram'\n 'sent you and try again.'))\n\n except NoSuchElementException:\n # correct security code\n pass\n\n\n\ndef login_user(browser,\n username,\n password,\n logger,\n logfolder,\n switch_language=True,\n bypass_suspicious_attempt=False,\n bypass_with_mobile=False\n ):\n \"\"\"Logins the user with the given username and password\"\"\"\n assert username, 'Username not provided'\n assert password, 'Password not provided'\n\n ig_homepage = \"https://www.instagram.com\"\n web_address_navigator(browser, ig_homepage)\n cookie_loaded = False\n\n # try to load cookie from username\n try:\n googledotcom = \"https://www.google.com\"\n web_address_navigator(browser, googledotcom)\n for cookie in pickle.load(open('{0}{1}_cookie.pkl'\n .format(logfolder,username), 'rb')):\n browser.add_cookie(cookie)\n cookie_loaded = True\n except (WebDriverException, OSError, IOError):\n print(\"Cookie file not found, creating cookie...\")\n\n # include time.sleep(1) to prevent getting stuck on google.com\n time.sleep(1)\n \n web_address_navigator(browser, ig_homepage)\n\n # Cookie has been loaded, user should be logged in. Ensurue this is true\n login_elem = browser.find_elements_by_xpath(\n \"//*[contains(text(), 'Log in')]\")\n # Login text is not found, user logged in\n # If not, issue with cookie, create new cookie\n if len(login_elem) == 0:\n dismiss_notification_offer(browser, logger)\n return True\n\n # If not, issue with cookie, create new cookie\n if cookie_loaded:\n print(\"Issue with cookie for user \" + username\n + \". Creating new cookie...\")\n\n # Changes instagram language to english, to ensure no errors ensue from\n # having the site on a different language\n # Might cause problems if the OS language is english\n if switch_language:\n language_element_ENG = browser.find_element_by_xpath(\n \"//select[@class='hztqj']/option[text()='English']\")\n click_element(browser, language_element_ENG)\n\n # Check if the first div is 'Create an Account' or 'Log In'\n login_elem = browser.find_element_by_xpath(\n \"//article//a[text()='Log in']\")\n\n if login_elem is not None:\n (ActionChains(browser)\n .move_to_element(login_elem)\n .click()\n .perform())\n\n # update server calls\n update_activity()\n\n # Enter username and password and logs the user in\n # Sometimes the element name isn't 'Username' and 'Password'\n # (valid for placeholder too)\n\n # wait until it navigates to the login page\n login_page_title = \"Login\"\n explicit_wait(browser, \"TC\", login_page_title, logger)\n\n # wait until the 'username' input element is located and visible\n input_username_XP = \"//input[@name='username']\"\n explicit_wait(browser, \"VOEL\", [input_username_XP, \"XPath\"], logger)\n\n input_username = browser.find_element_by_xpath(input_username_XP)\n\n (ActionChains(browser)\n .move_to_element(input_username)\n .click()\n .send_keys(username)\n .perform())\n\n # update server calls for both 'click' and 'send_keys' actions\n for i in range(2):\n update_activity()\n\n sleep(1)\n\n # password\n input_password = browser.find_elements_by_xpath(\n \"//input[@name='password']\")\n\n if not isinstance(password, str):\n password = str(password)\n\n (ActionChains(browser)\n .move_to_element(input_password[0])\n .click()\n .send_keys(password)\n .perform())\n\n # update server calls for both 'click' and 'send_keys' actions\n for i in range(2):\n update_activity()\n\n login_button = browser.find_element_by_xpath(\n \"//button[text()='Log in']\")\n\n (ActionChains(browser)\n 
.move_to_element(login_button)\n .click()\n .perform())\n\n # update server calls\n update_activity()\n\n dismiss_get_app_offer(browser, logger)\n dismiss_notification_offer(browser, logger)\n\n if bypass_suspicious_attempt is True:\n bypass_suspicious_login(browser, bypass_with_mobile)\n\n sleep(5)\n\n # Check if user is logged-in (If there's two 'nav' elements)\n nav = browser.find_elements_by_xpath('//nav')\n if len(nav) == 2:\n # create cookie for username\n pickle.dump(browser.get_cookies(),\n open('{0}{1}_cookie.pkl'.format(logfolder,username), 'wb'))\n return True\n else:\n return False\n\n\n\ndef dismiss_get_app_offer(browser, logger):\n \"\"\" Dismiss 'Get the Instagram App' page after a fresh login \"\"\"\n offer_elem = \"//*[contains(text(), 'Get App')]\"\n dismiss_elem = \"//*[contains(text(), 'Not Now')]\"\n\n # wait a bit and see if the 'Get App' offer rises up\n offer_loaded = explicit_wait(browser, \"VOEL\", [offer_elem, \"XPath\"], logger, 5, False)\n\n if offer_loaded:\n dismiss_elem = browser.find_element_by_xpath(dismiss_elem)\n click_element(browser, dismiss_elem)\n\n\n\ndef dismiss_notification_offer(browser, logger):\n \"\"\" Dismiss 'Turn on Notifications' offer on session start \"\"\"\n offer_elem_loc = \"//div/h2[text()='Turn on Notifications']\"\n dismiss_elem_loc = \"//button[text()='Not Now']\"\n\n # wait a bit and see if the 'Turn on Notifications' offer rises up\n offer_loaded = explicit_wait(browser, \"VOEL\", [offer_elem_loc, \"XPath\"], logger, 4, False)\n\n if offer_loaded:\n dismiss_elem = browser.find_element_by_xpath(dismiss_elem_loc)\n click_element(browser, dismiss_elem)\n\n\n\n", "path": "instapy/login_util.py"}]} | 3,439 | 178 |
gh_patches_debug_1717 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2016 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update OTLP HTTP port
As per spec change here:
https://github.com/open-telemetry/opentelemetry-specification/pull/1839
The OTLP HTTP port should be 4318.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import gzip
16 import logging
17 import zlib
18 from io import BytesIO
19 from os import environ
20 from typing import Dict, Optional
21 from time import sleep
22
23 import requests
24 from backoff import expo
25
26 from opentelemetry.sdk.environment_variables import (
27 OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,
28 OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,
29 OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
30 OTEL_EXPORTER_OTLP_TRACES_HEADERS,
31 OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,
32 OTEL_EXPORTER_OTLP_CERTIFICATE,
33 OTEL_EXPORTER_OTLP_COMPRESSION,
34 OTEL_EXPORTER_OTLP_ENDPOINT,
35 OTEL_EXPORTER_OTLP_HEADERS,
36 OTEL_EXPORTER_OTLP_TIMEOUT,
37 )
38 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
39 from opentelemetry.exporter.otlp.proto.http import Compression
40 from opentelemetry.exporter.otlp.proto.http.trace_exporter.encoder import (
41 _ProtobufEncoder,
42 )
43 from opentelemetry.util.re import parse_headers
44
45
46 _logger = logging.getLogger(__name__)
47
48
49 DEFAULT_COMPRESSION = Compression.NoCompression
50 DEFAULT_ENDPOINT = "http://localhost:55681/v1/traces"
51 DEFAULT_TIMEOUT = 10 # in seconds
52
53
54 class OTLPSpanExporter(SpanExporter):
55
56 _MAX_RETRY_TIMEOUT = 64
57
58 def __init__(
59 self,
60 endpoint: Optional[str] = None,
61 certificate_file: Optional[str] = None,
62 headers: Optional[Dict[str, str]] = None,
63 timeout: Optional[int] = None,
64 compression: Optional[Compression] = None,
65 ):
66 self._endpoint = endpoint or environ.get(
67 OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
68 environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT),
69 )
70 self._certificate_file = certificate_file or environ.get(
71 OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,
72 environ.get(OTEL_EXPORTER_OTLP_CERTIFICATE, True),
73 )
74 headers_string = environ.get(
75 OTEL_EXPORTER_OTLP_TRACES_HEADERS,
76 environ.get(OTEL_EXPORTER_OTLP_HEADERS, ""),
77 )
78 self._headers = headers or parse_headers(headers_string)
79 self._timeout = timeout or int(
80 environ.get(
81 OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,
82 environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, DEFAULT_TIMEOUT),
83 )
84 )
85 self._compression = compression or _compression_from_env()
86 self._session = requests.Session()
87 self._session.headers.update(self._headers)
88 self._session.headers.update(
89 {"Content-Type": _ProtobufEncoder._CONTENT_TYPE}
90 )
91 if self._compression is not Compression.NoCompression:
92 self._session.headers.update(
93 {"Content-Encoding": self._compression.value}
94 )
95 self._shutdown = False
96
97 def _export(self, serialized_data: str):
98 data = serialized_data
99 if self._compression == Compression.Gzip:
100 gzip_data = BytesIO()
101 with gzip.GzipFile(fileobj=gzip_data, mode="w") as gzip_stream:
102 gzip_stream.write(serialized_data)
103 data = gzip_data.getvalue()
104 elif self._compression == Compression.Deflate:
105 data = zlib.compress(bytes(serialized_data))
106
107 return self._session.post(
108 url=self._endpoint,
109 data=data,
110 verify=self._certificate_file,
111 timeout=self._timeout,
112 )
113
114 @staticmethod
115 def _retryable(resp: requests.Response) -> bool:
116 if resp.status_code == 408:
117 return True
118 if resp.status_code >= 500 and resp.status_code <= 599:
119 return True
120 return False
121
122 def export(self, spans) -> SpanExportResult:
123 # After the call to Shutdown subsequent calls to Export are
124 # not allowed and should return a Failure result.
125 if self._shutdown:
126 _logger.warning("Exporter already shutdown, ignoring batch")
127 return SpanExportResult.FAILURE
128
129 serialized_data = _ProtobufEncoder.serialize(spans)
130
131 for delay in expo(max_value=self._MAX_RETRY_TIMEOUT):
132
133 if delay == self._MAX_RETRY_TIMEOUT:
134 return SpanExportResult.FAILURE
135
136 resp = self._export(serialized_data)
137 # pylint: disable=no-else-return
138 if resp.status_code in (200, 202):
139 return SpanExportResult.SUCCESS
140 elif self._retryable(resp):
141 _logger.debug(
142 "Waiting %ss before retrying export of span", delay
143 )
144 sleep(delay)
145 continue
146 else:
147 _logger.warning(
148 "Failed to export batch code: %s, reason: %s",
149 resp.status_code,
150 resp.text,
151 )
152 return SpanExportResult.FAILURE
153 return SpanExportResult.FAILURE
154
155 def shutdown(self):
156 if self._shutdown:
157 _logger.warning("Exporter already shutdown, ignoring call")
158 return
159 self._session.close()
160 self._shutdown = True
161
162
163 def _compression_from_env() -> Compression:
164 compression = (
165 environ.get(
166 OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,
167 environ.get(OTEL_EXPORTER_OTLP_COMPRESSION, "none"),
168 )
169 .lower()
170 .strip()
171 )
172 return Compression(compression)
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py
--- a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py
+++ b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py
@@ -47,7 +47,7 @@
DEFAULT_COMPRESSION = Compression.NoCompression
-DEFAULT_ENDPOINT = "http://localhost:55681/v1/traces"
+DEFAULT_ENDPOINT = "http://localhost:4318/v1/traces"
DEFAULT_TIMEOUT = 10 # in seconds
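
Note: as a hedged illustration of what the patched default means in practice (not part of the patch itself), the sketch below assumes the patched exporter package is importable and that no `OTEL_EXPORTER_OTLP_*` environment variables are set; `_endpoint` is a private attribute, inspected here purely for illustration.

```python
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter

# With no overrides, the default endpoint should now use port 4318 rather than 55681.
exporter = OTLPSpanExporter()
print(exporter._endpoint)  # expected: "http://localhost:4318/v1/traces"

# Explicit endpoints and the environment-variable overrides behave as before.
custom = OTLPSpanExporter(endpoint="http://my-collector:4318/v1/traces")
```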
| {"golden_diff": "diff --git a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py\n--- a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py\n+++ b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py\n@@ -47,7 +47,7 @@\n \n \n DEFAULT_COMPRESSION = Compression.NoCompression\n-DEFAULT_ENDPOINT = \"http://localhost:55681/v1/traces\"\n+DEFAULT_ENDPOINT = \"http://localhost:4318/v1/traces\"\n DEFAULT_TIMEOUT = 10 # in seconds\n", "issue": "Update OTLP HTTP port\nAs per spec change here:\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-specification/pull/1839\r\n\r\nThe OTLP HTTP port should be 4318.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gzip\nimport logging\nimport zlib\nfrom io import BytesIO\nfrom os import environ\nfrom typing import Dict, Optional\nfrom time import sleep\n\nimport requests\nfrom backoff import expo\n\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n OTEL_EXPORTER_OTLP_TRACES_HEADERS,\n OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,\n OTEL_EXPORTER_OTLP_CERTIFICATE,\n OTEL_EXPORTER_OTLP_COMPRESSION,\n OTEL_EXPORTER_OTLP_ENDPOINT,\n OTEL_EXPORTER_OTLP_HEADERS,\n OTEL_EXPORTER_OTLP_TIMEOUT,\n)\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.exporter.otlp.proto.http import Compression\nfrom opentelemetry.exporter.otlp.proto.http.trace_exporter.encoder import (\n _ProtobufEncoder,\n)\nfrom opentelemetry.util.re import parse_headers\n\n\n_logger = logging.getLogger(__name__)\n\n\nDEFAULT_COMPRESSION = Compression.NoCompression\nDEFAULT_ENDPOINT = \"http://localhost:55681/v1/traces\"\nDEFAULT_TIMEOUT = 10 # in seconds\n\n\nclass OTLPSpanExporter(SpanExporter):\n\n _MAX_RETRY_TIMEOUT = 64\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n certificate_file: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n self._endpoint = endpoint or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT),\n )\n self._certificate_file = certificate_file or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n environ.get(OTEL_EXPORTER_OTLP_CERTIFICATE, True),\n )\n headers_string = environ.get(\n OTEL_EXPORTER_OTLP_TRACES_HEADERS,\n environ.get(OTEL_EXPORTER_OTLP_HEADERS, \"\"),\n )\n self._headers = headers or parse_headers(headers_string)\n self._timeout = timeout or int(\n environ.get(\n OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,\n 
environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, DEFAULT_TIMEOUT),\n )\n )\n self._compression = compression or _compression_from_env()\n self._session = requests.Session()\n self._session.headers.update(self._headers)\n self._session.headers.update(\n {\"Content-Type\": _ProtobufEncoder._CONTENT_TYPE}\n )\n if self._compression is not Compression.NoCompression:\n self._session.headers.update(\n {\"Content-Encoding\": self._compression.value}\n )\n self._shutdown = False\n\n def _export(self, serialized_data: str):\n data = serialized_data\n if self._compression == Compression.Gzip:\n gzip_data = BytesIO()\n with gzip.GzipFile(fileobj=gzip_data, mode=\"w\") as gzip_stream:\n gzip_stream.write(serialized_data)\n data = gzip_data.getvalue()\n elif self._compression == Compression.Deflate:\n data = zlib.compress(bytes(serialized_data))\n\n return self._session.post(\n url=self._endpoint,\n data=data,\n verify=self._certificate_file,\n timeout=self._timeout,\n )\n\n @staticmethod\n def _retryable(resp: requests.Response) -> bool:\n if resp.status_code == 408:\n return True\n if resp.status_code >= 500 and resp.status_code <= 599:\n return True\n return False\n\n def export(self, spans) -> SpanExportResult:\n # After the call to Shutdown subsequent calls to Export are\n # not allowed and should return a Failure result.\n if self._shutdown:\n _logger.warning(\"Exporter already shutdown, ignoring batch\")\n return SpanExportResult.FAILURE\n\n serialized_data = _ProtobufEncoder.serialize(spans)\n\n for delay in expo(max_value=self._MAX_RETRY_TIMEOUT):\n\n if delay == self._MAX_RETRY_TIMEOUT:\n return SpanExportResult.FAILURE\n\n resp = self._export(serialized_data)\n # pylint: disable=no-else-return\n if resp.status_code in (200, 202):\n return SpanExportResult.SUCCESS\n elif self._retryable(resp):\n _logger.debug(\n \"Waiting %ss before retrying export of span\", delay\n )\n sleep(delay)\n continue\n else:\n _logger.warning(\n \"Failed to export batch code: %s, reason: %s\",\n resp.status_code,\n resp.text,\n )\n return SpanExportResult.FAILURE\n return SpanExportResult.FAILURE\n\n def shutdown(self):\n if self._shutdown:\n _logger.warning(\"Exporter already shutdown, ignoring call\")\n return\n self._session.close()\n self._shutdown = True\n\n\ndef _compression_from_env() -> Compression:\n compression = (\n environ.get(\n OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,\n environ.get(OTEL_EXPORTER_OTLP_COMPRESSION, \"none\"),\n )\n .lower()\n .strip()\n )\n return Compression(compression)\n", "path": "exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gzip\nimport logging\nimport zlib\nfrom io import BytesIO\nfrom os import environ\nfrom typing import Dict, Optional\nfrom time import sleep\n\nimport requests\nfrom backoff import expo\n\nfrom opentelemetry.sdk.environment_variables import (\n 
OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n OTEL_EXPORTER_OTLP_TRACES_HEADERS,\n OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,\n OTEL_EXPORTER_OTLP_CERTIFICATE,\n OTEL_EXPORTER_OTLP_COMPRESSION,\n OTEL_EXPORTER_OTLP_ENDPOINT,\n OTEL_EXPORTER_OTLP_HEADERS,\n OTEL_EXPORTER_OTLP_TIMEOUT,\n)\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.exporter.otlp.proto.http import Compression\nfrom opentelemetry.exporter.otlp.proto.http.trace_exporter.encoder import (\n _ProtobufEncoder,\n)\nfrom opentelemetry.util.re import parse_headers\n\n\n_logger = logging.getLogger(__name__)\n\n\nDEFAULT_COMPRESSION = Compression.NoCompression\nDEFAULT_ENDPOINT = \"http://localhost:4318/v1/traces\"\nDEFAULT_TIMEOUT = 10 # in seconds\n\n\nclass OTLPSpanExporter(SpanExporter):\n\n _MAX_RETRY_TIMEOUT = 64\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n certificate_file: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n self._endpoint = endpoint or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT),\n )\n self._certificate_file = certificate_file or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n environ.get(OTEL_EXPORTER_OTLP_CERTIFICATE, True),\n )\n headers_string = environ.get(\n OTEL_EXPORTER_OTLP_TRACES_HEADERS,\n environ.get(OTEL_EXPORTER_OTLP_HEADERS, \"\"),\n )\n self._headers = headers or parse_headers(headers_string)\n self._timeout = timeout or int(\n environ.get(\n OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,\n environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, DEFAULT_TIMEOUT),\n )\n )\n self._compression = compression or _compression_from_env()\n self._session = requests.Session()\n self._session.headers.update(self._headers)\n self._session.headers.update(\n {\"Content-Type\": _ProtobufEncoder._CONTENT_TYPE}\n )\n if self._compression is not Compression.NoCompression:\n self._session.headers.update(\n {\"Content-Encoding\": self._compression.value}\n )\n self._shutdown = False\n\n def _export(self, serialized_data: str):\n data = serialized_data\n if self._compression == Compression.Gzip:\n gzip_data = BytesIO()\n with gzip.GzipFile(fileobj=gzip_data, mode=\"w\") as gzip_stream:\n gzip_stream.write(serialized_data)\n data = gzip_data.getvalue()\n elif self._compression == Compression.Deflate:\n data = zlib.compress(bytes(serialized_data))\n\n return self._session.post(\n url=self._endpoint,\n data=data,\n verify=self._certificate_file,\n timeout=self._timeout,\n )\n\n @staticmethod\n def _retryable(resp: requests.Response) -> bool:\n if resp.status_code == 408:\n return True\n if resp.status_code >= 500 and resp.status_code <= 599:\n return True\n return False\n\n def export(self, spans) -> SpanExportResult:\n # After the call to Shutdown subsequent calls to Export are\n # not allowed and should return a Failure result.\n if self._shutdown:\n _logger.warning(\"Exporter already shutdown, ignoring batch\")\n return SpanExportResult.FAILURE\n\n serialized_data = _ProtobufEncoder.serialize(spans)\n\n for delay in expo(max_value=self._MAX_RETRY_TIMEOUT):\n\n if delay == self._MAX_RETRY_TIMEOUT:\n return SpanExportResult.FAILURE\n\n resp = self._export(serialized_data)\n # pylint: disable=no-else-return\n if resp.status_code in (200, 202):\n return SpanExportResult.SUCCESS\n elif self._retryable(resp):\n _logger.debug(\n \"Waiting %ss 
before retrying export of span\", delay\n )\n sleep(delay)\n continue\n else:\n _logger.warning(\n \"Failed to export batch code: %s, reason: %s\",\n resp.status_code,\n resp.text,\n )\n return SpanExportResult.FAILURE\n return SpanExportResult.FAILURE\n\n def shutdown(self):\n if self._shutdown:\n _logger.warning(\"Exporter already shutdown, ignoring call\")\n return\n self._session.close()\n self._shutdown = True\n\n\ndef _compression_from_env() -> Compression:\n compression = (\n environ.get(\n OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,\n environ.get(OTEL_EXPORTER_OTLP_COMPRESSION, \"none\"),\n )\n .lower()\n .strip()\n )\n return Compression(compression)\n", "path": "exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py"}]} | 2,008 | 209 |
gh_patches_debug_24639 | rasdani/github-patches | git_diff | pyca__cryptography-3722 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DHParametersWithSerialization, DHPublicKeyWithSerialization
For RSAPublicKey, DSAPublicKey, and EllipticCurvePublicKey we decided that there was no scenario under which they could not be serializable. The same is true for these two classes, so we should go ahead and merge the interfaces and then make the WithSerialization versions aliases (this is the approach we took for the other 3 to maintain backwards compatibility)
--- END ISSUE ---
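
Note: a minimal sketch of the aliasing approach the issue asks for, mirroring what was done for the RSA/DSA/EC interfaces; the names follow the file below, and the snippet is illustrative rather than the final patch.

```python
import abc

import six


@six.add_metaclass(abc.ABCMeta)
class DHParameters(object):
    @abc.abstractmethod
    def parameter_numbers(self):
        """Returns a DHParameterNumbers."""


# The old name survives as a plain alias, so existing isinstance() checks and
# interface registrations keep working unchanged.
DHParametersWithSerialization = DHParameters
```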
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/asymmetric/dh.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import abc
8
9 import six
10
11 from cryptography import utils
12
13
14 def generate_parameters(generator, key_size, backend):
15 return backend.generate_dh_parameters(generator, key_size)
16
17
18 class DHPrivateNumbers(object):
19 def __init__(self, x, public_numbers):
20 if not isinstance(x, six.integer_types):
21 raise TypeError("x must be an integer.")
22
23 if not isinstance(public_numbers, DHPublicNumbers):
24 raise TypeError("public_numbers must be an instance of "
25 "DHPublicNumbers.")
26
27 self._x = x
28 self._public_numbers = public_numbers
29
30 def __eq__(self, other):
31 if not isinstance(other, DHPrivateNumbers):
32 return NotImplemented
33
34 return (
35 self._x == other._x and
36 self._public_numbers == other._public_numbers
37 )
38
39 def __ne__(self, other):
40 return not self == other
41
42 def private_key(self, backend):
43 return backend.load_dh_private_numbers(self)
44
45 public_numbers = utils.read_only_property("_public_numbers")
46 x = utils.read_only_property("_x")
47
48
49 class DHPublicNumbers(object):
50 def __init__(self, y, parameter_numbers):
51 if not isinstance(y, six.integer_types):
52 raise TypeError("y must be an integer.")
53
54 if not isinstance(parameter_numbers, DHParameterNumbers):
55 raise TypeError(
56 "parameters must be an instance of DHParameterNumbers.")
57
58 self._y = y
59 self._parameter_numbers = parameter_numbers
60
61 def __eq__(self, other):
62 if not isinstance(other, DHPublicNumbers):
63 return NotImplemented
64
65 return (
66 self._y == other._y and
67 self._parameter_numbers == other._parameter_numbers
68 )
69
70 def __ne__(self, other):
71 return not self == other
72
73 def public_key(self, backend):
74 return backend.load_dh_public_numbers(self)
75
76 y = utils.read_only_property("_y")
77 parameter_numbers = utils.read_only_property("_parameter_numbers")
78
79
80 class DHParameterNumbers(object):
81 def __init__(self, p, g, q=None):
82 if (
83 not isinstance(p, six.integer_types) or
84 not isinstance(g, six.integer_types)
85 ):
86 raise TypeError("p and g must be integers")
87 if q is not None and not isinstance(q, six.integer_types):
88 raise TypeError("q must be integer or None")
89
90 if q is None and g not in (2, 5):
91 raise ValueError("DH generator must be 2 or 5")
92
93 self._p = p
94 self._g = g
95 self._q = q
96
97 def __eq__(self, other):
98 if not isinstance(other, DHParameterNumbers):
99 return NotImplemented
100
101 return (
102 self._p == other._p and
103 self._g == other._g and
104 self._q == other._q
105 )
106
107 def __ne__(self, other):
108 return not self == other
109
110 def parameters(self, backend):
111 return backend.load_dh_parameter_numbers(self)
112
113 p = utils.read_only_property("_p")
114 g = utils.read_only_property("_g")
115 q = utils.read_only_property("_q")
116
117
118 @six.add_metaclass(abc.ABCMeta)
119 class DHParameters(object):
120 @abc.abstractmethod
121 def generate_private_key(self):
122 """
123 Generates and returns a DHPrivateKey.
124 """
125
126 @abc.abstractmethod
127 def parameter_bytes(self, encoding, format):
128 """
129 Returns the parameters serialized as bytes.
130 """
131
132
133 @six.add_metaclass(abc.ABCMeta)
134 class DHParametersWithSerialization(DHParameters):
135 @abc.abstractmethod
136 def parameter_numbers(self):
137 """
138 Returns a DHParameterNumbers.
139 """
140
141
142 @six.add_metaclass(abc.ABCMeta)
143 class DHPrivateKey(object):
144 @abc.abstractproperty
145 def key_size(self):
146 """
147 The bit length of the prime modulus.
148 """
149
150 @abc.abstractmethod
151 def public_key(self):
152 """
153 The DHPublicKey associated with this private key.
154 """
155
156 @abc.abstractmethod
157 def parameters(self):
158 """
159 The DHParameters object associated with this private key.
160 """
161
162 @abc.abstractmethod
163 def exchange(self, peer_public_key):
164 """
165 Given peer's DHPublicKey, carry out the key exchange and
166 return shared key as bytes.
167 """
168
169
170 @six.add_metaclass(abc.ABCMeta)
171 class DHPrivateKeyWithSerialization(DHPrivateKey):
172 @abc.abstractmethod
173 def private_numbers(self):
174 """
175 Returns a DHPrivateNumbers.
176 """
177
178 @abc.abstractmethod
179 def private_bytes(self, encoding, format, encryption_algorithm):
180 """
181 Returns the key serialized as bytes.
182 """
183
184
185 @six.add_metaclass(abc.ABCMeta)
186 class DHPublicKey(object):
187 @abc.abstractproperty
188 def key_size(self):
189 """
190 The bit length of the prime modulus.
191 """
192
193 @abc.abstractmethod
194 def parameters(self):
195 """
196 The DHParameters object associated with this public key.
197 """
198
199
200 @six.add_metaclass(abc.ABCMeta)
201 class DHPublicKeyWithSerialization(DHPublicKey):
202 @abc.abstractmethod
203 def public_numbers(self):
204 """
205 Returns a DHPublicNumbers.
206 """
207
208 @abc.abstractmethod
209 def public_bytes(self, encoding, format):
210 """
211 Returns the key serialized as bytes.
212 """
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/hazmat/primitives/asymmetric/dh.py b/src/cryptography/hazmat/primitives/asymmetric/dh.py
--- a/src/cryptography/hazmat/primitives/asymmetric/dh.py
+++ b/src/cryptography/hazmat/primitives/asymmetric/dh.py
@@ -129,9 +129,6 @@
Returns the parameters serialized as bytes.
"""
-
-@six.add_metaclass(abc.ABCMeta)
-class DHParametersWithSerialization(DHParameters):
@abc.abstractmethod
def parameter_numbers(self):
"""
@@ -139,6 +136,9 @@
"""
+DHParametersWithSerialization = DHParameters
+
+
@six.add_metaclass(abc.ABCMeta)
class DHPrivateKey(object):
@abc.abstractproperty
@@ -196,9 +196,6 @@
The DHParameters object associated with this public key.
"""
-
-@six.add_metaclass(abc.ABCMeta)
-class DHPublicKeyWithSerialization(DHPublicKey):
@abc.abstractmethod
def public_numbers(self):
"""
@@ -210,3 +207,6 @@
"""
Returns the key serialized as bytes.
"""
+
+
+DHPublicKeyWithSerialization = DHPublicKey
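
Note: a quick way to see the backwards compatibility the aliases preserve (a sketch assuming the patched library and its default backend are installed; DH parameter generation can take a few seconds).

```python
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import dh

params = dh.generate_parameters(generator=2, key_size=2048, backend=default_backend())

# After the merge, the *WithSerialization names are the same objects as the base ABCs,
# so both checks succeed for the same instance.
assert isinstance(params, dh.DHParameters)
assert isinstance(params, dh.DHParametersWithSerialization)
assert dh.DHPublicKeyWithSerialization is dh.DHPublicKey
```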
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/asymmetric/dh.py b/src/cryptography/hazmat/primitives/asymmetric/dh.py\n--- a/src/cryptography/hazmat/primitives/asymmetric/dh.py\n+++ b/src/cryptography/hazmat/primitives/asymmetric/dh.py\n@@ -129,9 +129,6 @@\n Returns the parameters serialized as bytes.\n \"\"\"\n \n-\[email protected]_metaclass(abc.ABCMeta)\n-class DHParametersWithSerialization(DHParameters):\n @abc.abstractmethod\n def parameter_numbers(self):\n \"\"\"\n@@ -139,6 +136,9 @@\n \"\"\"\n \n \n+DHParametersWithSerialization = DHParameters\n+\n+\n @six.add_metaclass(abc.ABCMeta)\n class DHPrivateKey(object):\n @abc.abstractproperty\n@@ -196,9 +196,6 @@\n The DHParameters object associated with this public key.\n \"\"\"\n \n-\[email protected]_metaclass(abc.ABCMeta)\n-class DHPublicKeyWithSerialization(DHPublicKey):\n @abc.abstractmethod\n def public_numbers(self):\n \"\"\"\n@@ -210,3 +207,6 @@\n \"\"\"\n Returns the key serialized as bytes.\n \"\"\"\n+\n+\n+DHPublicKeyWithSerialization = DHPublicKey\n", "issue": "DHParametersWithSerialization, DHPublicKeyWithSerialization\nFor RSAPublicKey, DSAPublicKey, and EllipticCurvePublicKey we decided that there was no scenario under which they could not be serializable. The same is true for these two classes, so we should go ahead and merge the interfaces and then make the WithSerialization versions aliases (this is the approach we took for the other 3 to maintain backwards compatibility)\nDHParametersWithSerialization, DHPublicKeyWithSerialization\nFor RSAPublicKey, DSAPublicKey, and EllipticCurvePublicKey we decided that there was no scenario under which they could not be serializable. The same is true for these two classes, so we should go ahead and merge the interfaces and then make the WithSerialization versions aliases (this is the approach we took for the other 3 to maintain backwards compatibility)\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\n\nimport six\n\nfrom cryptography import utils\n\n\ndef generate_parameters(generator, key_size, backend):\n return backend.generate_dh_parameters(generator, key_size)\n\n\nclass DHPrivateNumbers(object):\n def __init__(self, x, public_numbers):\n if not isinstance(x, six.integer_types):\n raise TypeError(\"x must be an integer.\")\n\n if not isinstance(public_numbers, DHPublicNumbers):\n raise TypeError(\"public_numbers must be an instance of \"\n \"DHPublicNumbers.\")\n\n self._x = x\n self._public_numbers = public_numbers\n\n def __eq__(self, other):\n if not isinstance(other, DHPrivateNumbers):\n return NotImplemented\n\n return (\n self._x == other._x and\n self._public_numbers == other._public_numbers\n )\n\n def __ne__(self, other):\n return not self == other\n\n def private_key(self, backend):\n return backend.load_dh_private_numbers(self)\n\n public_numbers = utils.read_only_property(\"_public_numbers\")\n x = utils.read_only_property(\"_x\")\n\n\nclass DHPublicNumbers(object):\n def __init__(self, y, parameter_numbers):\n if not isinstance(y, six.integer_types):\n raise TypeError(\"y must be an integer.\")\n\n if not isinstance(parameter_numbers, DHParameterNumbers):\n raise TypeError(\n \"parameters must be an instance of DHParameterNumbers.\")\n\n self._y = y\n self._parameter_numbers = parameter_numbers\n\n def __eq__(self, other):\n if not isinstance(other, DHPublicNumbers):\n return NotImplemented\n\n return (\n self._y == other._y and\n self._parameter_numbers == other._parameter_numbers\n )\n\n def __ne__(self, other):\n return not self == other\n\n def public_key(self, backend):\n return backend.load_dh_public_numbers(self)\n\n y = utils.read_only_property(\"_y\")\n parameter_numbers = utils.read_only_property(\"_parameter_numbers\")\n\n\nclass DHParameterNumbers(object):\n def __init__(self, p, g, q=None):\n if (\n not isinstance(p, six.integer_types) or\n not isinstance(g, six.integer_types)\n ):\n raise TypeError(\"p and g must be integers\")\n if q is not None and not isinstance(q, six.integer_types):\n raise TypeError(\"q must be integer or None\")\n\n if q is None and g not in (2, 5):\n raise ValueError(\"DH generator must be 2 or 5\")\n\n self._p = p\n self._g = g\n self._q = q\n\n def __eq__(self, other):\n if not isinstance(other, DHParameterNumbers):\n return NotImplemented\n\n return (\n self._p == other._p and\n self._g == other._g and\n self._q == other._q\n )\n\n def __ne__(self, other):\n return not self == other\n\n def parameters(self, backend):\n return backend.load_dh_parameter_numbers(self)\n\n p = utils.read_only_property(\"_p\")\n g = utils.read_only_property(\"_g\")\n q = utils.read_only_property(\"_q\")\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass DHParameters(object):\n @abc.abstractmethod\n def generate_private_key(self):\n \"\"\"\n Generates and returns a DHPrivateKey.\n \"\"\"\n\n @abc.abstractmethod\n def parameter_bytes(self, encoding, format):\n \"\"\"\n Returns the parameters serialized as bytes.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass DHParametersWithSerialization(DHParameters):\n @abc.abstractmethod\n def parameter_numbers(self):\n \"\"\"\n Returns a DHParameterNumbers.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass DHPrivateKey(object):\n @abc.abstractproperty\n def key_size(self):\n \"\"\"\n The bit length of the prime modulus.\n 
\"\"\"\n\n @abc.abstractmethod\n def public_key(self):\n \"\"\"\n The DHPublicKey associated with this private key.\n \"\"\"\n\n @abc.abstractmethod\n def parameters(self):\n \"\"\"\n The DHParameters object associated with this private key.\n \"\"\"\n\n @abc.abstractmethod\n def exchange(self, peer_public_key):\n \"\"\"\n Given peer's DHPublicKey, carry out the key exchange and\n return shared key as bytes.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass DHPrivateKeyWithSerialization(DHPrivateKey):\n @abc.abstractmethod\n def private_numbers(self):\n \"\"\"\n Returns a DHPrivateNumbers.\n \"\"\"\n\n @abc.abstractmethod\n def private_bytes(self, encoding, format, encryption_algorithm):\n \"\"\"\n Returns the key serialized as bytes.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass DHPublicKey(object):\n @abc.abstractproperty\n def key_size(self):\n \"\"\"\n The bit length of the prime modulus.\n \"\"\"\n\n @abc.abstractmethod\n def parameters(self):\n \"\"\"\n The DHParameters object associated with this public key.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass DHPublicKeyWithSerialization(DHPublicKey):\n @abc.abstractmethod\n def public_numbers(self):\n \"\"\"\n Returns a DHPublicNumbers.\n \"\"\"\n\n @abc.abstractmethod\n def public_bytes(self, encoding, format):\n \"\"\"\n Returns the key serialized as bytes.\n \"\"\"\n", "path": "src/cryptography/hazmat/primitives/asymmetric/dh.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\n\nimport six\n\nfrom cryptography import utils\n\n\ndef generate_parameters(generator, key_size, backend):\n return backend.generate_dh_parameters(generator, key_size)\n\n\nclass DHPrivateNumbers(object):\n def __init__(self, x, public_numbers):\n if not isinstance(x, six.integer_types):\n raise TypeError(\"x must be an integer.\")\n\n if not isinstance(public_numbers, DHPublicNumbers):\n raise TypeError(\"public_numbers must be an instance of \"\n \"DHPublicNumbers.\")\n\n self._x = x\n self._public_numbers = public_numbers\n\n def __eq__(self, other):\n if not isinstance(other, DHPrivateNumbers):\n return NotImplemented\n\n return (\n self._x == other._x and\n self._public_numbers == other._public_numbers\n )\n\n def __ne__(self, other):\n return not self == other\n\n def private_key(self, backend):\n return backend.load_dh_private_numbers(self)\n\n public_numbers = utils.read_only_property(\"_public_numbers\")\n x = utils.read_only_property(\"_x\")\n\n\nclass DHPublicNumbers(object):\n def __init__(self, y, parameter_numbers):\n if not isinstance(y, six.integer_types):\n raise TypeError(\"y must be an integer.\")\n\n if not isinstance(parameter_numbers, DHParameterNumbers):\n raise TypeError(\n \"parameters must be an instance of DHParameterNumbers.\")\n\n self._y = y\n self._parameter_numbers = parameter_numbers\n\n def __eq__(self, other):\n if not isinstance(other, DHPublicNumbers):\n return NotImplemented\n\n return (\n self._y == other._y and\n self._parameter_numbers == other._parameter_numbers\n )\n\n def __ne__(self, other):\n return not self == other\n\n def public_key(self, backend):\n return backend.load_dh_public_numbers(self)\n\n y = utils.read_only_property(\"_y\")\n parameter_numbers = utils.read_only_property(\"_parameter_numbers\")\n\n\nclass 
DHParameterNumbers(object):\n def __init__(self, p, g, q=None):\n if (\n not isinstance(p, six.integer_types) or\n not isinstance(g, six.integer_types)\n ):\n raise TypeError(\"p and g must be integers\")\n if q is not None and not isinstance(q, six.integer_types):\n raise TypeError(\"q must be integer or None\")\n\n if q is None and g not in (2, 5):\n raise ValueError(\"DH generator must be 2 or 5\")\n\n self._p = p\n self._g = g\n self._q = q\n\n def __eq__(self, other):\n if not isinstance(other, DHParameterNumbers):\n return NotImplemented\n\n return (\n self._p == other._p and\n self._g == other._g and\n self._q == other._q\n )\n\n def __ne__(self, other):\n return not self == other\n\n def parameters(self, backend):\n return backend.load_dh_parameter_numbers(self)\n\n p = utils.read_only_property(\"_p\")\n g = utils.read_only_property(\"_g\")\n q = utils.read_only_property(\"_q\")\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass DHParameters(object):\n @abc.abstractmethod\n def generate_private_key(self):\n \"\"\"\n Generates and returns a DHPrivateKey.\n \"\"\"\n\n @abc.abstractmethod\n def parameter_bytes(self, encoding, format):\n \"\"\"\n Returns the parameters serialized as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def parameter_numbers(self):\n \"\"\"\n Returns a DHParameterNumbers.\n \"\"\"\n\n\nDHParametersWithSerialization = DHParameters\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass DHPrivateKey(object):\n @abc.abstractproperty\n def key_size(self):\n \"\"\"\n The bit length of the prime modulus.\n \"\"\"\n\n @abc.abstractmethod\n def public_key(self):\n \"\"\"\n The DHPublicKey associated with this private key.\n \"\"\"\n\n @abc.abstractmethod\n def parameters(self):\n \"\"\"\n The DHParameters object associated with this private key.\n \"\"\"\n\n @abc.abstractmethod\n def exchange(self, peer_public_key):\n \"\"\"\n Given peer's DHPublicKey, carry out the key exchange and\n return shared key as bytes.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass DHPrivateKeyWithSerialization(DHPrivateKey):\n @abc.abstractmethod\n def private_numbers(self):\n \"\"\"\n Returns a DHPrivateNumbers.\n \"\"\"\n\n @abc.abstractmethod\n def private_bytes(self, encoding, format, encryption_algorithm):\n \"\"\"\n Returns the key serialized as bytes.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass DHPublicKey(object):\n @abc.abstractproperty\n def key_size(self):\n \"\"\"\n The bit length of the prime modulus.\n \"\"\"\n\n @abc.abstractmethod\n def parameters(self):\n \"\"\"\n The DHParameters object associated with this public key.\n \"\"\"\n\n @abc.abstractmethod\n def public_numbers(self):\n \"\"\"\n Returns a DHPublicNumbers.\n \"\"\"\n\n @abc.abstractmethod\n def public_bytes(self, encoding, format):\n \"\"\"\n Returns the key serialized as bytes.\n \"\"\"\n\n\nDHPublicKeyWithSerialization = DHPublicKey\n", "path": "src/cryptography/hazmat/primitives/asymmetric/dh.py"}]} | 2,212 | 283 |
gh_patches_debug_31931 | rasdani/github-patches | git_diff | Azure__azure-cli-extensions-1181 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
app-insights az cli extension does not seem to work properly in AzureChinaCloud (China East 2)
- If the issue is to do with Azure CLI 2.0 in particular, create an issue here at [Azure/azure-cli](https://github.com/Azure/azure-cli/issues)
### Extension name (the extension in question)
application-insights
### Description of issue (in as much detail as possible)
I have properly logged in with a certain subscription in AzureChinaCloud (China East2)
```
az account show
{
"environmentName": "AzureChinaCloud",
"id": "5b5d5f37-8aca-49f8-8682-7ed6d2f00424",
"isDefault": true,
"name": "Project Vienna PROD - ChinaEast2",
"state": "Enabled",
"tenantId": "a55a4d5b-9241-49b1-b4ff-befa8db00269",
"user": {
"name": "[email protected]",
"type": "user"
}
}
```
However, running any az monitor app-insights command always fails with the error "The subscription '5b5d5f37-8aca-49f8-8682-7ed6d2f00424' could not be found".
For example, api-key show:
```
az monitor app-insights api-key show --app model-monitoring-chinaeast2 --resource-group model-mgmt-chinaeast2 --api-key MMSModelMonitoringApiKey
The subscription '5b5d5f37-8aca-49f8-8682-7ed6d2f00424' could not be found.
```
I suspect application-insights is not working properly in AzureChinaCloud. Any insight would be appreciated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/application-insights/setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8 from codecs import open
9 from setuptools import setup, find_packages
10
11 VERSION = "0.1.1"
12
13 CLASSIFIERS = [
14 'Development Status :: 4 - Beta',
15 'Intended Audience :: Developers',
16 'Intended Audience :: System Administrators',
17 'Programming Language :: Python',
18 'Programming Language :: Python :: 2',
19 'Programming Language :: Python :: 2.7',
20 'Programming Language :: Python :: 3',
21 'Programming Language :: Python :: 3.4',
22 'Programming Language :: Python :: 3.5',
23 'Programming Language :: Python :: 3.6',
24 'License :: OSI Approved :: MIT License',
25 ]
26
27 DEPENDENCIES = []
28
29 with open('README.rst', 'r', encoding='utf-8') as f:
30 README = f.read()
31 with open('HISTORY.rst', 'r', encoding='utf-8') as f:
32 HISTORY = f.read()
33
34 setup(
35 name='application-insights',
36 version=VERSION,
37 description='Support for managing Application Insights components and querying metrics, events, and logs from such components.',
38 long_description=README + '\n\n' + HISTORY,
39 license='MIT',
40 author='Ace Eldeib',
41 author_email='[email protected]',
42 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/application-insights',
43 classifiers=CLASSIFIERS,
44 packages=find_packages(exclude=["tests"]),
45 package_data={'azext_applicationinsights': ['azext_metadata.json']},
46 install_requires=DEPENDENCIES
47 )
48
```
Path: `src/application-insights/azext_applicationinsights/_client_factory.py`
Content:
```
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6
7 def applicationinsights_data_plane_client(cli_ctx, _, subscription=None):
8 """Initialize Log Analytics data client for use with CLI."""
9 from .vendored_sdks.applicationinsights import ApplicationInsightsDataClient
10 from azure.cli.core._profile import Profile
11 profile = Profile(cli_ctx=cli_ctx)
12 cred, _, _ = profile.get_login_credentials(
13 resource="https://api.applicationinsights.io",
14 subscription_id=subscription
15 )
16 return ApplicationInsightsDataClient(cred)
17
18
19 def applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=None):
20 """Initialize Log Analytics mgmt client for use with CLI."""
21 from .vendored_sdks.mgmt_applicationinsights import ApplicationInsightsManagementClient
22 from azure.cli.core._profile import Profile
23 profile = Profile(cli_ctx=cli_ctx)
24 # Use subscription from resource_id where possible, otherwise use login.
25 if subscription:
26 cred, _, _ = profile.get_login_credentials(subscription_id=subscription)
27 return ApplicationInsightsManagementClient(
28 cred,
29 subscription
30 )
31 cred, sub_id, _ = profile.get_login_credentials()
32 return ApplicationInsightsManagementClient(
33 cred,
34 sub_id
35 )
36
37
38 def cf_query(cli_ctx, _, subscription=None):
39 return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).query
40
41
42 def cf_metrics(cli_ctx, _, subscription=None):
43 return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).metrics
44
45
46 def cf_events(cli_ctx, _, subscription=None):
47 return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).events
48
49
50 def cf_components(cli_ctx, _, subscription=None):
51 return applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=subscription).components
52
53
54 def cf_api_key(cli_ctx, _, subscription=None):
55 return applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=subscription).api_keys
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/application-insights/azext_applicationinsights/_client_factory.py b/src/application-insights/azext_applicationinsights/_client_factory.py
--- a/src/application-insights/azext_applicationinsights/_client_factory.py
+++ b/src/application-insights/azext_applicationinsights/_client_factory.py
@@ -10,7 +10,7 @@
from azure.cli.core._profile import Profile
profile = Profile(cli_ctx=cli_ctx)
cred, _, _ = profile.get_login_credentials(
- resource="https://api.applicationinsights.io",
+ resource=cli_ctx.cloud.endpoints.app_insights_resource_id,
subscription_id=subscription
)
return ApplicationInsightsDataClient(cred)
@@ -19,20 +19,8 @@
def applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=None):
"""Initialize Log Analytics mgmt client for use with CLI."""
from .vendored_sdks.mgmt_applicationinsights import ApplicationInsightsManagementClient
- from azure.cli.core._profile import Profile
- profile = Profile(cli_ctx=cli_ctx)
- # Use subscription from resource_id where possible, otherwise use login.
- if subscription:
- cred, _, _ = profile.get_login_credentials(subscription_id=subscription)
- return ApplicationInsightsManagementClient(
- cred,
- subscription
- )
- cred, sub_id, _ = profile.get_login_credentials()
- return ApplicationInsightsManagementClient(
- cred,
- sub_id
- )
+ from azure.cli.core.commands.client_factory import get_mgmt_service_client
+ return get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient, subscription_id=subscription)
def cf_query(cli_ctx, _, subscription=None):
diff --git a/src/application-insights/setup.py b/src/application-insights/setup.py
--- a/src/application-insights/setup.py
+++ b/src/application-insights/setup.py
@@ -8,7 +8,7 @@
from codecs import open
from setuptools import setup, find_packages
-VERSION = "0.1.1"
+VERSION = "0.1.2"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
| {"golden_diff": "diff --git a/src/application-insights/azext_applicationinsights/_client_factory.py b/src/application-insights/azext_applicationinsights/_client_factory.py\n--- a/src/application-insights/azext_applicationinsights/_client_factory.py\n+++ b/src/application-insights/azext_applicationinsights/_client_factory.py\n@@ -10,7 +10,7 @@\n from azure.cli.core._profile import Profile\n profile = Profile(cli_ctx=cli_ctx)\n cred, _, _ = profile.get_login_credentials(\n- resource=\"https://api.applicationinsights.io\",\n+ resource=cli_ctx.cloud.endpoints.app_insights_resource_id,\n subscription_id=subscription\n )\n return ApplicationInsightsDataClient(cred)\n@@ -19,20 +19,8 @@\n def applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=None):\n \"\"\"Initialize Log Analytics mgmt client for use with CLI.\"\"\"\n from .vendored_sdks.mgmt_applicationinsights import ApplicationInsightsManagementClient\n- from azure.cli.core._profile import Profile\n- profile = Profile(cli_ctx=cli_ctx)\n- # Use subscription from resource_id where possible, otherwise use login.\n- if subscription:\n- cred, _, _ = profile.get_login_credentials(subscription_id=subscription)\n- return ApplicationInsightsManagementClient(\n- cred,\n- subscription\n- )\n- cred, sub_id, _ = profile.get_login_credentials()\n- return ApplicationInsightsManagementClient(\n- cred,\n- sub_id\n- )\n+ from azure.cli.core.commands.client_factory import get_mgmt_service_client\n+ return get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient, subscription_id=subscription)\n \n \n def cf_query(cli_ctx, _, subscription=None):\ndiff --git a/src/application-insights/setup.py b/src/application-insights/setup.py\n--- a/src/application-insights/setup.py\n+++ b/src/application-insights/setup.py\n@@ -8,7 +8,7 @@\n from codecs import open\n from setuptools import setup, find_packages\n \n-VERSION = \"0.1.1\"\n+VERSION = \"0.1.2\"\n \n CLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n", "issue": "app-insights az cli extension seems not working properly in AzureChinaCloud (China East 2)\n- If the issue is to do with Azure CLI 2.0 in-particular, create an issue here at [Azure/azure-cli](https://github.com/Azure/azure-cli/issues)\r\n\r\n### Extension name (the extension in question)\r\napplication-insights\r\n\r\n### Description of issue (in as much detail as possible)\r\nI have properly logged in with a certain subscription in AzureChinaCloud (China East2)\r\n```\r\naz account show\r\n{\r\n \"environmentName\": \"AzureChinaCloud\",\r\n \"id\": \"5b5d5f37-8aca-49f8-8682-7ed6d2f00424\",\r\n \"isDefault\": true,\r\n \"name\": \"Project Vienna PROD - ChinaEast2\",\r\n \"state\": \"Enabled\",\r\n \"tenantId\": \"a55a4d5b-9241-49b1-b4ff-befa8db00269\",\r\n \"user\": {\r\n \"name\": \"[email protected]\",\r\n \"type\": \"user\"\r\n }\r\n}\r\n```\r\nHowever run any az monitor app-insights command always failed with error \"The subscription '5b5d5f37-8aca-49f8-8682-7ed6d2f00424' could not be found\" \r\n\r\nfor example, api-key show\r\n```\r\naz monitor app-insights api-key show --app model-monitoring-chinaeast2 --resource-group model-mgmt-chinaeast2 --api-key MMSModelMonitoringApiKey\r\nThe subscription '5b5d5f37-8aca-49f8-8682-7ed6d2f00424' could not be found.\r\n```\r\n\r\nI doubted application-insights is not working in AzureChinaCloud, any insight of it? 
Appreciate\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.1.1\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = []\n\nwith open('README.rst', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='application-insights',\n version=VERSION,\n description='Support for managing Application Insights components and querying metrics, events, and logs from such components.',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n author='Ace Eldeib',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/application-insights',\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=[\"tests\"]),\n package_data={'azext_applicationinsights': ['azext_metadata.json']},\n install_requires=DEPENDENCIES\n)\n", "path": "src/application-insights/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\ndef applicationinsights_data_plane_client(cli_ctx, _, subscription=None):\n \"\"\"Initialize Log Analytics data client for use with CLI.\"\"\"\n from .vendored_sdks.applicationinsights import ApplicationInsightsDataClient\n from azure.cli.core._profile import Profile\n profile = Profile(cli_ctx=cli_ctx)\n cred, _, _ = profile.get_login_credentials(\n resource=\"https://api.applicationinsights.io\",\n subscription_id=subscription\n )\n return ApplicationInsightsDataClient(cred)\n\n\ndef applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=None):\n \"\"\"Initialize Log Analytics mgmt client for use with CLI.\"\"\"\n from .vendored_sdks.mgmt_applicationinsights import ApplicationInsightsManagementClient\n from azure.cli.core._profile import Profile\n profile = Profile(cli_ctx=cli_ctx)\n # Use subscription from resource_id where possible, otherwise use login.\n if subscription:\n cred, _, _ = profile.get_login_credentials(subscription_id=subscription)\n return ApplicationInsightsManagementClient(\n cred,\n subscription\n )\n cred, sub_id, _ = profile.get_login_credentials()\n return ApplicationInsightsManagementClient(\n cred,\n sub_id\n )\n\n\ndef cf_query(cli_ctx, _, subscription=None):\n return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).query\n\n\ndef cf_metrics(cli_ctx, _, subscription=None):\n return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).metrics\n\n\ndef cf_events(cli_ctx, _, subscription=None):\n return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).events\n\n\ndef cf_components(cli_ctx, _, subscription=None):\n return applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=subscription).components\n\n\ndef cf_api_key(cli_ctx, _, subscription=None):\n return applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=subscription).api_keys\n", "path": "src/application-insights/azext_applicationinsights/_client_factory.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.1.2\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = []\n\nwith open('README.rst', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='application-insights',\n version=VERSION,\n description='Support for managing Application Insights components and querying metrics, events, and logs from such components.',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n author='Ace Eldeib',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/application-insights',\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=[\"tests\"]),\n package_data={'azext_applicationinsights': ['azext_metadata.json']},\n install_requires=DEPENDENCIES\n)\n", "path": "src/application-insights/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\ndef applicationinsights_data_plane_client(cli_ctx, _, subscription=None):\n \"\"\"Initialize Log Analytics data client for use with CLI.\"\"\"\n from .vendored_sdks.applicationinsights import ApplicationInsightsDataClient\n from azure.cli.core._profile import Profile\n profile = Profile(cli_ctx=cli_ctx)\n cred, _, _ = profile.get_login_credentials(\n resource=cli_ctx.cloud.endpoints.app_insights_resource_id,\n subscription_id=subscription\n )\n return ApplicationInsightsDataClient(cred)\n\n\ndef applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=None):\n \"\"\"Initialize Log Analytics mgmt client for use with CLI.\"\"\"\n from .vendored_sdks.mgmt_applicationinsights import ApplicationInsightsManagementClient\n from azure.cli.core.commands.client_factory import get_mgmt_service_client\n return get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient, subscription_id=subscription)\n\n\ndef cf_query(cli_ctx, _, subscription=None):\n return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).query\n\n\ndef cf_metrics(cli_ctx, _, subscription=None):\n return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).metrics\n\n\ndef cf_events(cli_ctx, _, subscription=None):\n return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).events\n\n\ndef cf_components(cli_ctx, _, subscription=None):\n return applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=subscription).components\n\n\ndef cf_api_key(cli_ctx, _, subscription=None):\n return applicationinsights_mgmt_plane_client(cli_ctx, _, 
subscription=subscription).api_keys\n", "path": "src/application-insights/azext_applicationinsights/_client_factory.py"}]} | 1,726 | 472 |
gh_patches_debug_20364 | rasdani/github-patches | git_diff | numba__numba-1379 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash when returning a named tuple of lists
This crashes with a segmentation fault:
```
import collections
from numba import jit

Point = collections.namedtuple('Point', ('x', 'y'))

@jit(nopython=True)
def list_tup(n):
return Point(list(range(n)), [42])
print(list_tup(3))
```
gdb shows it crashing in `wrapper..main..list_tup$1.int64`. There's probably an NRT incref missing somewhere.
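
If that is indeed the cause, the fix presumably belongs in the lowering of the namedtuple constructor: the freshly built tuple has to be returned through the borrowed-reference helper so that NRT increfs the member lists instead of letting them be freed. A minimal sketch of that change (it mirrors the patch shown further down; `builtin`, `implement` and `impl_ret_borrowed` come from `numba.targets.imputils`):

```python
from numba import types
from numba.targets.imputils import builtin, implement, impl_ret_borrowed


@builtin
@implement(types.NamedTupleClass, types.VarArg(types.Any))
def namedtuple_constructor(context, builder, sig, args):
    # A namedtuple has the same representation as a regular tuple.
    res = context.make_tuple(builder, sig.return_type, args)
    # The tuple's contents are borrowed from the arguments; returning the
    # value through impl_ret_borrowed increfs them so the lists survive
    # the constructor call.
    return impl_ret_borrowed(context, builder, sig.return_type, res)
```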
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numba/targets/tupleobj.py`
Content:
```
1 """
2 Implementation of tuple objects
3 """
4
5 from llvmlite import ir
6 import llvmlite.llvmpy.core as lc
7
8 from .imputils import (builtin, builtin_attr, implement, impl_attribute,
9 impl_attribute_generic, iternext_impl, struct_factory,
10 impl_ret_borrowed, impl_ret_untracked)
11 from .. import typing, types, cgutils
12
13
14 def generic_compare(context, builder, key, argtypes, args):
15 """
16 Compare the given LLVM values of the given Numba types using
17 the comparison *key* (e.g. '=='). The values are first cast to
18 a common safe conversion type.
19 """
20 at, bt = argtypes
21 av, bv = args
22 ty = context.typing_context.unify_types(at, bt)
23 cav = context.cast(builder, av, at, ty)
24 cbv = context.cast(builder, bv, bt, ty)
25 cmpsig = typing.signature(types.boolean, ty, ty)
26 cmpfunc = context.get_function(key, cmpsig)
27 return cmpfunc(builder, (cav, cbv))
28
29
30 @builtin
31 @implement(types.NamedTupleClass, types.VarArg(types.Any))
32 def namedtuple_constructor(context, builder, sig, args):
33 # A namedtuple has the same representation as a regular tuple
34 return context.make_tuple(builder, sig.return_type, args)
35
36 @builtin
37 @implement(types.len_type, types.Kind(types.BaseTuple))
38 def tuple_len(context, builder, sig, args):
39 tupty, = sig.args
40 retty = sig.return_type
41 res = context.get_constant(retty, len(tupty.types))
42 return impl_ret_untracked(context, builder, sig.return_type, res)
43
44 @builtin
45 @implement(bool, types.Kind(types.BaseTuple))
46 def tuple_bool(context, builder, sig, args):
47 tupty, = sig.args
48 if len(tupty):
49 return cgutils.true_bit
50 else:
51 return cgutils.false_bit
52
53 def tuple_cmp_ordered(context, builder, op, sig, args):
54 tu, tv = sig.args
55 u, v = args
56 res = cgutils.alloca_once_value(builder, cgutils.true_bit)
57 bbend = builder.append_basic_block("cmp_end")
58 for i, (ta, tb) in enumerate(zip(tu.types, tv.types)):
59 a = builder.extract_value(u, i)
60 b = builder.extract_value(v, i)
61 not_equal = generic_compare(context, builder, '!=', (ta, tb), (a, b))
62 with builder.if_then(not_equal):
63 pred = generic_compare(context, builder, op, (ta, tb), (a, b))
64 builder.store(pred, res)
65 builder.branch(bbend)
66 # Everything matched equal => compare lengths
67 len_compare = eval("%d %s %d" % (len(tu.types), op, len(tv.types)))
68 pred = context.get_constant(types.boolean, len_compare)
69 builder.store(pred, res)
70 builder.branch(bbend)
71 builder.position_at_end(bbend)
72 return builder.load(res)
73
74 @builtin
75 @implement('==', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))
76 def tuple_eq(context, builder, sig, args):
77 tu, tv = sig.args
78 u, v = args
79 if len(tu.types) != len(tv.types):
80 res = context.get_constant(types.boolean, False)
81 return impl_ret_untracked(context, builder, sig.return_type, res)
82 res = context.get_constant(types.boolean, True)
83 for i, (ta, tb) in enumerate(zip(tu.types, tv.types)):
84 a = builder.extract_value(u, i)
85 b = builder.extract_value(v, i)
86 pred = generic_compare(context, builder, "==", (ta, tb), (a, b))
87 res = builder.and_(res, pred)
88 return impl_ret_untracked(context, builder, sig.return_type, res)
89
90 @builtin
91 @implement('!=', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))
92 def tuple_ne(context, builder, sig, args):
93 res = builder.not_(tuple_eq(context, builder, sig, args))
94 return impl_ret_untracked(context, builder, sig.return_type, res)
95
96 @builtin
97 @implement('<', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))
98 def tuple_lt(context, builder, sig, args):
99 res = tuple_cmp_ordered(context, builder, '<', sig, args)
100 return impl_ret_untracked(context, builder, sig.return_type, res)
101
102 @builtin
103 @implement('<=', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))
104 def tuple_le(context, builder, sig, args):
105 res = tuple_cmp_ordered(context, builder, '<=', sig, args)
106 return impl_ret_untracked(context, builder, sig.return_type, res)
107
108 @builtin
109 @implement('>', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))
110 def tuple_gt(context, builder, sig, args):
111 res = tuple_cmp_ordered(context, builder, '>', sig, args)
112 return impl_ret_untracked(context, builder, sig.return_type, res)
113
114 @builtin
115 @implement('>=', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))
116 def tuple_ge(context, builder, sig, args):
117 res = tuple_cmp_ordered(context, builder, '>=', sig, args)
118 return impl_ret_untracked(context, builder, sig.return_type, res)
119
120
121 @builtin_attr
122 @impl_attribute_generic(types.Kind(types.BaseNamedTuple))
123 def namedtuple_getattr(context, builder, typ, value, attr):
124 """
125 Fetch a namedtuple's field.
126 """
127 index = typ.fields.index(attr)
128 return builder.extract_value(value, index)
129
130
131 #------------------------------------------------------------------------------
132 # Tuple iterators
133
134 @struct_factory(types.UniTupleIter)
135 def make_unituple_iter(tupiter):
136 """
137 Return the Structure representation of the given *tupiter* (an
138 instance of types.UniTupleIter).
139 """
140 return cgutils.create_struct_proxy(tupiter)
141
142
143 @builtin
144 @implement('getiter', types.Kind(types.UniTuple))
145 @implement('getiter', types.Kind(types.NamedUniTuple))
146 def getiter_unituple(context, builder, sig, args):
147 [tupty] = sig.args
148 [tup] = args
149
150 tupitercls = make_unituple_iter(types.UniTupleIter(tupty))
151 iterval = tupitercls(context, builder)
152
153 index0 = context.get_constant(types.intp, 0)
154 indexptr = cgutils.alloca_once(builder, index0.type)
155 builder.store(index0, indexptr)
156
157 iterval.index = indexptr
158 iterval.tuple = tup
159
160 res = iterval._getvalue()
161 return impl_ret_borrowed(context, builder, sig.return_type, res)
162
163
164 @builtin
165 @implement('iternext', types.Kind(types.UniTupleIter))
166 @iternext_impl
167 def iternext_unituple(context, builder, sig, args, result):
168 [tupiterty] = sig.args
169 [tupiter] = args
170
171 tupitercls = make_unituple_iter(tupiterty)
172 iterval = tupitercls(context, builder, value=tupiter)
173 tup = iterval.tuple
174 idxptr = iterval.index
175 idx = builder.load(idxptr)
176 count = context.get_constant(types.intp, tupiterty.unituple.count)
177
178 is_valid = builder.icmp(lc.ICMP_SLT, idx, count)
179 result.set_valid(is_valid)
180
181 with builder.if_then(is_valid):
182 getitem_sig = typing.signature(tupiterty.unituple.dtype,
183 tupiterty.unituple,
184 types.intp)
185 getitem_out = getitem_unituple(context, builder, getitem_sig,
186 [tup, idx])
187 result.yield_(getitem_out)
188 nidx = builder.add(idx, context.get_constant(types.intp, 1))
189 builder.store(nidx, iterval.index)
190
191
192 @builtin
193 @implement('getitem', types.Kind(types.UniTuple), types.intp)
194 @implement('getitem', types.Kind(types.NamedUniTuple), types.intp)
195 def getitem_unituple(context, builder, sig, args):
196 tupty, _ = sig.args
197 tup, idx = args
198
199 bbelse = builder.append_basic_block("switch.else")
200 bbend = builder.append_basic_block("switch.end")
201 switch = builder.switch(idx, bbelse, n=tupty.count)
202
203 with builder.goto_block(bbelse):
204 context.call_conv.return_user_exc(builder, IndexError,
205 ("tuple index out of range",))
206
207 lrtty = context.get_value_type(tupty.dtype)
208 with builder.goto_block(bbend):
209 phinode = builder.phi(lrtty)
210
211 for i in range(tupty.count):
212 ki = context.get_constant(types.intp, i)
213 bbi = builder.append_basic_block("switch.%d" % i)
214 switch.add_case(ki, bbi)
215 with builder.goto_block(bbi):
216 value = builder.extract_value(tup, i)
217 builder.branch(bbend)
218 phinode.add_incoming(value, bbi)
219
220 builder.position_at_end(bbend)
221 res = phinode
222 assert sig.return_type == tupty.dtype
223 return impl_ret_borrowed(context, builder, sig.return_type, res)
224
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numba/targets/tupleobj.py b/numba/targets/tupleobj.py
--- a/numba/targets/tupleobj.py
+++ b/numba/targets/tupleobj.py
@@ -31,7 +31,9 @@
@implement(types.NamedTupleClass, types.VarArg(types.Any))
def namedtuple_constructor(context, builder, sig, args):
# A namedtuple has the same representation as a regular tuple
- return context.make_tuple(builder, sig.return_type, args)
+ res = context.make_tuple(builder, sig.return_type, args)
+ # The tuple's contents are borrowed
+ return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement(types.len_type, types.Kind(types.BaseTuple))
@@ -125,7 +127,8 @@
Fetch a namedtuple's field.
"""
index = typ.fields.index(attr)
- return builder.extract_value(value, index)
+ res = builder.extract_value(value, index)
+ return impl_ret_borrowed(context, builder, typ[index], res)
#------------------------------------------------------------------------------
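
With both the constructor and the attribute lookup routed through `impl_ret_borrowed`, the reproducer from the issue should run to completion; the expected (hypothetical) session would look like:

```python
>>> list_tup(3)
Point(x=[0, 1, 2], y=[42])
```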
| {"golden_diff": "diff --git a/numba/targets/tupleobj.py b/numba/targets/tupleobj.py\n--- a/numba/targets/tupleobj.py\n+++ b/numba/targets/tupleobj.py\n@@ -31,7 +31,9 @@\n @implement(types.NamedTupleClass, types.VarArg(types.Any))\n def namedtuple_constructor(context, builder, sig, args):\n # A namedtuple has the same representation as a regular tuple\n- return context.make_tuple(builder, sig.return_type, args)\n+ res = context.make_tuple(builder, sig.return_type, args)\n+ # The tuple's contents are borrowed\n+ return impl_ret_borrowed(context, builder, sig.return_type, res)\n \n @builtin\n @implement(types.len_type, types.Kind(types.BaseTuple))\n@@ -125,7 +127,8 @@\n Fetch a namedtuple's field.\n \"\"\"\n index = typ.fields.index(attr)\n- return builder.extract_value(value, index)\n+ res = builder.extract_value(value, index)\n+ return impl_ret_borrowed(context, builder, typ[index], res)\n \n \n #------------------------------------------------------------------------------\n", "issue": "Crash when returning a named tuple of lists\nThis crashes with a segmentation fault:\n\n```\nPoint = collections.namedtuple('Point', ('x', 'y'))\n\n@jit(nopython=True)\ndef list_tup(n):\n return Point(list(range(n)), [42])\n\nprint(list_tup(3))\n```\n\ngdb shows it crashing in `wrapper..main..list_tup$1.int64`. There's probably a NRT incref missing somewhere.\n\n", "before_files": [{"content": "\"\"\"\nImplementation of tuple objects\n\"\"\"\n\nfrom llvmlite import ir\nimport llvmlite.llvmpy.core as lc\n\nfrom .imputils import (builtin, builtin_attr, implement, impl_attribute,\n impl_attribute_generic, iternext_impl, struct_factory,\n impl_ret_borrowed, impl_ret_untracked)\nfrom .. import typing, types, cgutils\n\n\ndef generic_compare(context, builder, key, argtypes, args):\n \"\"\"\n Compare the given LLVM values of the given Numba types using\n the comparison *key* (e.g. '=='). 
The values are first cast to\n a common safe conversion type.\n \"\"\"\n at, bt = argtypes\n av, bv = args\n ty = context.typing_context.unify_types(at, bt)\n cav = context.cast(builder, av, at, ty)\n cbv = context.cast(builder, bv, bt, ty)\n cmpsig = typing.signature(types.boolean, ty, ty)\n cmpfunc = context.get_function(key, cmpsig)\n return cmpfunc(builder, (cav, cbv))\n\n\n@builtin\n@implement(types.NamedTupleClass, types.VarArg(types.Any))\ndef namedtuple_constructor(context, builder, sig, args):\n # A namedtuple has the same representation as a regular tuple\n return context.make_tuple(builder, sig.return_type, args)\n\n@builtin\n@implement(types.len_type, types.Kind(types.BaseTuple))\ndef tuple_len(context, builder, sig, args):\n tupty, = sig.args\n retty = sig.return_type\n res = context.get_constant(retty, len(tupty.types))\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n@builtin\n@implement(bool, types.Kind(types.BaseTuple))\ndef tuple_bool(context, builder, sig, args):\n tupty, = sig.args\n if len(tupty):\n return cgutils.true_bit\n else:\n return cgutils.false_bit\n\ndef tuple_cmp_ordered(context, builder, op, sig, args):\n tu, tv = sig.args\n u, v = args\n res = cgutils.alloca_once_value(builder, cgutils.true_bit)\n bbend = builder.append_basic_block(\"cmp_end\")\n for i, (ta, tb) in enumerate(zip(tu.types, tv.types)):\n a = builder.extract_value(u, i)\n b = builder.extract_value(v, i)\n not_equal = generic_compare(context, builder, '!=', (ta, tb), (a, b))\n with builder.if_then(not_equal):\n pred = generic_compare(context, builder, op, (ta, tb), (a, b))\n builder.store(pred, res)\n builder.branch(bbend)\n # Everything matched equal => compare lengths\n len_compare = eval(\"%d %s %d\" % (len(tu.types), op, len(tv.types)))\n pred = context.get_constant(types.boolean, len_compare)\n builder.store(pred, res)\n builder.branch(bbend)\n builder.position_at_end(bbend)\n return builder.load(res)\n\n@builtin\n@implement('==', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))\ndef tuple_eq(context, builder, sig, args):\n tu, tv = sig.args\n u, v = args\n if len(tu.types) != len(tv.types):\n res = context.get_constant(types.boolean, False)\n return impl_ret_untracked(context, builder, sig.return_type, res)\n res = context.get_constant(types.boolean, True)\n for i, (ta, tb) in enumerate(zip(tu.types, tv.types)):\n a = builder.extract_value(u, i)\n b = builder.extract_value(v, i)\n pred = generic_compare(context, builder, \"==\", (ta, tb), (a, b))\n res = builder.and_(res, pred)\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n@builtin\n@implement('!=', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))\ndef tuple_ne(context, builder, sig, args):\n res = builder.not_(tuple_eq(context, builder, sig, args))\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n@builtin\n@implement('<', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))\ndef tuple_lt(context, builder, sig, args):\n res = tuple_cmp_ordered(context, builder, '<', sig, args)\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n@builtin\n@implement('<=', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))\ndef tuple_le(context, builder, sig, args):\n res = tuple_cmp_ordered(context, builder, '<=', sig, args)\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n@builtin\n@implement('>', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))\ndef tuple_gt(context, builder, sig, args):\n res = 
tuple_cmp_ordered(context, builder, '>', sig, args)\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n@builtin\n@implement('>=', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))\ndef tuple_ge(context, builder, sig, args):\n res = tuple_cmp_ordered(context, builder, '>=', sig, args)\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n\n@builtin_attr\n@impl_attribute_generic(types.Kind(types.BaseNamedTuple))\ndef namedtuple_getattr(context, builder, typ, value, attr):\n \"\"\"\n Fetch a namedtuple's field.\n \"\"\"\n index = typ.fields.index(attr)\n return builder.extract_value(value, index)\n\n\n#------------------------------------------------------------------------------\n# Tuple iterators\n\n@struct_factory(types.UniTupleIter)\ndef make_unituple_iter(tupiter):\n \"\"\"\n Return the Structure representation of the given *tupiter* (an\n instance of types.UniTupleIter).\n \"\"\"\n return cgutils.create_struct_proxy(tupiter)\n\n\n@builtin\n@implement('getiter', types.Kind(types.UniTuple))\n@implement('getiter', types.Kind(types.NamedUniTuple))\ndef getiter_unituple(context, builder, sig, args):\n [tupty] = sig.args\n [tup] = args\n\n tupitercls = make_unituple_iter(types.UniTupleIter(tupty))\n iterval = tupitercls(context, builder)\n\n index0 = context.get_constant(types.intp, 0)\n indexptr = cgutils.alloca_once(builder, index0.type)\n builder.store(index0, indexptr)\n\n iterval.index = indexptr\n iterval.tuple = tup\n\n res = iterval._getvalue()\n return impl_ret_borrowed(context, builder, sig.return_type, res)\n\n\n@builtin\n@implement('iternext', types.Kind(types.UniTupleIter))\n@iternext_impl\ndef iternext_unituple(context, builder, sig, args, result):\n [tupiterty] = sig.args\n [tupiter] = args\n\n tupitercls = make_unituple_iter(tupiterty)\n iterval = tupitercls(context, builder, value=tupiter)\n tup = iterval.tuple\n idxptr = iterval.index\n idx = builder.load(idxptr)\n count = context.get_constant(types.intp, tupiterty.unituple.count)\n\n is_valid = builder.icmp(lc.ICMP_SLT, idx, count)\n result.set_valid(is_valid)\n\n with builder.if_then(is_valid):\n getitem_sig = typing.signature(tupiterty.unituple.dtype,\n tupiterty.unituple,\n types.intp)\n getitem_out = getitem_unituple(context, builder, getitem_sig,\n [tup, idx])\n result.yield_(getitem_out)\n nidx = builder.add(idx, context.get_constant(types.intp, 1))\n builder.store(nidx, iterval.index)\n\n\n@builtin\n@implement('getitem', types.Kind(types.UniTuple), types.intp)\n@implement('getitem', types.Kind(types.NamedUniTuple), types.intp)\ndef getitem_unituple(context, builder, sig, args):\n tupty, _ = sig.args\n tup, idx = args\n\n bbelse = builder.append_basic_block(\"switch.else\")\n bbend = builder.append_basic_block(\"switch.end\")\n switch = builder.switch(idx, bbelse, n=tupty.count)\n\n with builder.goto_block(bbelse):\n context.call_conv.return_user_exc(builder, IndexError,\n (\"tuple index out of range\",))\n\n lrtty = context.get_value_type(tupty.dtype)\n with builder.goto_block(bbend):\n phinode = builder.phi(lrtty)\n\n for i in range(tupty.count):\n ki = context.get_constant(types.intp, i)\n bbi = builder.append_basic_block(\"switch.%d\" % i)\n switch.add_case(ki, bbi)\n with builder.goto_block(bbi):\n value = builder.extract_value(tup, i)\n builder.branch(bbend)\n phinode.add_incoming(value, bbi)\n\n builder.position_at_end(bbend)\n res = phinode\n assert sig.return_type == tupty.dtype\n return impl_ret_borrowed(context, builder, sig.return_type, res)\n", "path": 
"numba/targets/tupleobj.py"}], "after_files": [{"content": "\"\"\"\nImplementation of tuple objects\n\"\"\"\n\nfrom llvmlite import ir\nimport llvmlite.llvmpy.core as lc\n\nfrom .imputils import (builtin, builtin_attr, implement, impl_attribute,\n impl_attribute_generic, iternext_impl, struct_factory,\n impl_ret_borrowed, impl_ret_untracked)\nfrom .. import typing, types, cgutils\n\n\ndef generic_compare(context, builder, key, argtypes, args):\n \"\"\"\n Compare the given LLVM values of the given Numba types using\n the comparison *key* (e.g. '=='). The values are first cast to\n a common safe conversion type.\n \"\"\"\n at, bt = argtypes\n av, bv = args\n ty = context.typing_context.unify_types(at, bt)\n cav = context.cast(builder, av, at, ty)\n cbv = context.cast(builder, bv, bt, ty)\n cmpsig = typing.signature(types.boolean, ty, ty)\n cmpfunc = context.get_function(key, cmpsig)\n return cmpfunc(builder, (cav, cbv))\n\n\n@builtin\n@implement(types.NamedTupleClass, types.VarArg(types.Any))\ndef namedtuple_constructor(context, builder, sig, args):\n # A namedtuple has the same representation as a regular tuple\n res = context.make_tuple(builder, sig.return_type, args)\n # The tuple's contents are borrowed\n return impl_ret_borrowed(context, builder, sig.return_type, res)\n\n@builtin\n@implement(types.len_type, types.Kind(types.BaseTuple))\ndef tuple_len(context, builder, sig, args):\n tupty, = sig.args\n retty = sig.return_type\n res = context.get_constant(retty, len(tupty.types))\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n@builtin\n@implement(bool, types.Kind(types.BaseTuple))\ndef tuple_bool(context, builder, sig, args):\n tupty, = sig.args\n if len(tupty):\n return cgutils.true_bit\n else:\n return cgutils.false_bit\n\ndef tuple_cmp_ordered(context, builder, op, sig, args):\n tu, tv = sig.args\n u, v = args\n res = cgutils.alloca_once_value(builder, cgutils.true_bit)\n bbend = builder.append_basic_block(\"cmp_end\")\n for i, (ta, tb) in enumerate(zip(tu.types, tv.types)):\n a = builder.extract_value(u, i)\n b = builder.extract_value(v, i)\n not_equal = generic_compare(context, builder, '!=', (ta, tb), (a, b))\n with builder.if_then(not_equal):\n pred = generic_compare(context, builder, op, (ta, tb), (a, b))\n builder.store(pred, res)\n builder.branch(bbend)\n # Everything matched equal => compare lengths\n len_compare = eval(\"%d %s %d\" % (len(tu.types), op, len(tv.types)))\n pred = context.get_constant(types.boolean, len_compare)\n builder.store(pred, res)\n builder.branch(bbend)\n builder.position_at_end(bbend)\n return builder.load(res)\n\n@builtin\n@implement('==', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))\ndef tuple_eq(context, builder, sig, args):\n tu, tv = sig.args\n u, v = args\n if len(tu.types) != len(tv.types):\n res = context.get_constant(types.boolean, False)\n return impl_ret_untracked(context, builder, sig.return_type, res)\n res = context.get_constant(types.boolean, True)\n for i, (ta, tb) in enumerate(zip(tu.types, tv.types)):\n a = builder.extract_value(u, i)\n b = builder.extract_value(v, i)\n pred = generic_compare(context, builder, \"==\", (ta, tb), (a, b))\n res = builder.and_(res, pred)\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n@builtin\n@implement('!=', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))\ndef tuple_ne(context, builder, sig, args):\n res = builder.not_(tuple_eq(context, builder, sig, args))\n return impl_ret_untracked(context, builder, sig.return_type, 
res)\n\n@builtin\n@implement('<', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))\ndef tuple_lt(context, builder, sig, args):\n res = tuple_cmp_ordered(context, builder, '<', sig, args)\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n@builtin\n@implement('<=', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))\ndef tuple_le(context, builder, sig, args):\n res = tuple_cmp_ordered(context, builder, '<=', sig, args)\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n@builtin\n@implement('>', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))\ndef tuple_gt(context, builder, sig, args):\n res = tuple_cmp_ordered(context, builder, '>', sig, args)\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n@builtin\n@implement('>=', types.Kind(types.BaseTuple), types.Kind(types.BaseTuple))\ndef tuple_ge(context, builder, sig, args):\n res = tuple_cmp_ordered(context, builder, '>=', sig, args)\n return impl_ret_untracked(context, builder, sig.return_type, res)\n\n\n@builtin_attr\n@impl_attribute_generic(types.Kind(types.BaseNamedTuple))\ndef namedtuple_getattr(context, builder, typ, value, attr):\n \"\"\"\n Fetch a namedtuple's field.\n \"\"\"\n index = typ.fields.index(attr)\n res = builder.extract_value(value, index)\n return impl_ret_borrowed(context, builder, typ[index], res)\n\n\n#------------------------------------------------------------------------------\n# Tuple iterators\n\n@struct_factory(types.UniTupleIter)\ndef make_unituple_iter(tupiter):\n \"\"\"\n Return the Structure representation of the given *tupiter* (an\n instance of types.UniTupleIter).\n \"\"\"\n return cgutils.create_struct_proxy(tupiter)\n\n\n@builtin\n@implement('getiter', types.Kind(types.UniTuple))\n@implement('getiter', types.Kind(types.NamedUniTuple))\ndef getiter_unituple(context, builder, sig, args):\n [tupty] = sig.args\n [tup] = args\n\n tupitercls = make_unituple_iter(types.UniTupleIter(tupty))\n iterval = tupitercls(context, builder)\n\n index0 = context.get_constant(types.intp, 0)\n indexptr = cgutils.alloca_once(builder, index0.type)\n builder.store(index0, indexptr)\n\n iterval.index = indexptr\n iterval.tuple = tup\n\n res = iterval._getvalue()\n return impl_ret_borrowed(context, builder, sig.return_type, res)\n\n\n@builtin\n@implement('iternext', types.Kind(types.UniTupleIter))\n@iternext_impl\ndef iternext_unituple(context, builder, sig, args, result):\n [tupiterty] = sig.args\n [tupiter] = args\n\n tupitercls = make_unituple_iter(tupiterty)\n iterval = tupitercls(context, builder, value=tupiter)\n tup = iterval.tuple\n idxptr = iterval.index\n idx = builder.load(idxptr)\n count = context.get_constant(types.intp, tupiterty.unituple.count)\n\n is_valid = builder.icmp(lc.ICMP_SLT, idx, count)\n result.set_valid(is_valid)\n\n with builder.if_then(is_valid):\n getitem_sig = typing.signature(tupiterty.unituple.dtype,\n tupiterty.unituple,\n types.intp)\n getitem_out = getitem_unituple(context, builder, getitem_sig,\n [tup, idx])\n result.yield_(getitem_out)\n nidx = builder.add(idx, context.get_constant(types.intp, 1))\n builder.store(nidx, iterval.index)\n\n\n@builtin\n@implement('getitem', types.Kind(types.UniTuple), types.intp)\n@implement('getitem', types.Kind(types.NamedUniTuple), types.intp)\ndef getitem_unituple(context, builder, sig, args):\n tupty, _ = sig.args\n tup, idx = args\n\n bbelse = builder.append_basic_block(\"switch.else\")\n bbend = builder.append_basic_block(\"switch.end\")\n switch = builder.switch(idx, 
bbelse, n=tupty.count)\n\n with builder.goto_block(bbelse):\n context.call_conv.return_user_exc(builder, IndexError,\n (\"tuple index out of range\",))\n\n lrtty = context.get_value_type(tupty.dtype)\n with builder.goto_block(bbend):\n phinode = builder.phi(lrtty)\n\n for i in range(tupty.count):\n ki = context.get_constant(types.intp, i)\n bbi = builder.append_basic_block(\"switch.%d\" % i)\n switch.add_case(ki, bbi)\n with builder.goto_block(bbi):\n value = builder.extract_value(tup, i)\n builder.branch(bbend)\n phinode.add_incoming(value, bbi)\n\n builder.position_at_end(bbend)\n res = phinode\n assert sig.return_type == tupty.dtype\n return impl_ret_borrowed(context, builder, sig.return_type, res)\n", "path": "numba/targets/tupleobj.py"}]} | 2,935 | 243 |
gh_patches_debug_7150 | rasdani/github-patches | git_diff | searxng__searxng-3410 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Yahoo: Result titles getting mixed together
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
2024.4.19+0f70e4d30
**How did you install SearXNG?**
searxng-docker
**What happened?**
I've noticed that result titles are getting mixed together when they shouldn't be.
**How To Reproduce**
Search `!yh gba custom bios`.
**Expected behavior**
Result titles not being mixed together.
**Screenshots & Logs**
Highlighted in red are the parts where the title is wrong.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/yahoo.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 """Yahoo Search (Web)
3
4 Languages are supported by mapping the language to a domain. If domain is not
5 found in :py:obj:`lang2domain` URL ``<lang>.search.yahoo.com`` is used.
6
7 """
8
9 from urllib.parse import (
10 unquote,
11 urlencode,
12 )
13 from lxml import html
14
15 from searx.utils import (
16 eval_xpath_getindex,
17 eval_xpath_list,
18 extract_text,
19 )
20 from searx.enginelib.traits import EngineTraits
21
22 traits: EngineTraits
23
24 # about
25 about = {
26 "website": 'https://search.yahoo.com/',
27 "wikidata_id": None,
28 "official_api_documentation": 'https://developer.yahoo.com/api/',
29 "use_official_api": False,
30 "require_api_key": False,
31 "results": 'HTML',
32 }
33
34 # engine dependent config
35 categories = ['general', 'web']
36 paging = True
37 time_range_support = True
38 # send_accept_language_header = True
39
40 time_range_dict = {
41 'day': ('1d', 'd'),
42 'week': ('1w', 'w'),
43 'month': ('1m', 'm'),
44 }
45
46 lang2domain = {
47 'zh_chs': 'hk.search.yahoo.com',
48 'zh_cht': 'tw.search.yahoo.com',
49 'any': 'search.yahoo.com',
50 'en': 'search.yahoo.com',
51 'bg': 'search.yahoo.com',
52 'cs': 'search.yahoo.com',
53 'da': 'search.yahoo.com',
54 'el': 'search.yahoo.com',
55 'et': 'search.yahoo.com',
56 'he': 'search.yahoo.com',
57 'hr': 'search.yahoo.com',
58 'ja': 'search.yahoo.com',
59 'ko': 'search.yahoo.com',
60 'sk': 'search.yahoo.com',
61 'sl': 'search.yahoo.com',
62 }
63 """Map language to domain"""
64
65 locale_aliases = {
66 'zh': 'zh_Hans',
67 'zh-HK': 'zh_Hans',
68 'zh-CN': 'zh_Hans', # dead since 2015 / routed to hk.search.yahoo.com
69 'zh-TW': 'zh_Hant',
70 }
71
72
73 def request(query, params):
74 """build request"""
75
76 lang = locale_aliases.get(params['language'], None)
77 if not lang:
78 lang = params['language'].split('-')[0]
79 lang = traits.get_language(lang, traits.all_locale)
80
81 offset = (params['pageno'] - 1) * 7 + 1
82 age, btf = time_range_dict.get(params['time_range'], ('', ''))
83
84 args = urlencode(
85 {
86 'p': query,
87 'ei': 'UTF-8',
88 'fl': 1,
89 'vl': 'lang_' + lang,
90 'btf': btf,
91 'fr2': 'time',
92 'age': age,
93 'b': offset,
94 'xargs': 0,
95 }
96 )
97
98 domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)
99 params['url'] = 'https://%s/search?%s' % (domain, args)
100 return params
101
102
103 def parse_url(url_string):
104 """remove yahoo-specific tracking-url"""
105
106 endings = ['/RS', '/RK']
107 endpositions = []
108 start = url_string.find('http', url_string.find('/RU=') + 1)
109
110 for ending in endings:
111 endpos = url_string.rfind(ending)
112 if endpos > -1:
113 endpositions.append(endpos)
114
115 if start == 0 or len(endpositions) == 0:
116 return url_string
117
118 end = min(endpositions)
119 return unquote(url_string[start:end])
120
121
122 def response(resp):
123 """parse response"""
124
125 results = []
126 dom = html.fromstring(resp.text)
127
128 # parse results
129 for result in eval_xpath_list(dom, '//div[contains(@class,"algo-sr")]'):
130 url = eval_xpath_getindex(result, './/h3/a/@href', 0, default=None)
131 if url is None:
132 continue
133 url = parse_url(url)
134
135 title = extract_text(result.xpath('.//h3//a/@aria-label'))
136 content = eval_xpath_getindex(result, './/div[contains(@class, "compText")]', 0, default='')
137 content = extract_text(content, allow_none=True)
138
139 # append result
140 results.append({'url': url, 'title': title, 'content': content})
141
142 for suggestion in eval_xpath_list(dom, '//div[contains(@class, "AlsoTry")]//table//a'):
143 # append suggestion
144 results.append({'suggestion': extract_text(suggestion)})
145
146 return results
147
148
149 def fetch_traits(engine_traits: EngineTraits):
150 """Fetch languages from yahoo"""
151
152 # pylint: disable=import-outside-toplevel
153 import babel
154 from searx import network
155 from searx.locales import language_tag
156
157 engine_traits.all_locale = 'any'
158
159 resp = network.get('https://search.yahoo.com/preferences/languages')
160 if not resp.ok:
161 print("ERROR: response from yahoo is not OK.")
162
163 dom = html.fromstring(resp.text)
164 offset = len('lang_')
165
166 eng2sxng = {'zh_chs': 'zh_Hans', 'zh_cht': 'zh_Hant'}
167
168 for val in eval_xpath_list(dom, '//div[contains(@class, "lang-item")]/input/@value'):
169 eng_tag = val[offset:]
170
171 try:
172 sxng_tag = language_tag(babel.Locale.parse(eng2sxng.get(eng_tag, eng_tag)))
173 except babel.UnknownLocaleError:
174 print('ERROR: unknown language --> %s' % eng_tag)
175 continue
176
177 conflict = engine_traits.languages.get(sxng_tag)
178 if conflict:
179 if conflict != eng_tag:
180 print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
181 continue
182 engine_traits.languages[sxng_tag] = eng_tag
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -132,7 +132,8 @@
continue
url = parse_url(url)
- title = extract_text(result.xpath('.//h3//a/@aria-label'))
+ title = eval_xpath_getindex(result, './/h3//a/@aria-label', 0, default='')
+ title = extract_text(title)
content = eval_xpath_getindex(result, './/div[contains(@class, "compText")]', 0, default='')
content = extract_text(content, allow_none=True)
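
The one-line cause is worth spelling out: `result.xpath('.//h3//a/@aria-label')` can match more than one `aria-label` under a single result block, and `extract_text` on a list of matches joins them all into one string, which is presumably how neighbouring titles ended up glued together. The patch keeps only the first match; a sketch of the before/after extraction (both helpers come from `searx.utils`):

```python
# Before: every matched aria-label gets concatenated into the title.
title = extract_text(result.xpath('.//h3//a/@aria-label'))

# After: take only the first match (empty string if none), then extract.
title = eval_xpath_getindex(result, './/h3//a/@aria-label', 0, default='')
title = extract_text(title)
```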
| {"golden_diff": "diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py\n--- a/searx/engines/yahoo.py\n+++ b/searx/engines/yahoo.py\n@@ -132,7 +132,8 @@\n continue\n url = parse_url(url)\n \n- title = extract_text(result.xpath('.//h3//a/@aria-label'))\n+ title = eval_xpath_getindex(result, './/h3//a/@aria-label', 0, default='')\n+ title = extract_text(title)\n content = eval_xpath_getindex(result, './/div[contains(@class, \"compText\")]', 0, default='')\n content = extract_text(content, allow_none=True)\n", "issue": "Yahoo: Result titles getting mixed together\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n2024.4.19+0f70e4d30\r\n\r\n**How did you install SearXNG?**\r\nsearxng-docker\r\n\r\n**What happened?**\r\nI've noticed that result titles are getting mixed up together when they shouldn't.\r\n\r\n**How To Reproduce**\r\nSearch `!yh gba custom bios`.\r\n\r\n**Expected behavior**\r\nResult titles not being mixed together.\r\n\r\n**Screenshots & Logs**\r\nHighlighted in red are the parts where the title is wrong.\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Yahoo Search (Web)\n\nLanguages are supported by mapping the language to a domain. If domain is not\nfound in :py:obj:`lang2domain` URL ``<lang>.search.yahoo.com`` is used.\n\n\"\"\"\n\nfrom urllib.parse import (\n unquote,\n urlencode,\n)\nfrom lxml import html\n\nfrom searx.utils import (\n eval_xpath_getindex,\n eval_xpath_list,\n extract_text,\n)\nfrom searx.enginelib.traits import EngineTraits\n\ntraits: EngineTraits\n\n# about\nabout = {\n \"website\": 'https://search.yahoo.com/',\n \"wikidata_id\": None,\n \"official_api_documentation\": 'https://developer.yahoo.com/api/',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\n# engine dependent config\ncategories = ['general', 'web']\npaging = True\ntime_range_support = True\n# send_accept_language_header = True\n\ntime_range_dict = {\n 'day': ('1d', 'd'),\n 'week': ('1w', 'w'),\n 'month': ('1m', 'm'),\n}\n\nlang2domain = {\n 'zh_chs': 'hk.search.yahoo.com',\n 'zh_cht': 'tw.search.yahoo.com',\n 'any': 'search.yahoo.com',\n 'en': 'search.yahoo.com',\n 'bg': 'search.yahoo.com',\n 'cs': 'search.yahoo.com',\n 'da': 'search.yahoo.com',\n 'el': 'search.yahoo.com',\n 'et': 'search.yahoo.com',\n 'he': 'search.yahoo.com',\n 'hr': 'search.yahoo.com',\n 'ja': 'search.yahoo.com',\n 'ko': 'search.yahoo.com',\n 'sk': 'search.yahoo.com',\n 'sl': 'search.yahoo.com',\n}\n\"\"\"Map language to domain\"\"\"\n\nlocale_aliases = {\n 'zh': 'zh_Hans',\n 'zh-HK': 'zh_Hans',\n 'zh-CN': 'zh_Hans', # dead since 2015 / routed to hk.search.yahoo.com\n 'zh-TW': 'zh_Hant',\n}\n\n\ndef request(query, params):\n \"\"\"build request\"\"\"\n\n lang = locale_aliases.get(params['language'], None)\n if not lang:\n lang = params['language'].split('-')[0]\n lang = traits.get_language(lang, traits.all_locale)\n\n offset = (params['pageno'] - 1) * 7 + 1\n age, btf = time_range_dict.get(params['time_range'], ('', ''))\n\n args = urlencode(\n {\n 'p': query,\n 'ei': 'UTF-8',\n 'fl': 1,\n 'vl': 'lang_' + lang,\n 'btf': btf,\n 'fr2': 'time',\n 'age': age,\n 'b': offset,\n 'xargs': 0,\n }\n )\n\n domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)\n params['url'] = 'https://%s/search?%s' % (domain, args)\n return params\n\n\ndef parse_url(url_string):\n \"\"\"remove yahoo-specific tracking-url\"\"\"\n\n endings = ['/RS', '/RK']\n endpositions = []\n start = 
url_string.find('http', url_string.find('/RU=') + 1)\n\n for ending in endings:\n endpos = url_string.rfind(ending)\n if endpos > -1:\n endpositions.append(endpos)\n\n if start == 0 or len(endpositions) == 0:\n return url_string\n\n end = min(endpositions)\n return unquote(url_string[start:end])\n\n\ndef response(resp):\n \"\"\"parse response\"\"\"\n\n results = []\n dom = html.fromstring(resp.text)\n\n # parse results\n for result in eval_xpath_list(dom, '//div[contains(@class,\"algo-sr\")]'):\n url = eval_xpath_getindex(result, './/h3/a/@href', 0, default=None)\n if url is None:\n continue\n url = parse_url(url)\n\n title = extract_text(result.xpath('.//h3//a/@aria-label'))\n content = eval_xpath_getindex(result, './/div[contains(@class, \"compText\")]', 0, default='')\n content = extract_text(content, allow_none=True)\n\n # append result\n results.append({'url': url, 'title': title, 'content': content})\n\n for suggestion in eval_xpath_list(dom, '//div[contains(@class, \"AlsoTry\")]//table//a'):\n # append suggestion\n results.append({'suggestion': extract_text(suggestion)})\n\n return results\n\n\ndef fetch_traits(engine_traits: EngineTraits):\n \"\"\"Fetch languages from yahoo\"\"\"\n\n # pylint: disable=import-outside-toplevel\n import babel\n from searx import network\n from searx.locales import language_tag\n\n engine_traits.all_locale = 'any'\n\n resp = network.get('https://search.yahoo.com/preferences/languages')\n if not resp.ok:\n print(\"ERROR: response from yahoo is not OK.\")\n\n dom = html.fromstring(resp.text)\n offset = len('lang_')\n\n eng2sxng = {'zh_chs': 'zh_Hans', 'zh_cht': 'zh_Hant'}\n\n for val in eval_xpath_list(dom, '//div[contains(@class, \"lang-item\")]/input/@value'):\n eng_tag = val[offset:]\n\n try:\n sxng_tag = language_tag(babel.Locale.parse(eng2sxng.get(eng_tag, eng_tag)))\n except babel.UnknownLocaleError:\n print('ERROR: unknown language --> %s' % eng_tag)\n continue\n\n conflict = engine_traits.languages.get(sxng_tag)\n if conflict:\n if conflict != eng_tag:\n print(\"CONFLICT: babel %s --> %s, %s\" % (sxng_tag, conflict, eng_tag))\n continue\n engine_traits.languages[sxng_tag] = eng_tag\n", "path": "searx/engines/yahoo.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Yahoo Search (Web)\n\nLanguages are supported by mapping the language to a domain. 
If domain is not\nfound in :py:obj:`lang2domain` URL ``<lang>.search.yahoo.com`` is used.\n\n\"\"\"\n\nfrom urllib.parse import (\n unquote,\n urlencode,\n)\nfrom lxml import html\n\nfrom searx.utils import (\n eval_xpath_getindex,\n eval_xpath_list,\n extract_text,\n)\nfrom searx.enginelib.traits import EngineTraits\n\ntraits: EngineTraits\n\n# about\nabout = {\n \"website\": 'https://search.yahoo.com/',\n \"wikidata_id\": None,\n \"official_api_documentation\": 'https://developer.yahoo.com/api/',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\n# engine dependent config\ncategories = ['general', 'web']\npaging = True\ntime_range_support = True\n# send_accept_language_header = True\n\ntime_range_dict = {\n 'day': ('1d', 'd'),\n 'week': ('1w', 'w'),\n 'month': ('1m', 'm'),\n}\n\nlang2domain = {\n 'zh_chs': 'hk.search.yahoo.com',\n 'zh_cht': 'tw.search.yahoo.com',\n 'any': 'search.yahoo.com',\n 'en': 'search.yahoo.com',\n 'bg': 'search.yahoo.com',\n 'cs': 'search.yahoo.com',\n 'da': 'search.yahoo.com',\n 'el': 'search.yahoo.com',\n 'et': 'search.yahoo.com',\n 'he': 'search.yahoo.com',\n 'hr': 'search.yahoo.com',\n 'ja': 'search.yahoo.com',\n 'ko': 'search.yahoo.com',\n 'sk': 'search.yahoo.com',\n 'sl': 'search.yahoo.com',\n}\n\"\"\"Map language to domain\"\"\"\n\nlocale_aliases = {\n 'zh': 'zh_Hans',\n 'zh-HK': 'zh_Hans',\n 'zh-CN': 'zh_Hans', # dead since 2015 / routed to hk.search.yahoo.com\n 'zh-TW': 'zh_Hant',\n}\n\n\ndef request(query, params):\n \"\"\"build request\"\"\"\n\n lang = locale_aliases.get(params['language'], None)\n if not lang:\n lang = params['language'].split('-')[0]\n lang = traits.get_language(lang, traits.all_locale)\n\n offset = (params['pageno'] - 1) * 7 + 1\n age, btf = time_range_dict.get(params['time_range'], ('', ''))\n\n args = urlencode(\n {\n 'p': query,\n 'ei': 'UTF-8',\n 'fl': 1,\n 'vl': 'lang_' + lang,\n 'btf': btf,\n 'fr2': 'time',\n 'age': age,\n 'b': offset,\n 'xargs': 0,\n }\n )\n\n domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)\n params['url'] = 'https://%s/search?%s' % (domain, args)\n return params\n\n\ndef parse_url(url_string):\n \"\"\"remove yahoo-specific tracking-url\"\"\"\n\n endings = ['/RS', '/RK']\n endpositions = []\n start = url_string.find('http', url_string.find('/RU=') + 1)\n\n for ending in endings:\n endpos = url_string.rfind(ending)\n if endpos > -1:\n endpositions.append(endpos)\n\n if start == 0 or len(endpositions) == 0:\n return url_string\n\n end = min(endpositions)\n return unquote(url_string[start:end])\n\n\ndef response(resp):\n \"\"\"parse response\"\"\"\n\n results = []\n dom = html.fromstring(resp.text)\n\n # parse results\n for result in eval_xpath_list(dom, '//div[contains(@class,\"algo-sr\")]'):\n url = eval_xpath_getindex(result, './/h3/a/@href', 0, default=None)\n if url is None:\n continue\n url = parse_url(url)\n\n title = eval_xpath_getindex(result, './/h3//a/@aria-label', 0, default='')\n title = extract_text(title)\n content = eval_xpath_getindex(result, './/div[contains(@class, \"compText\")]', 0, default='')\n content = extract_text(content, allow_none=True)\n\n # append result\n results.append({'url': url, 'title': title, 'content': content})\n\n for suggestion in eval_xpath_list(dom, '//div[contains(@class, \"AlsoTry\")]//table//a'):\n # append suggestion\n results.append({'suggestion': extract_text(suggestion)})\n\n return results\n\n\ndef fetch_traits(engine_traits: EngineTraits):\n \"\"\"Fetch languages from yahoo\"\"\"\n\n # pylint: 
disable=import-outside-toplevel\n import babel\n from searx import network\n from searx.locales import language_tag\n\n engine_traits.all_locale = 'any'\n\n resp = network.get('https://search.yahoo.com/preferences/languages')\n if not resp.ok:\n print(\"ERROR: response from yahoo is not OK.\")\n\n dom = html.fromstring(resp.text)\n offset = len('lang_')\n\n eng2sxng = {'zh_chs': 'zh_Hans', 'zh_cht': 'zh_Hant'}\n\n for val in eval_xpath_list(dom, '//div[contains(@class, \"lang-item\")]/input/@value'):\n eng_tag = val[offset:]\n\n try:\n sxng_tag = language_tag(babel.Locale.parse(eng2sxng.get(eng_tag, eng_tag)))\n except babel.UnknownLocaleError:\n print('ERROR: unknown language --> %s' % eng_tag)\n continue\n\n conflict = engine_traits.languages.get(sxng_tag)\n if conflict:\n if conflict != eng_tag:\n print(\"CONFLICT: babel %s --> %s, %s\" % (sxng_tag, conflict, eng_tag))\n continue\n engine_traits.languages[sxng_tag] = eng_tag\n", "path": "searx/engines/yahoo.py"}]} | 2,242 | 161 |
gh_patches_debug_24974 | rasdani/github-patches | git_diff | pypa__pip-1830 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'pip freeze' fails with BadCommand if vcs program is not found
Although mechanisms are in place to catch errors in the get_src_requirement() path, a BadCommand exception reaches the top level and makes pip unusable if any installed package has a .git directory and 'git' is not installed (or not on the PATH).
I think this should be caught and a warning displayed in [pip.vcs:get_src_requirement()](https://github.com/pypa/pip/blob/3e7c94e9/pip/vcs/__init__.py#L246) instead, allowing the program to continue.
```
> python -m pip -v freeze
Cannot find command 'git'
Exception information:
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\pip\basecommand.py", line 139, in main
status = self.run(options, args)
File "C:\Python27\lib\site-packages\pip\commands\freeze.py", line 73, in run
req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)
File "C:\Python27\lib\site-packages\pip\__init__.py", line 181, in from_dist
req = get_src_requirement(dist, location, find_tags)
File "C:\Python27\lib\site-packages\pip\vcs\__init__.py", line 249, in get_src_requirement
return version_control().get_src_requirement(dist, location, find_tags)
File "C:\Python27\lib\site-packages\pip\vcs\git.py", line 155, in get_src_requirement
repo = self.get_url(location)
File "C:\Python27\lib\site-packages\pip\vcs\git.py", line 123, in get_url
[self.cmd, 'config', 'remote.origin.url'],
File "C:\Python27\lib\site-packages\pip\vcs\__init__.py", line 110, in cmd
command = find_command(self.name)
File "C:\Python27\lib\site-packages\pip\util.py", line 107, in find_command
raise BadCommand('Cannot find command %r' % cmd)
BadCommand: Cannot find command 'git'
```
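
A minimal sketch of the handling proposed above: catch the exception where the backend is invoked and fall back to a plain requirement with a warning. The surrounding function body is an assumption (only the backend call on line 249 appears in the traceback), and it presumes `BadCommand` is importable from `pip.exceptions`:

```python
# Hypothetical sketch, not the actual pip source.
from pip.exceptions import BadCommand
from pip.log import logger
from pip.vcs import vcs


def get_src_requirement(dist, location, find_tags):
    version_control = vcs.get_backend_from_location(location)
    if not version_control:
        logger.warn('cannot determine version of editable source in %s '
                    '(is not a VCS checkout)' % location)
        return dist.as_requirement()
    try:
        return version_control().get_src_requirement(dist, location, find_tags)
    except BadCommand:
        # The vcs binary (e.g. git) is missing: warn and keep freezing
        # instead of aborting the whole command.
        logger.warn('cannot determine version of editable source in %s '
                    '(%r command not found in path)'
                    % (location, version_control.name))
        return dist.as_requirement()
```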
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/vcs/__init__.py`
Content:
```
1 """Handles all VCS (version control) support"""
2
3 import os
4 import shutil
5
6 from pip.compat import urlparse, urllib
7 from pip.log import logger
8 from pip.util import (display_path, backup_dir, find_command,
9 rmtree, ask_path_exists)
10
11
12 __all__ = ['vcs', 'get_src_requirement']
13
14
15 class VcsSupport(object):
16 _registry = {}
17 schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']
18
19 def __init__(self):
20 # Register more schemes with urlparse for various version control
21 # systems
22 urlparse.uses_netloc.extend(self.schemes)
23 # Python >= 2.7.4, 3.3 doesn't have uses_fragment
24 if getattr(urlparse, 'uses_fragment', None):
25 urlparse.uses_fragment.extend(self.schemes)
26 super(VcsSupport, self).__init__()
27
28 def __iter__(self):
29 return self._registry.__iter__()
30
31 @property
32 def backends(self):
33 return list(self._registry.values())
34
35 @property
36 def dirnames(self):
37 return [backend.dirname for backend in self.backends]
38
39 @property
40 def all_schemes(self):
41 schemes = []
42 for backend in self.backends:
43 schemes.extend(backend.schemes)
44 return schemes
45
46 def register(self, cls):
47 if not hasattr(cls, 'name'):
48 logger.warn('Cannot register VCS %s' % cls.__name__)
49 return
50 if cls.name not in self._registry:
51 self._registry[cls.name] = cls
52
53 def unregister(self, cls=None, name=None):
54 if name in self._registry:
55 del self._registry[name]
56 elif cls in self._registry.values():
57 del self._registry[cls.name]
58 else:
59 logger.warn('Cannot unregister because no class or name given')
60
61 def get_backend_name(self, location):
62 """
63 Return the name of the version control backend if found at given
64 location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')
65 """
66 for vc_type in self._registry.values():
67 path = os.path.join(location, vc_type.dirname)
68 if os.path.exists(path):
69 return vc_type.name
70 return None
71
72 def get_backend(self, name):
73 name = name.lower()
74 if name in self._registry:
75 return self._registry[name]
76
77 def get_backend_from_location(self, location):
78 vc_type = self.get_backend_name(location)
79 if vc_type:
80 return self.get_backend(vc_type)
81 return None
82
83
84 vcs = VcsSupport()
85
86
87 class VersionControl(object):
88 name = ''
89 dirname = ''
90
91 def __init__(self, url=None, *args, **kwargs):
92 self.url = url
93 self._cmd = None
94 super(VersionControl, self).__init__(*args, **kwargs)
95
96 def _filter(self, line):
97 return (logger.INFO, line)
98
99 def _is_local_repository(self, repo):
100 """
101 posix absolute paths start with os.path.sep,
102 win32 ones ones start with drive (like c:\\folder)
103 """
104 drive, tail = os.path.splitdrive(repo)
105 return repo.startswith(os.path.sep) or drive
106
107 @property
108 def cmd(self):
109 if self._cmd is not None:
110 return self._cmd
111 command = find_command(self.name)
112 logger.info('Found command %r at %r' % (self.name, command))
113 self._cmd = command
114 return command
115
116 def get_url_rev(self):
117 """
118 Returns the correct repository URL and revision by parsing the given
119 repository URL
120 """
121 error_message = (
122 "Sorry, '%s' is a malformed VCS url. "
123 "The format is <vcs>+<protocol>://<url>, "
124 "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp"
125 )
126 assert '+' in self.url, error_message % self.url
127 url = self.url.split('+', 1)[1]
128 scheme, netloc, path, query, frag = urlparse.urlsplit(url)
129 rev = None
130 if '@' in path:
131 path, rev = path.rsplit('@', 1)
132 url = urlparse.urlunsplit((scheme, netloc, path, query, ''))
133 return url, rev
134
135 def get_info(self, location):
136 """
137 Returns (url, revision), where both are strings
138 """
139 assert not location.rstrip('/').endswith(self.dirname), \
140 'Bad directory: %s' % location
141 return self.get_url(location), self.get_revision(location)
142
143 def normalize_url(self, url):
144 """
145 Normalize a URL for comparison by unquoting it and removing any
146 trailing slash.
147 """
148 return urllib.unquote(url).rstrip('/')
149
150 def compare_urls(self, url1, url2):
151 """
152 Compare two repo URLs for identity, ignoring incidental differences.
153 """
154 return (self.normalize_url(url1) == self.normalize_url(url2))
155
156 def obtain(self, dest):
157 """
158 Called when installing or updating an editable package, takes the
159 source path of the checkout.
160 """
161 raise NotImplementedError
162
163 def switch(self, dest, url, rev_options):
164 """
165 Switch the repo at ``dest`` to point to ``URL``.
166 """
167 raise NotImplemented
168
169 def update(self, dest, rev_options):
170 """
171 Update an already-existing repo to the given ``rev_options``.
172 """
173 raise NotImplementedError
174
175 def check_destination(self, dest, url, rev_options, rev_display):
176 """
177 Prepare a location to receive a checkout/clone.
178
179 Return True if the location is ready for (and requires) a
180 checkout/clone, False otherwise.
181 """
182 checkout = True
183 prompt = False
184 if os.path.exists(dest):
185 checkout = False
186 if os.path.exists(os.path.join(dest, self.dirname)):
187 existing_url = self.get_url(dest)
188 if self.compare_urls(existing_url, url):
189 logger.info('%s in %s exists, and has correct URL (%s)' %
190 (self.repo_name.title(), display_path(dest),
191 url))
192 logger.notify('Updating %s %s%s' %
193 (display_path(dest), self.repo_name,
194 rev_display))
195 self.update(dest, rev_options)
196 else:
197 logger.warn('%s %s in %s exists with URL %s' %
198 (self.name, self.repo_name,
199 display_path(dest), existing_url))
200 prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
201 ('s', 'i', 'w', 'b'))
202 else:
203 logger.warn('Directory %s already exists, '
204 'and is not a %s %s.' %
205 (dest, self.name, self.repo_name))
206 prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))
207 if prompt:
208 logger.warn('The plan is to install the %s repository %s' %
209 (self.name, url))
210 response = ask_path_exists('What to do? %s' % prompt[0],
211 prompt[1])
212
213 if response == 's':
214 logger.notify('Switching %s %s to %s%s' %
215 (self.repo_name, display_path(dest), url,
216 rev_display))
217 self.switch(dest, url, rev_options)
218 elif response == 'i':
219 # do nothing
220 pass
221 elif response == 'w':
222 logger.warn('Deleting %s' % display_path(dest))
223 rmtree(dest)
224 checkout = True
225 elif response == 'b':
226 dest_dir = backup_dir(dest)
227 logger.warn('Backing up %s to %s'
228 % (display_path(dest), dest_dir))
229 shutil.move(dest, dest_dir)
230 checkout = True
231 return checkout
232
233 def unpack(self, location):
234 if os.path.exists(location):
235 rmtree(location)
236 self.obtain(location)
237
238 def get_src_requirement(self, dist, location, find_tags=False):
239 raise NotImplementedError
240
241
242 def get_src_requirement(dist, location, find_tags):
243 version_control = vcs.get_backend_from_location(location)
244 if version_control:
245 return version_control().get_src_requirement(dist, location, find_tags)
246 logger.warn(
247 'cannot determine version of editable source in %s (is not SVN '
248 'checkout, Git clone, Mercurial clone or Bazaar branch)' % location
249 )
250 return dist.as_requirement()
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pip/vcs/__init__.py b/pip/vcs/__init__.py
--- a/pip/vcs/__init__.py
+++ b/pip/vcs/__init__.py
@@ -4,6 +4,7 @@
import shutil
from pip.compat import urlparse, urllib
+from pip.exceptions import BadCommand
from pip.log import logger
from pip.util import (display_path, backup_dir, find_command,
rmtree, ask_path_exists)
@@ -242,7 +243,15 @@
def get_src_requirement(dist, location, find_tags):
version_control = vcs.get_backend_from_location(location)
if version_control:
- return version_control().get_src_requirement(dist, location, find_tags)
+ try:
+ return version_control().get_src_requirement(dist,
+ location,
+ find_tags)
+ except BadCommand:
+ logger.warn('cannot determine version of editable source in %s '
+ '(%s command not found in path)' % (
+ location, version_control.name))
+ return dist.as_requirement()
logger.warn(
'cannot determine version of editable source in %s (is not SVN '
'checkout, Git clone, Mercurial clone or Bazaar branch)' % location
| {"golden_diff": "diff --git a/pip/vcs/__init__.py b/pip/vcs/__init__.py\n--- a/pip/vcs/__init__.py\n+++ b/pip/vcs/__init__.py\n@@ -4,6 +4,7 @@\n import shutil\n \n from pip.compat import urlparse, urllib\n+from pip.exceptions import BadCommand\n from pip.log import logger\n from pip.util import (display_path, backup_dir, find_command,\n rmtree, ask_path_exists)\n@@ -242,7 +243,15 @@\n def get_src_requirement(dist, location, find_tags):\n version_control = vcs.get_backend_from_location(location)\n if version_control:\n- return version_control().get_src_requirement(dist, location, find_tags)\n+ try:\n+ return version_control().get_src_requirement(dist,\n+ location,\n+ find_tags)\n+ except BadCommand:\n+ logger.warn('cannot determine version of editable source in %s '\n+ '(%s command not found in path)' % (\n+ location, version_control.name))\n+ return dist.as_requirement()\n logger.warn(\n 'cannot determine version of editable source in %s (is not SVN '\n 'checkout, Git clone, Mercurial clone or Bazaar branch)' % location\n", "issue": "'pip freeze' fails with BadCommand if vcs program is not found\nAlthough mechanisms are in place to catch errors in the get_src_requirement() path, a BadCommand exception reaches the top-level and makes pip unusable if any package is found that has a .git directory and 'git' is not installed (or not in path).\n\nI think this should be caught and a warning displayed in [pip.vcs:get_src_requirement()](https://github.com/pypa/pip/blob/3e7c94e9/pip/vcs/__init__.py#L246) instead, allowing the program to continue.\n\n```\n> python -m pip -v freeze\nCannot find command 'git'\nException information:\nTraceback (most recent call last):\n File \"C:\\Python27\\lib\\site-packages\\pip\\basecommand.py\", line 139, in main\n status = self.run(options, args)\n File \"C:\\Python27\\lib\\site-packages\\pip\\commands\\freeze.py\", line 73, in run\n req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)\n File \"C:\\Python27\\lib\\site-packages\\pip\\__init__.py\", line 181, in from_dist\n req = get_src_requirement(dist, location, find_tags)\n File \"C:\\Python27\\lib\\site-packages\\pip\\vcs\\__init__.py\", line 249, in get_src_requirement\n return version_control().get_src_requirement(dist, location, find_tags)\n File \"C:\\Python27\\lib\\site-packages\\pip\\vcs\\git.py\", line 155, in get_src_requirement\n repo = self.get_url(location)\n File \"C:\\Python27\\lib\\site-packages\\pip\\vcs\\git.py\", line 123, in get_url\n [self.cmd, 'config', 'remote.origin.url'],\n File \"C:\\Python27\\lib\\site-packages\\pip\\vcs\\__init__.py\", line 110, in cmd\n command = find_command(self.name)\n File \"C:\\Python27\\lib\\site-packages\\pip\\util.py\", line 107, in find_command\n raise BadCommand('Cannot find command %r' % cmd)\nBadCommand: Cannot find command 'git'\n```\n\n", "before_files": [{"content": "\"\"\"Handles all VCS (version control) support\"\"\"\n\nimport os\nimport shutil\n\nfrom pip.compat import urlparse, urllib\nfrom pip.log import logger\nfrom pip.util import (display_path, backup_dir, find_command,\n rmtree, ask_path_exists)\n\n\n__all__ = ['vcs', 'get_src_requirement']\n\n\nclass VcsSupport(object):\n _registry = {}\n schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']\n\n def __init__(self):\n # Register more schemes with urlparse for various version control\n # systems\n urlparse.uses_netloc.extend(self.schemes)\n # Python >= 2.7.4, 3.3 doesn't have uses_fragment\n if getattr(urlparse, 'uses_fragment', None):\n 
urlparse.uses_fragment.extend(self.schemes)\n super(VcsSupport, self).__init__()\n\n def __iter__(self):\n return self._registry.__iter__()\n\n @property\n def backends(self):\n return list(self._registry.values())\n\n @property\n def dirnames(self):\n return [backend.dirname for backend in self.backends]\n\n @property\n def all_schemes(self):\n schemes = []\n for backend in self.backends:\n schemes.extend(backend.schemes)\n return schemes\n\n def register(self, cls):\n if not hasattr(cls, 'name'):\n logger.warn('Cannot register VCS %s' % cls.__name__)\n return\n if cls.name not in self._registry:\n self._registry[cls.name] = cls\n\n def unregister(self, cls=None, name=None):\n if name in self._registry:\n del self._registry[name]\n elif cls in self._registry.values():\n del self._registry[cls.name]\n else:\n logger.warn('Cannot unregister because no class or name given')\n\n def get_backend_name(self, location):\n \"\"\"\n Return the name of the version control backend if found at given\n location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')\n \"\"\"\n for vc_type in self._registry.values():\n path = os.path.join(location, vc_type.dirname)\n if os.path.exists(path):\n return vc_type.name\n return None\n\n def get_backend(self, name):\n name = name.lower()\n if name in self._registry:\n return self._registry[name]\n\n def get_backend_from_location(self, location):\n vc_type = self.get_backend_name(location)\n if vc_type:\n return self.get_backend(vc_type)\n return None\n\n\nvcs = VcsSupport()\n\n\nclass VersionControl(object):\n name = ''\n dirname = ''\n\n def __init__(self, url=None, *args, **kwargs):\n self.url = url\n self._cmd = None\n super(VersionControl, self).__init__(*args, **kwargs)\n\n def _filter(self, line):\n return (logger.INFO, line)\n\n def _is_local_repository(self, repo):\n \"\"\"\n posix absolute paths start with os.path.sep,\n win32 ones ones start with drive (like c:\\\\folder)\n \"\"\"\n drive, tail = os.path.splitdrive(repo)\n return repo.startswith(os.path.sep) or drive\n\n @property\n def cmd(self):\n if self._cmd is not None:\n return self._cmd\n command = find_command(self.name)\n logger.info('Found command %r at %r' % (self.name, command))\n self._cmd = command\n return command\n\n def get_url_rev(self):\n \"\"\"\n Returns the correct repository URL and revision by parsing the given\n repository URL\n \"\"\"\n error_message = (\n \"Sorry, '%s' is a malformed VCS url. \"\n \"The format is <vcs>+<protocol>://<url>, \"\n \"e.g. 
svn+http://myrepo/svn/MyApp#egg=MyApp\"\n )\n assert '+' in self.url, error_message % self.url\n url = self.url.split('+', 1)[1]\n scheme, netloc, path, query, frag = urlparse.urlsplit(url)\n rev = None\n if '@' in path:\n path, rev = path.rsplit('@', 1)\n url = urlparse.urlunsplit((scheme, netloc, path, query, ''))\n return url, rev\n\n def get_info(self, location):\n \"\"\"\n Returns (url, revision), where both are strings\n \"\"\"\n assert not location.rstrip('/').endswith(self.dirname), \\\n 'Bad directory: %s' % location\n return self.get_url(location), self.get_revision(location)\n\n def normalize_url(self, url):\n \"\"\"\n Normalize a URL for comparison by unquoting it and removing any\n trailing slash.\n \"\"\"\n return urllib.unquote(url).rstrip('/')\n\n def compare_urls(self, url1, url2):\n \"\"\"\n Compare two repo URLs for identity, ignoring incidental differences.\n \"\"\"\n return (self.normalize_url(url1) == self.normalize_url(url2))\n\n def obtain(self, dest):\n \"\"\"\n Called when installing or updating an editable package, takes the\n source path of the checkout.\n \"\"\"\n raise NotImplementedError\n\n def switch(self, dest, url, rev_options):\n \"\"\"\n Switch the repo at ``dest`` to point to ``URL``.\n \"\"\"\n raise NotImplemented\n\n def update(self, dest, rev_options):\n \"\"\"\n Update an already-existing repo to the given ``rev_options``.\n \"\"\"\n raise NotImplementedError\n\n def check_destination(self, dest, url, rev_options, rev_display):\n \"\"\"\n Prepare a location to receive a checkout/clone.\n\n Return True if the location is ready for (and requires) a\n checkout/clone, False otherwise.\n \"\"\"\n checkout = True\n prompt = False\n if os.path.exists(dest):\n checkout = False\n if os.path.exists(os.path.join(dest, self.dirname)):\n existing_url = self.get_url(dest)\n if self.compare_urls(existing_url, url):\n logger.info('%s in %s exists, and has correct URL (%s)' %\n (self.repo_name.title(), display_path(dest),\n url))\n logger.notify('Updating %s %s%s' %\n (display_path(dest), self.repo_name,\n rev_display))\n self.update(dest, rev_options)\n else:\n logger.warn('%s %s in %s exists with URL %s' %\n (self.name, self.repo_name,\n display_path(dest), existing_url))\n prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',\n ('s', 'i', 'w', 'b'))\n else:\n logger.warn('Directory %s already exists, '\n 'and is not a %s %s.' %\n (dest, self.name, self.repo_name))\n prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))\n if prompt:\n logger.warn('The plan is to install the %s repository %s' %\n (self.name, url))\n response = ask_path_exists('What to do? 
%s' % prompt[0],\n prompt[1])\n\n if response == 's':\n logger.notify('Switching %s %s to %s%s' %\n (self.repo_name, display_path(dest), url,\n rev_display))\n self.switch(dest, url, rev_options)\n elif response == 'i':\n # do nothing\n pass\n elif response == 'w':\n logger.warn('Deleting %s' % display_path(dest))\n rmtree(dest)\n checkout = True\n elif response == 'b':\n dest_dir = backup_dir(dest)\n logger.warn('Backing up %s to %s'\n % (display_path(dest), dest_dir))\n shutil.move(dest, dest_dir)\n checkout = True\n return checkout\n\n def unpack(self, location):\n if os.path.exists(location):\n rmtree(location)\n self.obtain(location)\n\n def get_src_requirement(self, dist, location, find_tags=False):\n raise NotImplementedError\n\n\ndef get_src_requirement(dist, location, find_tags):\n version_control = vcs.get_backend_from_location(location)\n if version_control:\n return version_control().get_src_requirement(dist, location, find_tags)\n logger.warn(\n 'cannot determine version of editable source in %s (is not SVN '\n 'checkout, Git clone, Mercurial clone or Bazaar branch)' % location\n )\n return dist.as_requirement()\n", "path": "pip/vcs/__init__.py"}], "after_files": [{"content": "\"\"\"Handles all VCS (version control) support\"\"\"\n\nimport os\nimport shutil\n\nfrom pip.compat import urlparse, urllib\nfrom pip.exceptions import BadCommand\nfrom pip.log import logger\nfrom pip.util import (display_path, backup_dir, find_command,\n rmtree, ask_path_exists)\n\n\n__all__ = ['vcs', 'get_src_requirement']\n\n\nclass VcsSupport(object):\n _registry = {}\n schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']\n\n def __init__(self):\n # Register more schemes with urlparse for various version control\n # systems\n urlparse.uses_netloc.extend(self.schemes)\n # Python >= 2.7.4, 3.3 doesn't have uses_fragment\n if getattr(urlparse, 'uses_fragment', None):\n urlparse.uses_fragment.extend(self.schemes)\n super(VcsSupport, self).__init__()\n\n def __iter__(self):\n return self._registry.__iter__()\n\n @property\n def backends(self):\n return list(self._registry.values())\n\n @property\n def dirnames(self):\n return [backend.dirname for backend in self.backends]\n\n @property\n def all_schemes(self):\n schemes = []\n for backend in self.backends:\n schemes.extend(backend.schemes)\n return schemes\n\n def register(self, cls):\n if not hasattr(cls, 'name'):\n logger.warn('Cannot register VCS %s' % cls.__name__)\n return\n if cls.name not in self._registry:\n self._registry[cls.name] = cls\n\n def unregister(self, cls=None, name=None):\n if name in self._registry:\n del self._registry[name]\n elif cls in self._registry.values():\n del self._registry[cls.name]\n else:\n logger.warn('Cannot unregister because no class or name given')\n\n def get_backend_name(self, location):\n \"\"\"\n Return the name of the version control backend if found at given\n location, e.g. 
vcs.get_backend_name('/path/to/vcs/checkout')\n \"\"\"\n for vc_type in self._registry.values():\n path = os.path.join(location, vc_type.dirname)\n if os.path.exists(path):\n return vc_type.name\n return None\n\n def get_backend(self, name):\n name = name.lower()\n if name in self._registry:\n return self._registry[name]\n\n def get_backend_from_location(self, location):\n vc_type = self.get_backend_name(location)\n if vc_type:\n return self.get_backend(vc_type)\n return None\n\n\nvcs = VcsSupport()\n\n\nclass VersionControl(object):\n name = ''\n dirname = ''\n\n def __init__(self, url=None, *args, **kwargs):\n self.url = url\n self._cmd = None\n super(VersionControl, self).__init__(*args, **kwargs)\n\n def _filter(self, line):\n return (logger.INFO, line)\n\n def _is_local_repository(self, repo):\n \"\"\"\n posix absolute paths start with os.path.sep,\n win32 ones ones start with drive (like c:\\\\folder)\n \"\"\"\n drive, tail = os.path.splitdrive(repo)\n return repo.startswith(os.path.sep) or drive\n\n @property\n def cmd(self):\n if self._cmd is not None:\n return self._cmd\n command = find_command(self.name)\n logger.info('Found command %r at %r' % (self.name, command))\n self._cmd = command\n return command\n\n def get_url_rev(self):\n \"\"\"\n Returns the correct repository URL and revision by parsing the given\n repository URL\n \"\"\"\n error_message = (\n \"Sorry, '%s' is a malformed VCS url. \"\n \"The format is <vcs>+<protocol>://<url>, \"\n \"e.g. svn+http://myrepo/svn/MyApp#egg=MyApp\"\n )\n assert '+' in self.url, error_message % self.url\n url = self.url.split('+', 1)[1]\n scheme, netloc, path, query, frag = urlparse.urlsplit(url)\n rev = None\n if '@' in path:\n path, rev = path.rsplit('@', 1)\n url = urlparse.urlunsplit((scheme, netloc, path, query, ''))\n return url, rev\n\n def get_info(self, location):\n \"\"\"\n Returns (url, revision), where both are strings\n \"\"\"\n assert not location.rstrip('/').endswith(self.dirname), \\\n 'Bad directory: %s' % location\n return self.get_url(location), self.get_revision(location)\n\n def normalize_url(self, url):\n \"\"\"\n Normalize a URL for comparison by unquoting it and removing any\n trailing slash.\n \"\"\"\n return urllib.unquote(url).rstrip('/')\n\n def compare_urls(self, url1, url2):\n \"\"\"\n Compare two repo URLs for identity, ignoring incidental differences.\n \"\"\"\n return (self.normalize_url(url1) == self.normalize_url(url2))\n\n def obtain(self, dest):\n \"\"\"\n Called when installing or updating an editable package, takes the\n source path of the checkout.\n \"\"\"\n raise NotImplementedError\n\n def switch(self, dest, url, rev_options):\n \"\"\"\n Switch the repo at ``dest`` to point to ``URL``.\n \"\"\"\n raise NotImplemented\n\n def update(self, dest, rev_options):\n \"\"\"\n Update an already-existing repo to the given ``rev_options``.\n \"\"\"\n raise NotImplementedError\n\n def check_destination(self, dest, url, rev_options, rev_display):\n \"\"\"\n Prepare a location to receive a checkout/clone.\n\n Return True if the location is ready for (and requires) a\n checkout/clone, False otherwise.\n \"\"\"\n checkout = True\n prompt = False\n if os.path.exists(dest):\n checkout = False\n if os.path.exists(os.path.join(dest, self.dirname)):\n existing_url = self.get_url(dest)\n if self.compare_urls(existing_url, url):\n logger.info('%s in %s exists, and has correct URL (%s)' %\n (self.repo_name.title(), display_path(dest),\n url))\n logger.notify('Updating %s %s%s' %\n (display_path(dest), self.repo_name,\n 
rev_display))\n self.update(dest, rev_options)\n else:\n logger.warn('%s %s in %s exists with URL %s' %\n (self.name, self.repo_name,\n display_path(dest), existing_url))\n prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',\n ('s', 'i', 'w', 'b'))\n else:\n logger.warn('Directory %s already exists, '\n 'and is not a %s %s.' %\n (dest, self.name, self.repo_name))\n prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))\n if prompt:\n logger.warn('The plan is to install the %s repository %s' %\n (self.name, url))\n response = ask_path_exists('What to do? %s' % prompt[0],\n prompt[1])\n\n if response == 's':\n logger.notify('Switching %s %s to %s%s' %\n (self.repo_name, display_path(dest), url,\n rev_display))\n self.switch(dest, url, rev_options)\n elif response == 'i':\n # do nothing\n pass\n elif response == 'w':\n logger.warn('Deleting %s' % display_path(dest))\n rmtree(dest)\n checkout = True\n elif response == 'b':\n dest_dir = backup_dir(dest)\n logger.warn('Backing up %s to %s'\n % (display_path(dest), dest_dir))\n shutil.move(dest, dest_dir)\n checkout = True\n return checkout\n\n def unpack(self, location):\n if os.path.exists(location):\n rmtree(location)\n self.obtain(location)\n\n def get_src_requirement(self, dist, location, find_tags=False):\n raise NotImplementedError\n\n\ndef get_src_requirement(dist, location, find_tags):\n version_control = vcs.get_backend_from_location(location)\n if version_control:\n try:\n return version_control().get_src_requirement(dist,\n location,\n find_tags)\n except BadCommand:\n logger.warn('cannot determine version of editable source in %s '\n '(%s command not found in path)' % (\n location, version_control.name))\n return dist.as_requirement()\n logger.warn(\n 'cannot determine version of editable source in %s (is not SVN '\n 'checkout, Git clone, Mercurial clone or Bazaar branch)' % location\n )\n return dist.as_requirement()\n", "path": "pip/vcs/__init__.py"}]} | 3,291 | 276 |
gh_patches_debug_29991 | rasdani/github-patches | git_diff | explosion__thinc-639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cupy TypeError when using asarray with torch tensors
Hi,
I'm chaining a custom feature extraction model with a PyTorchWrapper model. In the feature extraction part, I'm calling the `asarray` function on a torch tensor, but as you can see below, cupy raises an error when `MemoryPointer` is called. Do you have any idea why that line raises an exception?
Thanks,
```
Traceback (most recent call last):
...
File "/storage/myuser/myproject/main.py", line 180, in forward
features.append(model.ops.asarray2f(attrs, dtype="float32"))
File "/storage/miniconda/envs/safirx-ml/lib/python3.9/site-packages/thinc/backends/ops.py", line 494, in asarray2f
return cast(Floats2d, self.asarray(data, dtype=dtype))
File "/storage/miniconda/envs/safirx-ml/lib/python3.9/site-packages/thinc/backends/cupy_ops.py", line 67, in asarray
pointer = cupy.cuda.MemoryPointer(data.data_ptr())
File "cupy/cuda/memory.pyx", line 343, in cupy.cuda.memory.MemoryPointer.__init__
TypeError: __init__() takes exactly 2 positional arguments (1 given)
```
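
For reference, `cupy.cuda.MemoryPointer` expects a CuPy memory object plus an offset, so handing it a raw `data_ptr()` integer cannot work. A zero-copy hand-off from a CUDA `torch.Tensor` to CuPy is normally done through DLPack (or through CuPy's `__cuda_array_interface__` support) rather than by building a `MemoryPointer` by hand. A rough sketch, assuming the tensor already lives on the GPU:

```python
import cupy
import torch
from torch.utils.dlpack import to_dlpack


def torch_to_cupy(tensor: torch.Tensor) -> cupy.ndarray:
    # Both objects end up sharing the same device memory (no copy is made).
    assert tensor.is_cuda, "only CUDA tensors can be viewed by CuPy"
    return cupy.fromDlpack(to_dlpack(tensor))


# Recent CuPy versions can also consume the tensor directly, because CUDA
# tensors expose __cuda_array_interface__:
#     cupy_array = cupy.asarray(cuda_tensor)
```

Thinc's own `thinc.util.torch2xp` wraps this kind of conversion, so routing through it avoids touching `MemoryPointer` at all.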
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `thinc/backends/cupy_ops.py`
Content:
```
1 import numpy
2
3 try:
4 import cupy
5 import cupyx
6 import cupy.cuda
7 from cupy.cuda.compiler import compile_with_cache # noqa: F401
8
9 has_cupy = True
10
11 # We no longer have to set up the memory pool, fortunately.
12 except ImportError:
13 cupy = None
14 cupyx = None
15 has_cupy = False
16
17 from .. import registry
18 from .ops import Ops
19 from .numpy_ops import NumpyOps
20 from . import _custom_kernels
21 from ..types import DeviceTypes
22
23
24 @registry.ops("CupyOps")
25 class CupyOps(Ops):
26 name = "cupy"
27 xp = cupy
28 _xp2 = cupyx
29
30 def __init__(
31 self, device_type: DeviceTypes = "gpu", device_id: int = 0, **kwargs
32 ) -> None:
33 self.device_type = device_type
34 self.device_id = device_id
35
36 def to_numpy(self, data, *, byte_order=None):
37 if not isinstance(data, numpy.ndarray):
38 data = data.get()
39 if byte_order:
40 dtype = data.dtype.newbyteorder(byte_order)
41 data = numpy.asarray(data, dtype=dtype)
42 return data
43
44 def gelu(self, X, inplace=False):
45 if X.dtype == "float32":
46 return _custom_kernels.gelu(X, inplace=inplace, threshold=6.0)
47 else:
48 return super().gelu(X, inplace=inplace)
49
50 def backprop_gelu(self, dY, X, inplace=False):
51 if X.dtype == "float32" and dY.dtype == "float32":
52 return _custom_kernels.backprop_gelu(dY, X, inplace=inplace, threshold=6.0)
53 else:
54 return super().backprop_gelu(dY, X, inplace=inplace)
55
56 def gemm(self, x, y, out=None, trans1=False, trans2=False):
57 if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
58 raise ValueError(
59 "Encountered a numpy array when processing with cupy. "
60 "Did you call model.ops.asarray on your data?"
61 )
62 if trans1:
63 x = x.T
64 if trans2:
65 y = y.T
66 if out is None:
67 return self.xp.dot(x, y)
68 else:
69 self.xp.dot(x, y, out=out)
70 return out
71
72 def asarray(self, data, dtype=None):
73 # This is sort of frustrating, but we can't easily otherwise pass
74 # forward "unset".
75 dtype = {"dtype": dtype} if dtype is not None else {}
76 if isinstance(data, cupy.ndarray):
77 return self.xp.asarray(data, **dtype)
78 elif hasattr(data, "data_ptr"):
79 # Handles PyTorch Tensors
80 pointer = cupy.cuda.MemoryPointer(data.data_ptr())
81 shape = data.stride()
82 array = self.xp.ndarray(shape, memptr=pointer, **dtype)
83 return array
84 else:
85 result = self.xp.array(data, **dtype)
86 return result
87
88 def maxout(self, X):
89 return _custom_kernels.maxout(X)
90
91 def backprop_maxout(self, dY, which, P):
92 return _custom_kernels.backprop_maxout(dY, which, P)
93
94 def relu(self, X, inplace=False):
95 if not inplace:
96 return X * (X > 0)
97 else:
98 X *= X > 0
99 return X
100
101 def backprop_relu(self, dY, Y, inplace=False):
102 if not inplace:
103 return dY * (Y > 0)
104 dY *= Y > 0
105 return dY
106
107 def clipped_linear(
108 self,
109 X,
110 slope: float = 1.0,
111 offset: float = 0.0,
112 min_val: float = 0.0,
113 max_val: float = 1.0,
114 inplace: bool = False,
115 ):
116 if X.dtype == "float32":
117 return _custom_kernels.clipped_linear(
118 X,
119 inplace=inplace,
120 slope=slope,
121 offset=offset,
122 min_val=min_val,
123 max_val=max_val,
124 )
125 else:
126 return super().clipped_linear(
127 X,
128 inplace=inplace,
129 slope=slope,
130 offset=offset,
131 min_val=min_val,
132 max_val=max_val,
133 )
134
135 def backprop_clipped_linear(
136 self,
137 dY,
138 X,
139 slope: float = 1.0,
140 offset: float = 0.0,
141 min_val: float = 0.0,
142 max_val: float = 1.0,
143 inplace: bool = False,
144 ):
145 if X.dtype == "float32" and dY.dtype == "float32":
146 return _custom_kernels.backprop_clipped_linear(
147 dY=dY,
148 X=X,
149 slope=slope,
150 offset=offset,
151 min_val=min_val,
152 max_val=max_val,
153 inplace=inplace,
154 )
155 else:
156 return super().backprop_clipped_linear(
157 dY=dY,
158 X=X,
159 slope=slope,
160 offset=offset,
161 min_val=min_val,
162 max_val=max_val,
163 inplace=inplace,
164 )
165
166 def backprop_hard_swish(self, dY, X, inplace: bool = False):
167 if X.dtype == "float32" and dY.dtype == "float32":
168 return _custom_kernels.backprop_hard_swish(dY, X, inplace=inplace)
169 else:
170 return super().backprop_hard_swish(dY, X, inplace=inplace)
171
172 def backprop_hard_swish_mobilenet(self, dY, X, inplace: bool = False):
173 if X.dtype == "float32" and dY.dtype == "float32":
174 return _custom_kernels.backprop_hard_swish_mobilenet(dY, X, inplace=inplace)
175 else:
176 return super().backprop_hard_swish_mobilenet(dY, X, inplace=inplace)
177
178 def mish(self, X, threshold=20.0, inplace=False):
179 if X.dtype == "float32" and not inplace:
180 return _custom_kernels.mish(X, threshold=threshold, out=None)
181 else:
182 return super().mish(X, threshold, inplace)
183
184 def backprop_mish(self, dY, X, threshold=20.0, inplace=False):
185 if dY.dtype == "float32" and X.dtype == "float32" and not inplace:
186 return _custom_kernels.backprop_mish(dY, X, threshold=threshold)
187 else:
188 return super().backprop_mish(dY, X, threshold, inplace)
189
190 def swish(self, X, inplace=False):
191 if X.dtype == "float32":
192 return _custom_kernels.swish(X, inplace=inplace, threshold=17.0)
193 else:
194 return super().swish(X, inplace=inplace)
195
196 def backprop_swish(self, dY, X, Y, inplace=False):
197 if X.dtype == "float32" and dY.dtype == "float32":
198 return _custom_kernels.backprop_swish(
199 dY, X, Y, inplace=inplace, threshold=17.0
200 )
201 else:
202 return super().backprop_swish(dY, X, Y, inplace=inplace)
203
204 def clip_gradient(self, gradient, threshold):
205 # We do not use CuPy's linalg.norm, since it uses scalar reductions
206 # using one CUDA block. This is a lot slower than the cuBLAS
207 # implementation.
208 def frobenius_norm(X):
209 X_vec = X.reshape(-1)
210 return cupy.cublas.nrm2(X_vec)
211
212 grad_norm = cupy.maximum(frobenius_norm(gradient), 1e-12)
213 gradient *= cupy.minimum(threshold, grad_norm) / grad_norm
214 return gradient
215
216 def seq2col(self, seq, nW, *, lengths=None):
217 """Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence.
218 The new sequence is constructed by concatenating nW preceding and succeeding
219 vectors onto each column in the sequence, to extract a window of features.
220 """
221 return _custom_kernels.seq2col(seq, nW, lengths=lengths)
222
223 def backprop_seq2col(self, dY, nW, *, lengths=None):
224 return _custom_kernels.backprop_seq2col(dY, nW, lengths=lengths)
225
226 def reduce_mean(self, X, lengths):
227 return _custom_kernels.reduce_mean(X, lengths)
228
229 def backprop_reduce_mean(self, d_means, lengths):
230 return _custom_kernels.backprop_reduce_mean(d_means, lengths)
231
232 def reduce_max(self, X, lengths):
233 return _custom_kernels.reduce_max(X, lengths)
234
235 def backprop_reduce_max(self, d_maxes, which, lengths):
236 return _custom_kernels.backprop_reduce_max(d_maxes, which, lengths)
237
238 def reduce_sum(self, X, lengths):
239 return _custom_kernels.reduce_sum(X, lengths)
240
241 def backprop_reduce_sum(self, d_sums, lengths):
242 return _custom_kernels.backprop_reduce_sum(d_sums, lengths)
243
244 def hash(self, ids, seed):
245 return _custom_kernels.hash(ids, seed)
246
247 def scatter_add(self, table, indices, values):
248 self._xp2.scatter_add(table, indices, values)
249
250 def adam(
251 self, weights, gradient, mom1, mom2, beta1, beta2, eps, learn_rate, mod_rate=1.0
252 ):
253 adam_kernel(
254 gradient, learn_rate, 1 - beta1, 1 - beta2, eps, weights, mom1, mom2
255 )
256 gradient.fill(0)
257 return weights, gradient, mom1, mom2
258
259 def position_encode(self, N, D, period=10000, out=None):
260 positions = NumpyOps().position_encode(N, D, period=period, out=out)
261 return self.asarray(positions)
262
263
264 if cupy is not None:
265 adam_kernel = cupy.ElementwiseKernel(
266 "T grad, T lr, T one_minus_beta1, T one_minus_beta2, T eps",
267 "T param, T m, T v",
268 """m += one_minus_beta1 * (grad - m);
269 v += one_minus_beta2 * (grad * grad - v);
270 param -= lr * m / (sqrt(v) + eps);""",
271 "adam",
272 )
273 else:
274 adam_kernel = None
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/thinc/backends/cupy_ops.py b/thinc/backends/cupy_ops.py
--- a/thinc/backends/cupy_ops.py
+++ b/thinc/backends/cupy_ops.py
@@ -19,6 +19,8 @@
from .numpy_ops import NumpyOps
from . import _custom_kernels
from ..types import DeviceTypes
+from ..util import torch2xp, tensorflow2xp, mxnet2xp
+from ..util import is_torch_array, is_tensorflow_array, is_mxnet_array
@registry.ops("CupyOps")
@@ -73,17 +75,28 @@
# This is sort of frustrating, but we can't easily otherwise pass
# forward "unset".
dtype = {"dtype": dtype} if dtype is not None else {}
+
+ # We'll try to perform a zero-copy conversion if possible.
+ array = None
+ cast_array = False
if isinstance(data, cupy.ndarray):
- return self.xp.asarray(data, **dtype)
- elif hasattr(data, "data_ptr"):
- # Handles PyTorch Tensors
- pointer = cupy.cuda.MemoryPointer(data.data_ptr())
- shape = data.stride()
- array = self.xp.ndarray(shape, memptr=pointer, **dtype)
- return array
+ array = self.xp.asarray(data, **dtype)
+ elif is_torch_array(data) and data.device.type == "cuda":
+ array = torch2xp(data)
+ cast_array = True
+ elif is_tensorflow_array(data) and "GPU:" in data.device:
+ array = tensorflow2xp(data)
+ cast_array = True
+ elif is_mxnet_array(data) and data.context.device_type != "cpu":
+ array = mxnet2xp(data)
+ cast_array = True
else:
- result = self.xp.array(data, **dtype)
- return result
+ array = self.xp.array(data, **dtype)
+
+ if cast_array and dtype != {}:
+ array = array.astype(dtype["dtype"])
+
+ return array
def maxout(self, X):
return _custom_kernels.maxout(X)
| {"golden_diff": "diff --git a/thinc/backends/cupy_ops.py b/thinc/backends/cupy_ops.py\n--- a/thinc/backends/cupy_ops.py\n+++ b/thinc/backends/cupy_ops.py\n@@ -19,6 +19,8 @@\n from .numpy_ops import NumpyOps\n from . import _custom_kernels\n from ..types import DeviceTypes\n+from ..util import torch2xp, tensorflow2xp, mxnet2xp\n+from ..util import is_torch_array, is_tensorflow_array, is_mxnet_array\n \n \n @registry.ops(\"CupyOps\")\n@@ -73,17 +75,28 @@\n # This is sort of frustrating, but we can't easily otherwise pass\n # forward \"unset\".\n dtype = {\"dtype\": dtype} if dtype is not None else {}\n+\n+ # We'll try to perform a zero-copy conversion if possible.\n+ array = None\n+ cast_array = False\n if isinstance(data, cupy.ndarray):\n- return self.xp.asarray(data, **dtype)\n- elif hasattr(data, \"data_ptr\"):\n- # Handles PyTorch Tensors\n- pointer = cupy.cuda.MemoryPointer(data.data_ptr())\n- shape = data.stride()\n- array = self.xp.ndarray(shape, memptr=pointer, **dtype)\n- return array\n+ array = self.xp.asarray(data, **dtype)\n+ elif is_torch_array(data) and data.device.type == \"cuda\":\n+ array = torch2xp(data)\n+ cast_array = True\n+ elif is_tensorflow_array(data) and \"GPU:\" in data.device:\n+ array = tensorflow2xp(data)\n+ cast_array = True\n+ elif is_mxnet_array(data) and data.context.device_type != \"cpu\":\n+ array = mxnet2xp(data)\n+ cast_array = True\n else:\n- result = self.xp.array(data, **dtype)\n- return result\n+ array = self.xp.array(data, **dtype)\n+\n+ if cast_array and dtype != {}:\n+ array = array.astype(dtype[\"dtype\"])\n+\n+ return array\n \n def maxout(self, X):\n return _custom_kernels.maxout(X)\n", "issue": "Cupy TypeError when using asarray with torch tensors\nHi,\r\n\r\nI'm chaining a custom feature extraction model with a PyTorchWrapper model. In the feature extraction part, I'm using asarray function on a torch tensor but as you can see below, cupy raises an error when MemoryPointer is called. Do you have any idea why that line raises an exception?\r\n\r\nThanks,\r\n\r\n```\r\nTraceback (most recent call last): \r\n...\r\n File \"/storage/myuser/myproject/main.py\", line 180, in forward\r\n features.append(model.ops.asarray2f(attrs, dtype=\"float32\"))\r\n File \"/storage/miniconda/envs/safirx-ml/lib/python3.9/site-packages/thinc/backends/ops.py\", line 494, in asarray2f\r\n return cast(Floats2d, self.asarray(data, dtype=dtype))\r\n File \"/storage/miniconda/envs/safirx-ml/lib/python3.9/site-packages/thinc/backends/cupy_ops.py\", line 67, in asarray\r\n pointer = cupy.cuda.MemoryPointer(data.data_ptr())\r\n File \"cupy/cuda/memory.pyx\", line 343, in cupy.cuda.memory.MemoryPointer.__init__\r\nTypeError: __init__() takes exactly 2 positional arguments (1 given)```\n", "before_files": [{"content": "import numpy\n\ntry:\n import cupy\n import cupyx\n import cupy.cuda\n from cupy.cuda.compiler import compile_with_cache # noqa: F401\n\n has_cupy = True\n\n # We no longer have to set up the memory pool, fortunately.\nexcept ImportError:\n cupy = None\n cupyx = None\n has_cupy = False\n\nfrom .. import registry\nfrom .ops import Ops\nfrom .numpy_ops import NumpyOps\nfrom . 
import _custom_kernels\nfrom ..types import DeviceTypes\n\n\[email protected](\"CupyOps\")\nclass CupyOps(Ops):\n name = \"cupy\"\n xp = cupy\n _xp2 = cupyx\n\n def __init__(\n self, device_type: DeviceTypes = \"gpu\", device_id: int = 0, **kwargs\n ) -> None:\n self.device_type = device_type\n self.device_id = device_id\n\n def to_numpy(self, data, *, byte_order=None):\n if not isinstance(data, numpy.ndarray):\n data = data.get()\n if byte_order:\n dtype = data.dtype.newbyteorder(byte_order)\n data = numpy.asarray(data, dtype=dtype)\n return data\n\n def gelu(self, X, inplace=False):\n if X.dtype == \"float32\":\n return _custom_kernels.gelu(X, inplace=inplace, threshold=6.0)\n else:\n return super().gelu(X, inplace=inplace)\n\n def backprop_gelu(self, dY, X, inplace=False):\n if X.dtype == \"float32\" and dY.dtype == \"float32\":\n return _custom_kernels.backprop_gelu(dY, X, inplace=inplace, threshold=6.0)\n else:\n return super().backprop_gelu(dY, X, inplace=inplace)\n\n def gemm(self, x, y, out=None, trans1=False, trans2=False):\n if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):\n raise ValueError(\n \"Encountered a numpy array when processing with cupy. \"\n \"Did you call model.ops.asarray on your data?\"\n )\n if trans1:\n x = x.T\n if trans2:\n y = y.T\n if out is None:\n return self.xp.dot(x, y)\n else:\n self.xp.dot(x, y, out=out)\n return out\n\n def asarray(self, data, dtype=None):\n # This is sort of frustrating, but we can't easily otherwise pass\n # forward \"unset\".\n dtype = {\"dtype\": dtype} if dtype is not None else {}\n if isinstance(data, cupy.ndarray):\n return self.xp.asarray(data, **dtype)\n elif hasattr(data, \"data_ptr\"):\n # Handles PyTorch Tensors\n pointer = cupy.cuda.MemoryPointer(data.data_ptr())\n shape = data.stride()\n array = self.xp.ndarray(shape, memptr=pointer, **dtype)\n return array\n else:\n result = self.xp.array(data, **dtype)\n return result\n\n def maxout(self, X):\n return _custom_kernels.maxout(X)\n\n def backprop_maxout(self, dY, which, P):\n return _custom_kernels.backprop_maxout(dY, which, P)\n\n def relu(self, X, inplace=False):\n if not inplace:\n return X * (X > 0)\n else:\n X *= X > 0\n return X\n\n def backprop_relu(self, dY, Y, inplace=False):\n if not inplace:\n return dY * (Y > 0)\n dY *= Y > 0\n return dY\n\n def clipped_linear(\n self,\n X,\n slope: float = 1.0,\n offset: float = 0.0,\n min_val: float = 0.0,\n max_val: float = 1.0,\n inplace: bool = False,\n ):\n if X.dtype == \"float32\":\n return _custom_kernels.clipped_linear(\n X,\n inplace=inplace,\n slope=slope,\n offset=offset,\n min_val=min_val,\n max_val=max_val,\n )\n else:\n return super().clipped_linear(\n X,\n inplace=inplace,\n slope=slope,\n offset=offset,\n min_val=min_val,\n max_val=max_val,\n )\n\n def backprop_clipped_linear(\n self,\n dY,\n X,\n slope: float = 1.0,\n offset: float = 0.0,\n min_val: float = 0.0,\n max_val: float = 1.0,\n inplace: bool = False,\n ):\n if X.dtype == \"float32\" and dY.dtype == \"float32\":\n return _custom_kernels.backprop_clipped_linear(\n dY=dY,\n X=X,\n slope=slope,\n offset=offset,\n min_val=min_val,\n max_val=max_val,\n inplace=inplace,\n )\n else:\n return super().backprop_clipped_linear(\n dY=dY,\n X=X,\n slope=slope,\n offset=offset,\n min_val=min_val,\n max_val=max_val,\n inplace=inplace,\n )\n\n def backprop_hard_swish(self, dY, X, inplace: bool = False):\n if X.dtype == \"float32\" and dY.dtype == \"float32\":\n return _custom_kernels.backprop_hard_swish(dY, X, inplace=inplace)\n else:\n return 
super().backprop_hard_swish(dY, X, inplace=inplace)\n\n def backprop_hard_swish_mobilenet(self, dY, X, inplace: bool = False):\n if X.dtype == \"float32\" and dY.dtype == \"float32\":\n return _custom_kernels.backprop_hard_swish_mobilenet(dY, X, inplace=inplace)\n else:\n return super().backprop_hard_swish_mobilenet(dY, X, inplace=inplace)\n\n def mish(self, X, threshold=20.0, inplace=False):\n if X.dtype == \"float32\" and not inplace:\n return _custom_kernels.mish(X, threshold=threshold, out=None)\n else:\n return super().mish(X, threshold, inplace)\n\n def backprop_mish(self, dY, X, threshold=20.0, inplace=False):\n if dY.dtype == \"float32\" and X.dtype == \"float32\" and not inplace:\n return _custom_kernels.backprop_mish(dY, X, threshold=threshold)\n else:\n return super().backprop_mish(dY, X, threshold, inplace)\n\n def swish(self, X, inplace=False):\n if X.dtype == \"float32\":\n return _custom_kernels.swish(X, inplace=inplace, threshold=17.0)\n else:\n return super().swish(X, inplace=inplace)\n\n def backprop_swish(self, dY, X, Y, inplace=False):\n if X.dtype == \"float32\" and dY.dtype == \"float32\":\n return _custom_kernels.backprop_swish(\n dY, X, Y, inplace=inplace, threshold=17.0\n )\n else:\n return super().backprop_swish(dY, X, Y, inplace=inplace)\n\n def clip_gradient(self, gradient, threshold):\n # We do not use CuPy's linalg.norm, since it uses scalar reductions\n # using one CUDA block. This is a lot slower than the cuBLAS\n # implementation.\n def frobenius_norm(X):\n X_vec = X.reshape(-1)\n return cupy.cublas.nrm2(X_vec)\n\n grad_norm = cupy.maximum(frobenius_norm(gradient), 1e-12)\n gradient *= cupy.minimum(threshold, grad_norm) / grad_norm\n return gradient\n\n def seq2col(self, seq, nW, *, lengths=None):\n \"\"\"Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence.\n The new sequence is constructed by concatenating nW preceding and succeeding\n vectors onto each column in the sequence, to extract a window of features.\n \"\"\"\n return _custom_kernels.seq2col(seq, nW, lengths=lengths)\n\n def backprop_seq2col(self, dY, nW, *, lengths=None):\n return _custom_kernels.backprop_seq2col(dY, nW, lengths=lengths)\n\n def reduce_mean(self, X, lengths):\n return _custom_kernels.reduce_mean(X, lengths)\n\n def backprop_reduce_mean(self, d_means, lengths):\n return _custom_kernels.backprop_reduce_mean(d_means, lengths)\n\n def reduce_max(self, X, lengths):\n return _custom_kernels.reduce_max(X, lengths)\n\n def backprop_reduce_max(self, d_maxes, which, lengths):\n return _custom_kernels.backprop_reduce_max(d_maxes, which, lengths)\n\n def reduce_sum(self, X, lengths):\n return _custom_kernels.reduce_sum(X, lengths)\n\n def backprop_reduce_sum(self, d_sums, lengths):\n return _custom_kernels.backprop_reduce_sum(d_sums, lengths)\n\n def hash(self, ids, seed):\n return _custom_kernels.hash(ids, seed)\n\n def scatter_add(self, table, indices, values):\n self._xp2.scatter_add(table, indices, values)\n\n def adam(\n self, weights, gradient, mom1, mom2, beta1, beta2, eps, learn_rate, mod_rate=1.0\n ):\n adam_kernel(\n gradient, learn_rate, 1 - beta1, 1 - beta2, eps, weights, mom1, mom2\n )\n gradient.fill(0)\n return weights, gradient, mom1, mom2\n\n def position_encode(self, N, D, period=10000, out=None):\n positions = NumpyOps().position_encode(N, D, period=period, out=out)\n return self.asarray(positions)\n\n\nif cupy is not None:\n adam_kernel = cupy.ElementwiseKernel(\n \"T grad, T lr, T one_minus_beta1, T one_minus_beta2, T eps\",\n \"T param, T m, T v\",\n 
\"\"\"m += one_minus_beta1 * (grad - m);\n v += one_minus_beta2 * (grad * grad - v);\n param -= lr * m / (sqrt(v) + eps);\"\"\",\n \"adam\",\n )\nelse:\n adam_kernel = None\n", "path": "thinc/backends/cupy_ops.py"}], "after_files": [{"content": "import numpy\n\ntry:\n import cupy\n import cupyx\n import cupy.cuda\n from cupy.cuda.compiler import compile_with_cache # noqa: F401\n\n has_cupy = True\n\n # We no longer have to set up the memory pool, fortunately.\nexcept ImportError:\n cupy = None\n cupyx = None\n has_cupy = False\n\nfrom .. import registry\nfrom .ops import Ops\nfrom .numpy_ops import NumpyOps\nfrom . import _custom_kernels\nfrom ..types import DeviceTypes\nfrom ..util import torch2xp, tensorflow2xp, mxnet2xp\nfrom ..util import is_torch_array, is_tensorflow_array, is_mxnet_array\n\n\[email protected](\"CupyOps\")\nclass CupyOps(Ops):\n name = \"cupy\"\n xp = cupy\n _xp2 = cupyx\n\n def __init__(\n self, device_type: DeviceTypes = \"gpu\", device_id: int = 0, **kwargs\n ) -> None:\n self.device_type = device_type\n self.device_id = device_id\n\n def to_numpy(self, data, *, byte_order=None):\n if not isinstance(data, numpy.ndarray):\n data = data.get()\n if byte_order:\n dtype = data.dtype.newbyteorder(byte_order)\n data = numpy.asarray(data, dtype=dtype)\n return data\n\n def gelu(self, X, inplace=False):\n if X.dtype == \"float32\":\n return _custom_kernels.gelu(X, inplace=inplace, threshold=6.0)\n else:\n return super().gelu(X, inplace=inplace)\n\n def backprop_gelu(self, dY, X, inplace=False):\n if X.dtype == \"float32\" and dY.dtype == \"float32\":\n return _custom_kernels.backprop_gelu(dY, X, inplace=inplace, threshold=6.0)\n else:\n return super().backprop_gelu(dY, X, inplace=inplace)\n\n def gemm(self, x, y, out=None, trans1=False, trans2=False):\n if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):\n raise ValueError(\n \"Encountered a numpy array when processing with cupy. 
\"\n \"Did you call model.ops.asarray on your data?\"\n )\n if trans1:\n x = x.T\n if trans2:\n y = y.T\n if out is None:\n return self.xp.dot(x, y)\n else:\n self.xp.dot(x, y, out=out)\n return out\n\n def asarray(self, data, dtype=None):\n # This is sort of frustrating, but we can't easily otherwise pass\n # forward \"unset\".\n dtype = {\"dtype\": dtype} if dtype is not None else {}\n\n # We'll try to perform a zero-copy conversion if possible.\n array = None\n cast_array = False\n if isinstance(data, cupy.ndarray):\n array = self.xp.asarray(data, **dtype)\n elif is_torch_array(data) and data.device.type == \"cuda\":\n array = torch2xp(data)\n cast_array = True\n elif is_tensorflow_array(data) and \"GPU:\" in data.device:\n array = tensorflow2xp(data)\n cast_array = True\n elif is_mxnet_array(data) and data.context.device_type != \"cpu\":\n array = mxnet2xp(data)\n cast_array = True\n else:\n array = self.xp.array(data, **dtype)\n\n if cast_array and dtype != {}:\n array = array.astype(dtype[\"dtype\"])\n\n return array\n\n def maxout(self, X):\n return _custom_kernels.maxout(X)\n\n def backprop_maxout(self, dY, which, P):\n return _custom_kernels.backprop_maxout(dY, which, P)\n\n def relu(self, X, inplace=False):\n if not inplace:\n return X * (X > 0)\n else:\n X *= X > 0\n return X\n\n def backprop_relu(self, dY, Y, inplace=False):\n if not inplace:\n return dY * (Y > 0)\n dY *= Y > 0\n return dY\n\n def clipped_linear(\n self,\n X,\n slope: float = 1.0,\n offset: float = 0.0,\n min_val: float = 0.0,\n max_val: float = 1.0,\n inplace: bool = False,\n ):\n if X.dtype == \"float32\":\n return _custom_kernels.clipped_linear(\n X,\n inplace=inplace,\n slope=slope,\n offset=offset,\n min_val=min_val,\n max_val=max_val,\n )\n else:\n return super().clipped_linear(\n X,\n inplace=inplace,\n slope=slope,\n offset=offset,\n min_val=min_val,\n max_val=max_val,\n )\n\n def backprop_clipped_linear(\n self,\n dY,\n X,\n slope: float = 1.0,\n offset: float = 0.0,\n min_val: float = 0.0,\n max_val: float = 1.0,\n inplace: bool = False,\n ):\n if X.dtype == \"float32\" and dY.dtype == \"float32\":\n return _custom_kernels.backprop_clipped_linear(\n dY=dY,\n X=X,\n slope=slope,\n offset=offset,\n min_val=min_val,\n max_val=max_val,\n inplace=inplace,\n )\n else:\n return super().backprop_clipped_linear(\n dY=dY,\n X=X,\n slope=slope,\n offset=offset,\n min_val=min_val,\n max_val=max_val,\n inplace=inplace,\n )\n\n def backprop_hard_swish(self, dY, X, inplace: bool = False):\n if X.dtype == \"float32\" and dY.dtype == \"float32\":\n return _custom_kernels.backprop_hard_swish(dY, X, inplace=inplace)\n else:\n return super().backprop_hard_swish(dY, X, inplace=inplace)\n\n def backprop_hard_swish_mobilenet(self, dY, X, inplace: bool = False):\n if X.dtype == \"float32\" and dY.dtype == \"float32\":\n return _custom_kernels.backprop_hard_swish_mobilenet(dY, X, inplace=inplace)\n else:\n return super().backprop_hard_swish_mobilenet(dY, X, inplace=inplace)\n\n def mish(self, X, threshold=20.0, inplace=False):\n if X.dtype == \"float32\" and not inplace:\n return _custom_kernels.mish(X, threshold=threshold, out=None)\n else:\n return super().mish(X, threshold, inplace)\n\n def backprop_mish(self, dY, X, threshold=20.0, inplace=False):\n if dY.dtype == \"float32\" and X.dtype == \"float32\" and not inplace:\n return _custom_kernels.backprop_mish(dY, X, threshold=threshold)\n else:\n return super().backprop_mish(dY, X, threshold, inplace)\n\n def swish(self, X, inplace=False):\n if X.dtype == \"float32\":\n 
return _custom_kernels.swish(X, inplace=inplace, threshold=17.0)\n else:\n return super().swish(X, inplace=inplace)\n\n def backprop_swish(self, dY, X, Y, inplace=False):\n if X.dtype == \"float32\" and dY.dtype == \"float32\":\n return _custom_kernels.backprop_swish(\n dY, X, Y, inplace=inplace, threshold=17.0\n )\n else:\n return super().backprop_swish(dY, X, Y, inplace=inplace)\n\n def clip_gradient(self, gradient, threshold):\n # We do not use CuPy's linalg.norm, since it uses scalar reductions\n # using one CUDA block. This is a lot slower than the cuBLAS\n # implementation.\n def frobenius_norm(X):\n X_vec = X.reshape(-1)\n return cupy.cublas.nrm2(X_vec)\n\n grad_norm = cupy.maximum(frobenius_norm(gradient), 1e-12)\n gradient *= cupy.minimum(threshold, grad_norm) / grad_norm\n return gradient\n\n def seq2col(self, seq, nW, *, lengths=None):\n \"\"\"Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence.\n The new sequence is constructed by concatenating nW preceding and succeeding\n vectors onto each column in the sequence, to extract a window of features.\n \"\"\"\n return _custom_kernels.seq2col(seq, nW, lengths=lengths)\n\n def backprop_seq2col(self, dY, nW, *, lengths=None):\n return _custom_kernels.backprop_seq2col(dY, nW, lengths=lengths)\n\n def reduce_mean(self, X, lengths):\n return _custom_kernels.reduce_mean(X, lengths)\n\n def backprop_reduce_mean(self, d_means, lengths):\n return _custom_kernels.backprop_reduce_mean(d_means, lengths)\n\n def reduce_max(self, X, lengths):\n return _custom_kernels.reduce_max(X, lengths)\n\n def backprop_reduce_max(self, d_maxes, which, lengths):\n return _custom_kernels.backprop_reduce_max(d_maxes, which, lengths)\n\n def reduce_sum(self, X, lengths):\n return _custom_kernels.reduce_sum(X, lengths)\n\n def backprop_reduce_sum(self, d_sums, lengths):\n return _custom_kernels.backprop_reduce_sum(d_sums, lengths)\n\n def hash(self, ids, seed):\n return _custom_kernels.hash(ids, seed)\n\n def scatter_add(self, table, indices, values):\n self._xp2.scatter_add(table, indices, values)\n\n def adam(\n self, weights, gradient, mom1, mom2, beta1, beta2, eps, learn_rate, mod_rate=1.0\n ):\n adam_kernel(\n gradient, learn_rate, 1 - beta1, 1 - beta2, eps, weights, mom1, mom2\n )\n gradient.fill(0)\n return weights, gradient, mom1, mom2\n\n def position_encode(self, N, D, period=10000, out=None):\n positions = NumpyOps().position_encode(N, D, period=period, out=out)\n return self.asarray(positions)\n\n\nif cupy is not None:\n adam_kernel = cupy.ElementwiseKernel(\n \"T grad, T lr, T one_minus_beta1, T one_minus_beta2, T eps\",\n \"T param, T m, T v\",\n \"\"\"m += one_minus_beta1 * (grad - m);\n v += one_minus_beta2 * (grad * grad - v);\n param -= lr * m / (sqrt(v) + eps);\"\"\",\n \"adam\",\n )\nelse:\n adam_kernel = None\n", "path": "thinc/backends/cupy_ops.py"}]} | 3,630 | 484 |
gh_patches_debug_34862 | rasdani/github-patches | git_diff | qtile__qtile-3204 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
multiple systray widgets are racy
The Distrotube Guy reports via https://www.youtube.com/watch?v=9NO2DJARbU0 that multiple systrays are racy.
The spec (https://specifications.freedesktop.org/systemtray-spec/systemtray-spec-0.1.html#locating) says that we should use `_NET_SYSTEM_TRAY_Sn` for the system tray selection on screen n, but I think our code calculating this is wrong:
https://github.com/qtile/qtile/blob/master/libqtile/widget/systray.py#L129
This code always uses qtile's current screen, instead of the screen that the bar is on. It should at least use the right value for `n`.
Perhaps instead, we could make configuring multiple Systray() objects an error, and just have people specify one and let it mirror everywhere.
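
As a sketch of what that proposal could look like (an illustration, not qtile's actual implementation), the widget could refuse a second instance and derive `n` from the screen its bar is configured on rather than from `qtile.current_screen`. The class-level counter and the `bar.screen.index` accessor are assumptions made for the example:

```python
from libqtile.confreader import ConfigError


class Systray(window._Window, base._Widget):  # window/base as imported in systray.py
    _instance_count = 0  # hypothetical guard: only one widget may own the selection

    def _configure(self, qtile, bar):
        base._Widget._configure(self, qtile, bar)
        if Systray._instance_count:
            raise ConfigError("Only one Systray widget is supported")
        Systray._instance_count += 1

        # n in _NET_SYSTEM_TRAY_Sn must identify the screen this bar sits on,
        # not whichever screen happens to be focused when _configure runs.
        n = bar.screen.index  # assumed accessor for the bar's screen
        selection_atom = "_NET_SYSTEM_TRAY_S{}".format(n)
        ...
```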
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/widget/systray.py`
Content:
```
1 # Copyright (c) 2010 Aldo Cortesi
2 # Copyright (c) 2010-2011 dequis
3 # Copyright (c) 2010, 2012 roger
4 # Copyright (c) 2011 Mounier Florian
5 # Copyright (c) 2011-2012, 2014 Tycho Andersen
6 # Copyright (c) 2012 dmpayton
7 # Copyright (c) 2012-2013 Craig Barnes
8 # Copyright (c) 2013 hbc
9 # Copyright (c) 2013 Tao Sauvage
10 # Copyright (c) 2014 Sean Vig
11 #
12 # Permission is hereby granted, free of charge, to any person obtaining a copy
13 # of this software and associated documentation files (the "Software"), to deal
14 # in the Software without restriction, including without limitation the rights
15 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 # copies of the Software, and to permit persons to whom the Software is
17 # furnished to do so, subject to the following conditions:
18 #
19 # The above copyright notice and this permission notice shall be included in
20 # all copies or substantial portions of the Software.
21 #
22 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28 # SOFTWARE.
29 from typing import TYPE_CHECKING
30
31 import xcffib
32 from xcffib.xproto import ClientMessageData, ClientMessageEvent, EventMask, SetMode
33
34 from libqtile import bar
35 from libqtile.backend.x11 import window
36 from libqtile.widget import base
37
38 if TYPE_CHECKING:
39 from typing import List, Optional
40
41 XEMBED_PROTOCOL_VERSION = 0
42
43
44 class Icon(window._Window):
45 _window_mask = EventMask.StructureNotify | EventMask.PropertyChange | EventMask.Exposure
46
47 def __init__(self, win, qtile, systray):
48 window._Window.__init__(self, win, qtile)
49 self.systray = systray
50 # win.get_name() may return None when apps provide a temporary window before the icon window
51 # we need something in self.name in order to sort icons so we use the window's WID.
52 self.name = win.get_name() or str(win.wid)
53 self.update_size()
54
55 def __eq__(self, other):
56 if not isinstance(other, Icon):
57 return False
58
59 return self.window.wid == other.window.wid
60
61 def update_size(self):
62 icon_size = self.systray.icon_size
63 self.update_hints()
64
65 width = self.hints.get("min_width", icon_size)
66 height = self.hints.get("min_height", icon_size)
67
68 width = max(width, icon_size)
69 height = max(height, icon_size)
70
71 if height > icon_size:
72 width = width * icon_size // height
73 height = icon_size
74
75 self.width = width
76 self.height = height
77 return False
78
79 def handle_PropertyNotify(self, e): # noqa: N802
80 name = self.qtile.core.conn.atoms.get_name(e.atom)
81 if name == "_XEMBED_INFO":
82 info = self.window.get_property("_XEMBED_INFO", unpack=int)
83 if info and info[1]:
84 self.systray.bar.draw()
85
86 return False
87
88 def handle_DestroyNotify(self, event): # noqa: N802
89 wid = event.window
90 icon = self.qtile.windows_map.pop(wid)
91 self.systray.tray_icons.remove(icon)
92 self.systray.bar.draw()
93 return False
94
95 handle_UnmapNotify = handle_DestroyNotify # noqa: N815
96
97
98 class Systray(window._Window, base._Widget):
99 """
100 A widget that manages system tray.
101
102 .. note::
103 Icons will not render correctly where the bar/widget is
104 drawn with a semi-transparent background. Instead, icons
105 will be drawn with a transparent background.
106
107 If using this widget it is therefore recommended to use
108 a fully opaque background colour or a fully transparent
109 one.
110 """
111
112 _window_mask = EventMask.StructureNotify | EventMask.Exposure
113
114 orientations = base.ORIENTATION_BOTH
115
116 defaults = [
117 ("icon_size", 20, "Icon width"),
118 ("padding", 5, "Padding between icons"),
119 ]
120
121 def __init__(self, **config):
122 base._Widget.__init__(self, bar.CALCULATED, **config)
123 self.add_defaults(Systray.defaults)
124 self.tray_icons = []
125 self.screen = 0
126 self._name = config.get("name", "systray")
127 self._wm_class: Optional[List[str]] = None
128
129 def calculate_length(self):
130 if self.bar.horizontal:
131 length = sum(i.width for i in self.tray_icons)
132 else:
133 length = sum(i.height for i in self.tray_icons)
134 length += self.padding * len(self.tray_icons)
135 return length
136
137 def _configure(self, qtile, bar):
138 base._Widget._configure(self, qtile, bar)
139
140 if self.configured:
141 return
142
143 self.conn = conn = qtile.core.conn
144 win = conn.create_window(-1, -1, 1, 1)
145 window._Window.__init__(self, window.XWindow(conn, win.wid), qtile)
146 qtile.windows_map[win.wid] = self
147
148 # window._Window.__init__ overwrites the widget name so we need to restore it
149 self.name = self._name
150
151 # Even when we have multiple "Screen"s, we are setting up as the system
152 # tray on a particular X display, that is the screen we need to
153 # reference in the atom
154 if qtile.current_screen:
155 self.screen = qtile.current_screen.index
156 self.bar = bar
157 atoms = conn.atoms
158
159 # We need tray to tell icons which visual to use.
160 # This needs to be the same as the bar/widget.
161 # This mainly benefits transparent bars.
162 conn.conn.core.ChangeProperty(
163 xcffib.xproto.PropMode.Replace,
164 win.wid,
165 atoms["_NET_SYSTEM_TRAY_VISUAL"],
166 xcffib.xproto.Atom.VISUALID,
167 32,
168 1,
169 [self.drawer._visual.visual_id],
170 )
171
172 conn.conn.core.SetSelectionOwner(
173 win.wid, atoms["_NET_SYSTEM_TRAY_S{:d}".format(self.screen)], xcffib.CurrentTime
174 )
175 data = [
176 xcffib.CurrentTime,
177 atoms["_NET_SYSTEM_TRAY_S{:d}".format(self.screen)],
178 win.wid,
179 0,
180 0,
181 ]
182 union = ClientMessageData.synthetic(data, "I" * 5)
183 event = ClientMessageEvent.synthetic(
184 format=32, window=qtile.core._root.wid, type=atoms["MANAGER"], data=union
185 )
186 qtile.core._root.send_event(event, mask=EventMask.StructureNotify)
187
188 def handle_ClientMessage(self, event): # noqa: N802
189 atoms = self.conn.atoms
190
191 opcode = event.type
192 data = event.data.data32
193 message = data[1]
194 wid = data[2]
195
196 parent = self.bar.window.window
197
198 if opcode == atoms["_NET_SYSTEM_TRAY_OPCODE"] and message == 0:
199 w = window.XWindow(self.conn, wid)
200 icon = Icon(w, self.qtile, self)
201 if icon not in self.tray_icons:
202 self.tray_icons.append(icon)
203 self.tray_icons.sort(key=lambda icon: icon.name)
204 self.qtile.windows_map[wid] = icon
205
206 self.conn.conn.core.ChangeSaveSet(SetMode.Insert, wid)
207 self.conn.conn.core.ReparentWindow(wid, parent.wid, 0, 0)
208 self.conn.conn.flush()
209
210 info = icon.window.get_property("_XEMBED_INFO", unpack=int)
211
212 if not info:
213 self.bar.draw()
214 return False
215
216 if info[1]:
217 self.bar.draw()
218
219 return False
220
221 def draw(self):
222 offset = self.padding
223 self.drawer.clear(self.background or self.bar.background)
224 self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.length)
225 for pos, icon in enumerate(self.tray_icons):
226 icon.window.set_attribute(backpixmap=self.drawer.pixmap)
227 if self.bar.horizontal:
228 xoffset = self.offsetx + offset
229 yoffset = self.bar.height // 2 - self.icon_size // 2 + self.offsety
230 step = icon.width
231 else:
232 xoffset = self.bar.width // 2 - self.icon_size // 2 + self.offsetx
233 yoffset = self.offsety + offset
234 step = icon.height
235
236 icon.place(xoffset, yoffset, icon.width, self.icon_size, 0, None)
237 if icon.hidden:
238 icon.unhide()
239 data = [
240 self.conn.atoms["_XEMBED_EMBEDDED_NOTIFY"],
241 xcffib.xproto.Time.CurrentTime,
242 0,
243 self.bar.window.wid,
244 XEMBED_PROTOCOL_VERSION,
245 ]
246 u = xcffib.xproto.ClientMessageData.synthetic(data, "I" * 5)
247 event = xcffib.xproto.ClientMessageEvent.synthetic(
248 format=32, window=icon.wid, type=self.conn.atoms["_XEMBED"], data=u
249 )
250 self.window.send_event(event)
251
252 offset += step + self.padding
253
254 def finalize(self):
255 base._Widget.finalize(self)
256 atoms = self.conn.atoms
257 self.conn.conn.core.SetSelectionOwner(
258 0,
259 atoms["_NET_SYSTEM_TRAY_S{:d}".format(self.screen)],
260 xcffib.CurrentTime,
261 )
262 self.hide()
263
264 root = self.qtile.core._root.wid
265 for icon in self.tray_icons:
266 self.conn.conn.core.ReparentWindow(icon.window.wid, root, 0, 0)
267 self.conn.conn.flush()
268
269 del self.qtile.windows_map[self.wid]
270 self.conn.conn.core.DestroyWindow(self.wid)
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/widget/systray.py b/libqtile/widget/systray.py
--- a/libqtile/widget/systray.py
+++ b/libqtile/widget/systray.py
@@ -33,6 +33,7 @@
from libqtile import bar
from libqtile.backend.x11 import window
+from libqtile.confreader import ConfigError
from libqtile.widget import base
if TYPE_CHECKING:
@@ -99,6 +100,9 @@
"""
A widget that manages system tray.
+ Only one Systray widget is allowed. Adding additional Systray
+ widgets will result in a ConfigError.
+
.. note::
Icons will not render correctly where the bar/widget is
drawn with a semi-transparent background. Instead, icons
@@ -109,6 +113,8 @@
one.
"""
+ _instances = 0
+
_window_mask = EventMask.StructureNotify | EventMask.Exposure
orientations = base.ORIENTATION_BOTH
@@ -137,6 +143,9 @@
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
+ if Systray._instances > 0:
+ raise ConfigError("Only one Systray can be used.")
+
if self.configured:
return
@@ -185,6 +194,17 @@
)
qtile.core._root.send_event(event, mask=EventMask.StructureNotify)
+ Systray._instances += 1
+
+ def create_mirror(self):
+ """
+ Systray cannot be mirrored as we do not use a Drawer object to render icons.
+
+ Return itself so that, when the bar tries to configure it again, a ConfigError
+ is raised.
+ """
+ return self
+
def handle_ClientMessage(self, event): # noqa: N802
atoms = self.conn.atoms
@@ -268,3 +288,5 @@
del self.qtile.windows_map[self.wid]
self.conn.conn.core.DestroyWindow(self.wid)
+
+ Systray._instances -= 1
gh_patches_debug_64261 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-574 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[INF] Allow `import_message()` to be Python distribution flexible
# Brief Description
Currently, if a user attempts to use a feature of an optional external package (`rdkit`, `biopython`, `unyt`, `pyspark`) that is not installed, the user receives an error that tells them how to install it. The error message is displayed by `import_message()`, which is passed the installation instructions. Ex:
```
To use the janitor submodule spark, you need to install pyspark.
To do so, use the following command:
conda install -c conda-forge pyspark
```
With the exception of `rdkit`, I think all of these packages are `pip` installable. It would be nice if this message could decide whether to provide `conda` or `pip` instructions to the user, or tell them that the package can only be installed with `conda`.
This is how the function is currently called:
```python
import_message(submodule="spark", package="pyspark",
installation="conda install -c conda-forge pyspark")
```
Not all `conda` installs will use the same channel. One option is to provide both `conda` and `pip` instructions as arguments in the call and let it figure out which to send to the user. If either is `None`, then the package is understood to be `pip`-only or `conda`-only.
# Example API
One verbose option would be to extend what currently exists:
```python
import_message(submodule="spark", package="pyspark",
conda_installation="conda install -c conda-forge pyspark",
pip_installation="pip install pyspark")
```
A more succinct version could be:
```python
import_message(submodule="spark", package="pyspark",
conda_channel="conda-forge", pip_install=True)
```
which would use the provided `package` argument, and `conda_channel` could be `None` if it doesn't exist on `conda`.
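A minimal sketch of how the succinct form might behave (illustrative only — the body, defaults, and message wording here are assumptions, not pyjanitor's actual implementation):
```python
def import_message(submodule, package, conda_channel=None, pip_install=False):
    # Sketch: build the install instructions from whichever channels apply.
    commands = []
    if conda_channel is not None:
        commands.append(f"conda install -c {conda_channel} {package}")
    if pip_install:
        commands.append(f"pip install {package}")
    if not commands:
        commands.append(f"{package} is not installable via conda or pip; see its docs")
    print(
        f"To use the janitor submodule {submodule}, you need to install {package}.\n"
        "To do so, use the following command(s):\n    " + "\n    ".join(commands)
    )
```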
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `janitor/spark/functions.py`
Content:
```
1 """
2 General purpose data cleaning functions for pyspark.
3 """
4
5 import re
6
7 from .. import functions as janitor_func
8 from .. import utils as janitor_utils
9 from . import backend
10
11 try:
12 from pyspark.sql import DataFrame
13 except ImportError:
14 import_message(
15 submodule="spark",
16 package="pyspark",
17 conda_channel="conda-forge",
18 pip_install=True,
19 )
20
21
22 @backend.register_dataframe_method
23 def clean_names(
24 df: DataFrame,
25 case_type: str = "lower",
26 remove_special: bool = False,
27 strip_underscores: str = None,
28 ) -> DataFrame:
29 """
30 Clean column names for pyspark dataframe.
31
32 Takes all column names, converts them to lowercase, then replaces all
33 spaces with underscores.
34
35 This method does not mutate the original DataFrame.
36
37 Functional usage example:
38
39 .. code-block:: python
40
41 df = clean_names(df)
42
43 Method chaining example:
44
45 .. code-block:: python
46
47 from pyspark.sql import DataFrame
48 import janitor.spark
49 df = DataFrame(...).clean_names()
50
51 :Example of transformation:
52
53 .. code-block:: python
54
55 Columns before: First Name, Last Name, Employee Status, Subject
56 Columns after: first_name, last_name, employee_status, subject
57
58 :param df: Spark DataFrame object.
59 :param strip_underscores: (optional) Removes the outer underscores from all
60 column names. Default None keeps outer underscores. Values can be
61 either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'
62 and True.
63 :param case_type: (optional) Whether to make columns lower or uppercase.
64 Current case may be preserved with 'preserve',
65 while snake case conversion (from CamelCase or camelCase only)
66 can be turned on using "snake".
67 Default 'lower' makes all characters lowercase.
68 :param remove_special: (optional) Remove special characters from columns.
69 Only letters, numbers and underscores are preserved.
70 :returns: A Spark DataFrame.
71 """
72
73 cols = df.columns
74
75 cols = [janitor_func._change_case(col, case_type) for col in cols]
76
77 cols = [janitor_func._normalize_1(col) for col in cols]
78
79 if remove_special:
80 cols = [janitor_func._remove_special(col) for col in cols]
81
82 cols = [re.sub("_+", "_", col) for col in cols]
83
84 cols = [
85 janitor_utils._strip_underscores_func(col, strip_underscores)
86 for col in cols
87 ]
88
89 cols = [
90 f"`{col}` AS `{new_col}`" for col, new_col in zip(df.columns, cols)
91 ]
92
93 return df.selectExpr(*cols)
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/janitor/spark/functions.py b/janitor/spark/functions.py
--- a/janitor/spark/functions.py
+++ b/janitor/spark/functions.py
@@ -11,7 +11,7 @@
try:
from pyspark.sql import DataFrame
except ImportError:
- import_message(
+ janitor_utils.import_message(
submodule="spark",
package="pyspark",
conda_channel="conda-forge",
gh_patches_debug_737 | rasdani/github-patches | git_diff | graspologic-org__graspologic-176 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
change semipar and nonpar names?
What do people think? @jovo brought up that the current names are uninformative. I agree, but I don't really have a strong opinion on it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `graspy/inference/__init__.py`
Content:
```
1 from .semipar import SemiparametricTest
2 from .nonpar import NonparametricTest
3
4 __all__ = ["SemiparametricTest", "NonparametricTest"]
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/graspy/inference/__init__.py b/graspy/inference/__init__.py
--- a/graspy/inference/__init__.py
+++ b/graspy/inference/__init__.py
@@ -1,4 +1,4 @@
-from .semipar import SemiparametricTest
-from .nonpar import NonparametricTest
+from .latent_position_test import LatentPositionTest
+from .latent_distribution_test import LatentDistributionTest
-__all__ = ["SemiparametricTest", "NonparametricTest"]
+__all__ = ["LatentPositionTest", "LatentDistributionTest"]
gh_patches_debug_57590 | rasdani/github-patches | git_diff | joke2k__faker-305 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fail to run the tests
Another problem I have when building the RPM is running the tests. This is in a minimal build environment, so maybe some library is missing. I have tried using PYTHONPATH as in issue #291, but with no success.
Could you help me?
```
+ /usr/bin/python2 setup.py test
running test
running egg_info
writing requirements to fake_factory.egg-info/requires.txt
writing fake_factory.egg-info/PKG-INFO
writing top-level names to fake_factory.egg-info/top_level.txt
writing dependency_links to fake_factory.egg-info/dependency_links.txt
writing entry points to fake_factory.egg-info/entry_points.txt
reading manifest file 'fake_factory.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
warning: no files found matching '*.md'
writing manifest file 'fake_factory.egg-info/SOURCES.txt'
running build_ext
Traceback (most recent call last):
File "setup.py", line 58, in <module>
':python_version=="3.0"': ['importlib'],
File "/usr/lib64/python2.7/distutils/core.py", line 151, in setup
dist.run_commands()
File "/usr/lib64/python2.7/distutils/dist.py", line 953, in run_commands
self.run_command(cmd)
File "/usr/lib64/python2.7/distutils/dist.py", line 972, in run_command
cmd_obj.run()
File "/usr/lib/python2.7/site-packages/setuptools/command/test.py", line 142, in run
self.with_project_on_sys_path(self.run_tests)
File "/usr/lib/python2.7/site-packages/setuptools/command/test.py", line 122, in with_project_on_sys_path
func()
File "/usr/lib/python2.7/site-packages/setuptools/command/test.py", line 163, in run_tests
testRunner=self._resolve_as_ep(self.test_runner),
File "/usr/lib64/python2.7/unittest/main.py", line 94, in __init__
self.parseArgs(argv)
File "/usr/lib64/python2.7/unittest/main.py", line 149, in parseArgs
self.createTests()
File "/usr/lib64/python2.7/unittest/main.py", line 158, in createTests
self.module)
File "/usr/lib64/python2.7/unittest/loader.py", line 130, in loadTestsFromNames
suites = [self.loadTestsFromName(name, module) for name in names]
File "/usr/lib64/python2.7/unittest/loader.py", line 100, in loadTestsFromName
parent, obj = obj, getattr(obj, part)
AttributeError: 'module' object has no attribute 'tests'
```
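A plausible cause — stated as an assumption inferred from the `setup.py` shown below, not something confirmed in this report — is that `find_packages(exclude=['*.tests'])` leaves the `faker.tests` package out of the distribution, so the `test_suite='faker.tests'` dotted name cannot be resolved by unittest's loader. A quick check of that hypothesis from the source tree:
```python
# Sketch: compare the collected package lists with and without the exclude
# pattern; 'faker.tests' should only appear in the unfiltered call.
from setuptools import find_packages

print(find_packages(exclude=['*.tests']))  # 'faker.tests' missing here
print(find_packages())                     # 'faker.tests' expected here
```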
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # coding=utf-8
3
4 import os
5 import io
6 from setuptools import setup, find_packages
7
8 here = os.path.abspath(os.path.dirname(__file__))
9 README = io.open(os.path.join(here, 'README.rst'), encoding="utf8").read()
10 NEWS = io.open(os.path.join(here, 'CHANGELOG.rst'), encoding="utf8").read()
11
12
13 version = '0.5.3'
14
15 # this module can be zip-safe if the zipimporter implements iter_modules or if
16 # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
17 try:
18 import pkgutil
19 import zipimport
20 zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
21 zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
22 except (ImportError, AttributeError):
23 zip_safe = False
24
25 setup(
26 name='fake-factory',
27 version=version,
28 description="Faker is a Python package that generates fake data for you.",
29 long_description=README + '\n\n' + NEWS,
30 entry_points={
31 'console_scripts': ['faker=faker.cli:execute_from_command_line'],
32 },
33 classifiers=[
34 # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
35 'Development Status :: 3 - Alpha',
36 'Environment :: Console',
37 'Intended Audience :: Developers',
38 'Programming Language :: Python',
39 'Programming Language :: Python :: 2',
40 'Programming Language :: Python :: 3',
41 'Programming Language :: Python :: 3.4',
42 'Topic :: Software Development :: Libraries :: Python Modules',
43 'Topic :: Software Development :: Testing',
44 'Topic :: Utilities',
45 'License :: OSI Approved :: MIT License'
46 ],
47 keywords='faker fixtures data test mock generator',
48 author='joke2k',
49 author_email='[email protected]',
50 url='http://github.com/joke2k/faker',
51 license='MIT License',
52 packages=find_packages(exclude=['*.tests']),
53 platforms=["any"],
54 test_suite='faker.tests',
55 zip_safe=zip_safe,
56 extras_require={
57 ':python_version=="2.6"': ['importlib'],
58 ':python_version=="3.0"': ['importlib'],
59 }
60 )
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -49,7 +49,7 @@
author_email='[email protected]',
url='http://github.com/joke2k/faker',
license='MIT License',
- packages=find_packages(exclude=['*.tests']),
+ packages=find_packages(),
platforms=["any"],
test_suite='faker.tests',
zip_safe=zip_safe,
gh_patches_debug_54121 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-1439 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
System metrics instrumentation not working with custom defined configuration
System metrics instrumentation is not functional if the configuration that selects which metrics to export is provided explicitly. As a minimal example, this code
```python
from opentelemetry.metrics import set_meter_provider
from opentelemetry.instrumentation.system_metrics import SystemMetricsInstrumentor
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import (
ConsoleMetricExporter,
PeriodicExportingMetricReader,
)
exporter = ConsoleMetricExporter()
set_meter_provider(MeterProvider([PeriodicExportingMetricReader(exporter)]))
configuration = {
"runtime.memory": ["rss", "vms"],
"runtime.cpu.time": ["user", "system"],
}
SystemMetricsInstrumentor(config=configuration).instrument()
```
results in
```
Traceback (most recent call last):
File ".../test.py", line 15, in <module>
SystemMetricsInstrumentor(config=configuration).instrument()
File ".../lib/python3.10/site-packages/opentelemetry/instrumentation/instrumentor.py", line 51, in __new__
cls._instance = object.__new__(cls, *args, **kwargs)
TypeError: object.__new__() takes exactly one argument (the type to instantiate)
```
I am happy to look into fixing this. Removing `*args` and `**kwargs` in `opentelemetry/instrumentation/instrumentor.py:51` actually solves the issue here, but I'd like to understand the implications, as this implies changing the interface class.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # type: ignore
15
16 """
17 OpenTelemetry Base Instrumentor
18 """
19
20 from abc import ABC, abstractmethod
21 from logging import getLogger
22 from typing import Collection, Optional
23
24 from opentelemetry.instrumentation.dependencies import (
25 DependencyConflict,
26 get_dependency_conflicts,
27 )
28
29 _LOG = getLogger(__name__)
30
31
32 class BaseInstrumentor(ABC):
33 """An ABC for instrumentors
34
35 Child classes of this ABC should instrument specific third
36 party libraries or frameworks either by using the
37 ``opentelemetry-instrument`` command or by calling their methods
38 directly.
39
40 Since every third party library or framework is different and has different
41 instrumentation needs, more methods can be added to the child classes as
42 needed to provide practical instrumentation to the end user.
43 """
44
45 _instance = None
46 _is_instrumented_by_opentelemetry = False
47
48 def __new__(cls, *args, **kwargs):
49 if cls._instance is None:
50 cls._instance = object.__new__(cls, *args, **kwargs)
51
52 return cls._instance
53
54 @property
55 def is_instrumented_by_opentelemetry(self):
56 return self._is_instrumented_by_opentelemetry
57
58 @abstractmethod
59 def instrumentation_dependencies(self) -> Collection[str]:
60 """Return a list of python packages with versions that the will be instrumented.
61
62 The format should be the same as used in requirements.txt or pyproject.toml.
63
64 For example, if an instrumentation instruments requests 1.x, this method should look
65 like:
66
67 def instrumentation_dependencies(self) -> Collection[str]:
68 return ['requests ~= 1.0']
69
70 This will ensure that the instrumentation will only be used when the specified library
71 is present in the environment.
72 """
73
74 def _instrument(self, **kwargs):
75 """Instrument the library"""
76
77 @abstractmethod
78 def _uninstrument(self, **kwargs):
79 """Uninstrument the library"""
80
81 def _check_dependency_conflicts(self) -> Optional[DependencyConflict]:
82 dependencies = self.instrumentation_dependencies()
83 return get_dependency_conflicts(dependencies)
84
85 def instrument(self, **kwargs):
86 """Instrument the library
87
88 This method will be called without any optional arguments by the
89 ``opentelemetry-instrument`` command.
90
91 This means that calling this method directly without passing any
92 optional values should do the very same thing that the
93 ``opentelemetry-instrument`` command does.
94 """
95
96 if self._is_instrumented_by_opentelemetry:
97 _LOG.warning("Attempting to instrument while already instrumented")
98 return None
99
100 # check if instrumentor has any missing or conflicting dependencies
101 skip_dep_check = kwargs.pop("skip_dep_check", False)
102 if not skip_dep_check:
103 conflict = self._check_dependency_conflicts()
104 if conflict:
105 _LOG.error(conflict)
106 return None
107
108 result = self._instrument( # pylint: disable=assignment-from-no-return
109 **kwargs
110 )
111 self._is_instrumented_by_opentelemetry = True
112 return result
113
114 def uninstrument(self, **kwargs):
115 """Uninstrument the library
116
117 See ``BaseInstrumentor.instrument`` for more information regarding the
118 usage of ``kwargs``.
119 """
120
121 if self._is_instrumented_by_opentelemetry:
122 result = self._uninstrument(**kwargs)
123 self._is_instrumented_by_opentelemetry = False
124 return result
125
126 _LOG.warning("Attempting to uninstrument while already uninstrumented")
127
128 return None
129
130
131 __all__ = ["BaseInstrumentor"]
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py
--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py
+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py
@@ -47,7 +47,7 @@
def __new__(cls, *args, **kwargs):
if cls._instance is None:
- cls._instance = object.__new__(cls, *args, **kwargs)
+ cls._instance = object.__new__(cls)
return cls._instance
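
The one-argument call introduced by the patch above works because CPython's `object.__new__` rejects extra arguments when the class overrides `__new__`, while constructor arguments are still delivered to `__init__` by the normal instantiation protocol. A minimal sketch of the resulting behavior — an illustrative stand-alone class, not the library's actual code — is:

```python
class Singleton:
    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            # object.__new__ must be called with the class only; the extra
            # args/kwargs are forwarded to __init__ by the normal protocol.
            cls._instance = object.__new__(cls)
        return cls._instance


class Instrumentor(Singleton):
    def __init__(self, config=None):
        self.config = config


first = Instrumentor(config={"runtime.memory": ["rss", "vms"]})
second = Instrumentor()
assert first is second  # no TypeError; both names point at the single instance
```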
| {"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py\n@@ -47,7 +47,7 @@\n \n def __new__(cls, *args, **kwargs):\n if cls._instance is None:\n- cls._instance = object.__new__(cls, *args, **kwargs)\n+ cls._instance = object.__new__(cls)\n \n return cls._instance\n", "issue": "System metrics instrumentation not working with custom defined configuration\nSystem metric instrumentation is not functional if configuration on which metrics to be exported is explicitly provided. As a minimal example, this code\r\n\r\n```python\r\nfrom opentelemetry.metrics import set_meter_provider\r\nfrom opentelemetry.instrumentation.system_metrics import SystemMetricsInstrumentor\r\nfrom opentelemetry.sdk.metrics import MeterProvider\r\nfrom opentelemetry.sdk.metrics.export import (\r\n ConsoleMetricExporter,\r\n PeriodicExportingMetricReader,\r\n)\r\n\r\nexporter = ConsoleMetricExporter()\r\nset_meter_provider(MeterProvider([PeriodicExportingMetricReader(exporter)]))\r\n\r\nconfiguration = {\r\n \"runtime.memory\": [\"rss\", \"vms\"],\r\n \"runtime.cpu.time\": [\"user\", \"system\"],\r\n}\r\n\r\nSystemMetricsInstrumentor(config=configuration).instrument()\r\n```\r\n\r\nresults in\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \".../test.py\", line 15, in <module>\r\n SystemMetricsInstrumentor(config=configuration).instrument()\r\n File \".../lib/python3.10/site-packages/opentelemetry/instrumentation/instrumentor.py\", line 51, in __new__\r\n cls._instance = object.__new__(cls, *args, **kwargs)\r\nTypeError: object.__new__() takes exactly one argument (the type to instantiate)\r\n```\r\n\r\nI am happy to look into fixing this. 
Removing `*args` and `**kwargs` in `opentelemetry/instrumentation/instrumentor.py:51` actually solves the issue here but I'd like to understand the implications as this implies changing the interface class.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# type: ignore\n\n\"\"\"\nOpenTelemetry Base Instrumentor\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom logging import getLogger\nfrom typing import Collection, Optional\n\nfrom opentelemetry.instrumentation.dependencies import (\n DependencyConflict,\n get_dependency_conflicts,\n)\n\n_LOG = getLogger(__name__)\n\n\nclass BaseInstrumentor(ABC):\n \"\"\"An ABC for instrumentors\n\n Child classes of this ABC should instrument specific third\n party libraries or frameworks either by using the\n ``opentelemetry-instrument`` command or by calling their methods\n directly.\n\n Since every third party library or framework is different and has different\n instrumentation needs, more methods can be added to the child classes as\n needed to provide practical instrumentation to the end user.\n \"\"\"\n\n _instance = None\n _is_instrumented_by_opentelemetry = False\n\n def __new__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = object.__new__(cls, *args, **kwargs)\n\n return cls._instance\n\n @property\n def is_instrumented_by_opentelemetry(self):\n return self._is_instrumented_by_opentelemetry\n\n @abstractmethod\n def instrumentation_dependencies(self) -> Collection[str]:\n \"\"\"Return a list of python packages with versions that the will be instrumented.\n\n The format should be the same as used in requirements.txt or pyproject.toml.\n\n For example, if an instrumentation instruments requests 1.x, this method should look\n like:\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return ['requests ~= 1.0']\n\n This will ensure that the instrumentation will only be used when the specified library\n is present in the environment.\n \"\"\"\n\n def _instrument(self, **kwargs):\n \"\"\"Instrument the library\"\"\"\n\n @abstractmethod\n def _uninstrument(self, **kwargs):\n \"\"\"Uninstrument the library\"\"\"\n\n def _check_dependency_conflicts(self) -> Optional[DependencyConflict]:\n dependencies = self.instrumentation_dependencies()\n return get_dependency_conflicts(dependencies)\n\n def instrument(self, **kwargs):\n \"\"\"Instrument the library\n\n This method will be called without any optional arguments by the\n ``opentelemetry-instrument`` command.\n\n This means that calling this method directly without passing any\n optional values should do the very same thing that the\n ``opentelemetry-instrument`` command does.\n \"\"\"\n\n if self._is_instrumented_by_opentelemetry:\n _LOG.warning(\"Attempting to instrument while already instrumented\")\n return None\n\n # check if instrumentor has any missing or conflicting dependencies\n skip_dep_check = kwargs.pop(\"skip_dep_check\", False)\n if not skip_dep_check:\n conflict = 
self._check_dependency_conflicts()\n if conflict:\n _LOG.error(conflict)\n return None\n\n result = self._instrument( # pylint: disable=assignment-from-no-return\n **kwargs\n )\n self._is_instrumented_by_opentelemetry = True\n return result\n\n def uninstrument(self, **kwargs):\n \"\"\"Uninstrument the library\n\n See ``BaseInstrumentor.instrument`` for more information regarding the\n usage of ``kwargs``.\n \"\"\"\n\n if self._is_instrumented_by_opentelemetry:\n result = self._uninstrument(**kwargs)\n self._is_instrumented_by_opentelemetry = False\n return result\n\n _LOG.warning(\"Attempting to uninstrument while already uninstrumented\")\n\n return None\n\n\n__all__ = [\"BaseInstrumentor\"]\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# type: ignore\n\n\"\"\"\nOpenTelemetry Base Instrumentor\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom logging import getLogger\nfrom typing import Collection, Optional\n\nfrom opentelemetry.instrumentation.dependencies import (\n DependencyConflict,\n get_dependency_conflicts,\n)\n\n_LOG = getLogger(__name__)\n\n\nclass BaseInstrumentor(ABC):\n \"\"\"An ABC for instrumentors\n\n Child classes of this ABC should instrument specific third\n party libraries or frameworks either by using the\n ``opentelemetry-instrument`` command or by calling their methods\n directly.\n\n Since every third party library or framework is different and has different\n instrumentation needs, more methods can be added to the child classes as\n needed to provide practical instrumentation to the end user.\n \"\"\"\n\n _instance = None\n _is_instrumented_by_opentelemetry = False\n\n def __new__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = object.__new__(cls)\n\n return cls._instance\n\n @property\n def is_instrumented_by_opentelemetry(self):\n return self._is_instrumented_by_opentelemetry\n\n @abstractmethod\n def instrumentation_dependencies(self) -> Collection[str]:\n \"\"\"Return a list of python packages with versions that the will be instrumented.\n\n The format should be the same as used in requirements.txt or pyproject.toml.\n\n For example, if an instrumentation instruments requests 1.x, this method should look\n like:\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return ['requests ~= 1.0']\n\n This will ensure that the instrumentation will only be used when the specified library\n is present in the environment.\n \"\"\"\n\n def _instrument(self, **kwargs):\n \"\"\"Instrument the library\"\"\"\n\n @abstractmethod\n def _uninstrument(self, **kwargs):\n \"\"\"Uninstrument the library\"\"\"\n\n def _check_dependency_conflicts(self) -> Optional[DependencyConflict]:\n dependencies = self.instrumentation_dependencies()\n return get_dependency_conflicts(dependencies)\n\n def instrument(self, **kwargs):\n \"\"\"Instrument the library\n\n This method will be called 
without any optional arguments by the\n ``opentelemetry-instrument`` command.\n\n This means that calling this method directly without passing any\n optional values should do the very same thing that the\n ``opentelemetry-instrument`` command does.\n \"\"\"\n\n if self._is_instrumented_by_opentelemetry:\n _LOG.warning(\"Attempting to instrument while already instrumented\")\n return None\n\n # check if instrumentor has any missing or conflicting dependencies\n skip_dep_check = kwargs.pop(\"skip_dep_check\", False)\n if not skip_dep_check:\n conflict = self._check_dependency_conflicts()\n if conflict:\n _LOG.error(conflict)\n return None\n\n result = self._instrument( # pylint: disable=assignment-from-no-return\n **kwargs\n )\n self._is_instrumented_by_opentelemetry = True\n return result\n\n def uninstrument(self, **kwargs):\n \"\"\"Uninstrument the library\n\n See ``BaseInstrumentor.instrument`` for more information regarding the\n usage of ``kwargs``.\n \"\"\"\n\n if self._is_instrumented_by_opentelemetry:\n result = self._uninstrument(**kwargs)\n self._is_instrumented_by_opentelemetry = False\n return result\n\n _LOG.warning(\"Attempting to uninstrument while already uninstrumented\")\n\n return None\n\n\n__all__ = [\"BaseInstrumentor\"]\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py"}]} | 1,789 | 151 |
gh_patches_debug_11160 | rasdani/github-patches | git_diff | ivy-llc__ivy-14394 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
unique
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/torch/reduction_ops.py`
Content:
```
1 import ivy
2 from ivy.func_wrapper import with_unsupported_dtypes
3 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
4 from collections import namedtuple
5
6
7 @to_ivy_arrays_and_back
8 def dist(input, other, p=2):
9 return ivy.vector_norm(ivy.subtract(input, other), ord=p)
10
11
12 @to_ivy_arrays_and_back
13 def argmax(input, dim=None, keepdim=False):
14 return ivy.argmax(input, axis=dim, keepdims=keepdim)
15
16
17 @to_ivy_arrays_and_back
18 def argmin(input, dim=None, keepdim=False):
19 return ivy.argmin(input, axis=dim, keepdims=keepdim).astype(ivy.int64)
20
21
22 @to_ivy_arrays_and_back
23 def amax(input, dim=None, keepdim=False, *, out=None):
24 return ivy.max(input, axis=dim, keepdims=keepdim, out=out)
25
26
27 @to_ivy_arrays_and_back
28 def amin(input, dim=None, keepdim=False, *, out=None):
29 return ivy.min(input, axis=dim, keepdims=keepdim, out=out)
30
31
32 @to_ivy_arrays_and_back
33 def all(input, dim=None, keepdim=False, *, out=None):
34 input_dtype = ivy.as_ivy_dtype(input.dtype)
35 ret = ivy.all(input, axis=dim, keepdims=keepdim, out=out)
36 if ivy.is_uint_dtype(input_dtype):
37 ret = ivy.astype(ret, input_dtype, out=out)
38 return ret
39
40
41 @to_ivy_arrays_and_back
42 def any(input, dim=None, keepdim=False, *, out=None):
43 input_dtype = ivy.as_ivy_dtype(input.dtype)
44 ret = ivy.any(input, axis=dim, keepdims=keepdim, out=out)
45 if ivy.is_uint_dtype(input_dtype):
46 ret = ivy.astype(ret, input_dtype, out=out)
47 return ret
48
49
50 @to_ivy_arrays_and_back
51 def sum(input, dim=None, keepdim=False, *, out=None):
52 return ivy.sum(input, axis=dim, keepdims=keepdim, out=out)
53
54
55 @to_ivy_arrays_and_back
56 def mean(input, dim=None, keepdim=False, *, out=None):
57 return ivy.mean(input, axis=dim, keepdims=keepdim, out=out)
58
59
60 @to_ivy_arrays_and_back
61 def nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None):
62 return ivy.nanmean(input, axis=dim, keepdims=keepdim, dtype=dtype, out=out)
63
64
65 @to_ivy_arrays_and_back
66 def median(input, dim=None, keepdim=False, *, out=None):
67 if dim is None:
68 input = ivy.reshape(input, (-1,))
69 sorted_input = ivy.sort(input)
70 return sorted_input[(sorted_input.shape[0] - 1) // 2]
71
72 median_tuple = namedtuple("median", ["values", "indices"])
73
74 if input.ndim == 0:
75 result = median_tuple(input, ivy.array(0))
76 else:
77 sorted_indices = ivy.argsort(input, axis=dim)
78 median_indices = ivy.gather(
79 sorted_indices, (sorted_indices.shape[dim] - 1) // 2, axis=dim
80 )
81 median_values = ivy.take_along_axis(
82 input, ivy.expand_dims(median_indices, axis=dim), dim
83 ).squeeze(dim)
84
85 if keepdim:
86 median_values = ivy.expand_dims(median_values, axis=dim)
87 median_indices = ivy.expand_dims(median_indices, axis=dim)
88
89 result = median_tuple(median_values, median_indices)
90 if out is not None:
91 ivy.inplace_update(out[0], result.values)
92 ivy.inplace_update(out[1], result.indices)
93 return out
94 return result
95
96
97 @to_ivy_arrays_and_back
98 def std(input, dim, unbiased, keepdim=False, *, out=None):
99 return ivy.std(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)
100
101
102 @to_ivy_arrays_and_back
103 @with_unsupported_dtypes(
104 {
105 "1.11.0 and below": (
106 "float16",
107 "bfloat16",
108 )
109 },
110 "torch",
111 )
112 # TODO: the original torch.prod places * right before `dtype`
113 def prod(input, dim, *, keepdim=False, dtype=None):
114 if not dtype:
115 if "int" in input.dtype:
116 dtype = ivy.int64
117 elif "float" in input.dtype:
118 dtype = ivy.float32
119 return ivy.prod(input, axis=dim, dtype=dtype, keepdims=keepdim)
120
121
122 @to_ivy_arrays_and_back
123 def var(input, dim, unbiased, keepdim=False, *, out=None):
124 return ivy.var(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)
125
126
127 @to_ivy_arrays_and_back
128 def min(input, dim=None, keepdim=False, *, out=None):
129 if dim is None:
130 return ivy.min(input, axis=dim, keepdims=keepdim, out=out)
131 elif out is not None:
132 ivy.min(input, axis=dim, keepdims=keepdim, out=out[0])
133 ivy.argmin(input, axis=dim, keepdims=keepdim, out=out[1])
134 return out
135 else:
136 min_tuple = namedtuple("min", ["values", "indices"])
137 return min_tuple(
138 ivy.min(input, axis=dim, keepdims=keepdim),
139 ivy.argmin(input, axis=dim, keepdims=keepdim),
140 )
141
142
143 @to_ivy_arrays_and_back
144 def max(input, dim=None, keepdim=False, *, out=None):
145 if dim is None:
146 return ivy.max(input, axis=dim, keepdims=keepdim, out=out)
147 elif out is not None:
148 ivy.max(input, axis=dim, keepdims=keepdim, out=out[0])
149 ivy.argmax(input, axis=dim, keepdims=keepdim, out=out[1])
150 return out
151 else:
152 max_tuple = namedtuple("max", ["values", "indices"])
153 return max_tuple(
154 ivy.max(input, axis=dim, keepdims=keepdim),
155 ivy.argmax(input, axis=dim, keepdims=keepdim),
156 )
157
158
159 @to_ivy_arrays_and_back
160 def moveaxis(input, source, destination):
161 return ivy.moveaxis(input, source, destination)
162
163
164 @to_ivy_arrays_and_back
165 def std_mean(input, dim, unbiased, keepdim=False, *, out=None):
166 temp_std = ivy.std(
167 input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out
168 )
169 temp_mean = ivy.mean(input, axis=dim, keepdims=keepdim, out=out)
170 return temp_std, temp_mean
171
172
173 @to_ivy_arrays_and_back
174 def var_mean(input, dim, unbiased, keepdim=False, *, out=None):
175 temp_var = ivy.var(
176 input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out
177 )
178 temp_mean = ivy.mean(input, axis=dim, keepdims=keepdim, out=out)
179 return (temp_var, temp_mean)
180
181
182 @to_ivy_arrays_and_back
183 def aminmax(input, *, dim=None, keepdim=False, out=None):
184 minmax_tuple = namedtuple("minmax", ["min", "max"])
185 return minmax_tuple(
186 ivy.min(input, axis=dim, keepdims=keepdim, out=out),
187 ivy.max(input, axis=dim, keepdims=keepdim, out=out),
188 )
189
190
191 aminmax.unsupported_dtypes = {
192 "torch": ("float16", "bfloat16"),
193 "numpy": ("float16", "bfloat16"),
194 "jax": ("float16", "bfloat16"),
195 "tensorflow": ("float16", "bfloat16"),
196 }
197
198
199 @to_ivy_arrays_and_back
200 def quantile(input, q, dim=None, keepdim=False, *, interpolation="linear", out=None):
201 return ivy.quantile(
202 input, q, axis=dim, keepdims=keepdim, interpolation=interpolation, out=out
203 )
204
205
206 quantile.unsupported_dtypes = {
207 "torch": ("float16", "bfloat16"),
208 "numpy": ("float16", "bfloat16"),
209 "jax": ("float16", "bfloat16"),
210 "tensorflow": ("float16", "bfloat16"),
211 }
212
213
214 @to_ivy_arrays_and_back
215 def count_nonzero(input, dim=None):
216 return ivy.count_nonzero(input, axis=dim).astype(ivy.int64)
217
218
219 @to_ivy_arrays_and_back
220 def logsumexp(input, dim, keepdim=False, *, out=None):
221 c = ivy.max(input, axis=dim, keepdims=True)
222 if ivy.get_num_dims(c) > 0:
223 c = ivy.where(ivy.isinf(c), ivy.zeros_like(c), c)
224 elif not ivy.isinf(c):
225 c = 0
226 exponential = ivy.exp(input - c)
227 sum = ivy.sum(exponential, axis=dim, keepdims=keepdim)
228 ret = ivy.log(sum)
229 if not keepdim:
230 c = ivy.squeeze(c, axis=dim)
231 ret = ivy.add(ret, c, out=out)
232 return ret
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/torch/reduction_ops.py b/ivy/functional/frontends/torch/reduction_ops.py
--- a/ivy/functional/frontends/torch/reduction_ops.py
+++ b/ivy/functional/frontends/torch/reduction_ops.py
@@ -230,3 +230,24 @@
c = ivy.squeeze(c, axis=dim)
ret = ivy.add(ret, c, out=out)
return ret
+
+
+@to_ivy_arrays_and_back
+def unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
+ results = ivy.unique_all(input, axis=dim)
+
+ fields = ["values"]
+ if return_inverse:
+ fields.append("inverse_indices")
+ if return_counts:
+ fields.append("counts")
+
+ Results = namedtuple("Results", fields)
+
+ values = [results.values]
+ if return_inverse:
+ values.append(results.inverse_indices)
+ if return_counts:
+ values.append(results.counts)
+
+ return Results(*values)
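
A short usage sketch of the frontend function added above. This is hedged: it assumes the torch frontend re-exports `reduction_ops` at package level and that a backend (NumPy here) is installed; it is not taken from ivy's documentation.

```python
import ivy
import ivy.functional.frontends.torch as torch_frontend

ivy.set_backend("numpy")  # assumption: the NumPy backend is available

x = ivy.array([1, 2, 2, 3, 1])
out = torch_frontend.unique(x, return_inverse=True, return_counts=True)

# `out` is a namedtuple whose fields depend on the flags passed above.
print(out.values)           # unique values
print(out.inverse_indices)  # indices mapping x back into out.values
print(out.counts)           # occurrences of each unique value
```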
| {"golden_diff": "diff --git a/ivy/functional/frontends/torch/reduction_ops.py b/ivy/functional/frontends/torch/reduction_ops.py\n--- a/ivy/functional/frontends/torch/reduction_ops.py\n+++ b/ivy/functional/frontends/torch/reduction_ops.py\n@@ -230,3 +230,24 @@\n c = ivy.squeeze(c, axis=dim)\r\n ret = ivy.add(ret, c, out=out)\r\n return ret\r\n+\r\n+\r\n+@to_ivy_arrays_and_back\r\n+def unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None):\r\n+ results = ivy.unique_all(input, axis=dim)\r\n+\r\n+ fields = [\"values\"]\r\n+ if return_inverse:\r\n+ fields.append(\"inverse_indices\")\r\n+ if return_counts:\r\n+ fields.append(\"counts\")\r\n+\r\n+ Results = namedtuple(\"Results\", fields)\r\n+\r\n+ values = [results.values]\r\n+ if return_inverse:\r\n+ values.append(results.inverse_indices)\r\n+ if return_counts:\r\n+ values.append(results.counts)\r\n+\r\n+ return Results(*values)\n", "issue": "unique\n\n", "before_files": [{"content": "import ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes\r\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\r\nfrom collections import namedtuple\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef dist(input, other, p=2):\r\n return ivy.vector_norm(ivy.subtract(input, other), ord=p)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef argmax(input, dim=None, keepdim=False):\r\n return ivy.argmax(input, axis=dim, keepdims=keepdim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef argmin(input, dim=None, keepdim=False):\r\n return ivy.argmin(input, axis=dim, keepdims=keepdim).astype(ivy.int64)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef amax(input, dim=None, keepdim=False, *, out=None):\r\n return ivy.max(input, axis=dim, keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef amin(input, dim=None, keepdim=False, *, out=None):\r\n return ivy.min(input, axis=dim, keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef all(input, dim=None, keepdim=False, *, out=None):\r\n input_dtype = ivy.as_ivy_dtype(input.dtype)\r\n ret = ivy.all(input, axis=dim, keepdims=keepdim, out=out)\r\n if ivy.is_uint_dtype(input_dtype):\r\n ret = ivy.astype(ret, input_dtype, out=out)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef any(input, dim=None, keepdim=False, *, out=None):\r\n input_dtype = ivy.as_ivy_dtype(input.dtype)\r\n ret = ivy.any(input, axis=dim, keepdims=keepdim, out=out)\r\n if ivy.is_uint_dtype(input_dtype):\r\n ret = ivy.astype(ret, input_dtype, out=out)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef sum(input, dim=None, keepdim=False, *, out=None):\r\n return ivy.sum(input, axis=dim, keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef mean(input, dim=None, keepdim=False, *, out=None):\r\n return ivy.mean(input, axis=dim, keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None):\r\n return ivy.nanmean(input, axis=dim, keepdims=keepdim, dtype=dtype, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef median(input, dim=None, keepdim=False, *, out=None):\r\n if dim is None:\r\n input = ivy.reshape(input, (-1,))\r\n sorted_input = ivy.sort(input)\r\n return sorted_input[(sorted_input.shape[0] - 1) // 2]\r\n\r\n median_tuple = namedtuple(\"median\", [\"values\", \"indices\"])\r\n\r\n if input.ndim == 0:\r\n result = median_tuple(input, ivy.array(0))\r\n else:\r\n sorted_indices = ivy.argsort(input, axis=dim)\r\n median_indices = ivy.gather(\r\n sorted_indices, (sorted_indices.shape[dim] - 1) // 2, 
axis=dim\r\n )\r\n median_values = ivy.take_along_axis(\r\n input, ivy.expand_dims(median_indices, axis=dim), dim\r\n ).squeeze(dim)\r\n\r\n if keepdim:\r\n median_values = ivy.expand_dims(median_values, axis=dim)\r\n median_indices = ivy.expand_dims(median_indices, axis=dim)\r\n\r\n result = median_tuple(median_values, median_indices)\r\n if out is not None:\r\n ivy.inplace_update(out[0], result.values)\r\n ivy.inplace_update(out[1], result.indices)\r\n return out\r\n return result\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef std(input, dim, unbiased, keepdim=False, *, out=None):\r\n return ivy.std(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\n@with_unsupported_dtypes(\r\n {\r\n \"1.11.0 and below\": (\r\n \"float16\",\r\n \"bfloat16\",\r\n )\r\n },\r\n \"torch\",\r\n)\r\n# TODO: the original torch.prod places * right before `dtype`\r\ndef prod(input, dim, *, keepdim=False, dtype=None):\r\n if not dtype:\r\n if \"int\" in input.dtype:\r\n dtype = ivy.int64\r\n elif \"float\" in input.dtype:\r\n dtype = ivy.float32\r\n return ivy.prod(input, axis=dim, dtype=dtype, keepdims=keepdim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef var(input, dim, unbiased, keepdim=False, *, out=None):\r\n return ivy.var(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef min(input, dim=None, keepdim=False, *, out=None):\r\n if dim is None:\r\n return ivy.min(input, axis=dim, keepdims=keepdim, out=out)\r\n elif out is not None:\r\n ivy.min(input, axis=dim, keepdims=keepdim, out=out[0])\r\n ivy.argmin(input, axis=dim, keepdims=keepdim, out=out[1])\r\n return out\r\n else:\r\n min_tuple = namedtuple(\"min\", [\"values\", \"indices\"])\r\n return min_tuple(\r\n ivy.min(input, axis=dim, keepdims=keepdim),\r\n ivy.argmin(input, axis=dim, keepdims=keepdim),\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef max(input, dim=None, keepdim=False, *, out=None):\r\n if dim is None:\r\n return ivy.max(input, axis=dim, keepdims=keepdim, out=out)\r\n elif out is not None:\r\n ivy.max(input, axis=dim, keepdims=keepdim, out=out[0])\r\n ivy.argmax(input, axis=dim, keepdims=keepdim, out=out[1])\r\n return out\r\n else:\r\n max_tuple = namedtuple(\"max\", [\"values\", \"indices\"])\r\n return max_tuple(\r\n ivy.max(input, axis=dim, keepdims=keepdim),\r\n ivy.argmax(input, axis=dim, keepdims=keepdim),\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef moveaxis(input, source, destination):\r\n return ivy.moveaxis(input, source, destination)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef std_mean(input, dim, unbiased, keepdim=False, *, out=None):\r\n temp_std = ivy.std(\r\n input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out\r\n )\r\n temp_mean = ivy.mean(input, axis=dim, keepdims=keepdim, out=out)\r\n return temp_std, temp_mean\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef var_mean(input, dim, unbiased, keepdim=False, *, out=None):\r\n temp_var = ivy.var(\r\n input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out\r\n )\r\n temp_mean = ivy.mean(input, axis=dim, keepdims=keepdim, out=out)\r\n return (temp_var, temp_mean)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef aminmax(input, *, dim=None, keepdim=False, out=None):\r\n minmax_tuple = namedtuple(\"minmax\", [\"min\", \"max\"])\r\n return minmax_tuple(\r\n ivy.min(input, axis=dim, keepdims=keepdim, out=out),\r\n ivy.max(input, axis=dim, keepdims=keepdim, out=out),\r\n )\r\n\r\n\r\naminmax.unsupported_dtypes = {\r\n \"torch\": (\"float16\", \"bfloat16\"),\r\n 
\"numpy\": (\"float16\", \"bfloat16\"),\r\n \"jax\": (\"float16\", \"bfloat16\"),\r\n \"tensorflow\": (\"float16\", \"bfloat16\"),\r\n}\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef quantile(input, q, dim=None, keepdim=False, *, interpolation=\"linear\", out=None):\r\n return ivy.quantile(\r\n input, q, axis=dim, keepdims=keepdim, interpolation=interpolation, out=out\r\n )\r\n\r\n\r\nquantile.unsupported_dtypes = {\r\n \"torch\": (\"float16\", \"bfloat16\"),\r\n \"numpy\": (\"float16\", \"bfloat16\"),\r\n \"jax\": (\"float16\", \"bfloat16\"),\r\n \"tensorflow\": (\"float16\", \"bfloat16\"),\r\n}\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef count_nonzero(input, dim=None):\r\n return ivy.count_nonzero(input, axis=dim).astype(ivy.int64)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef logsumexp(input, dim, keepdim=False, *, out=None):\r\n c = ivy.max(input, axis=dim, keepdims=True)\r\n if ivy.get_num_dims(c) > 0:\r\n c = ivy.where(ivy.isinf(c), ivy.zeros_like(c), c)\r\n elif not ivy.isinf(c):\r\n c = 0\r\n exponential = ivy.exp(input - c)\r\n sum = ivy.sum(exponential, axis=dim, keepdims=keepdim)\r\n ret = ivy.log(sum)\r\n if not keepdim:\r\n c = ivy.squeeze(c, axis=dim)\r\n ret = ivy.add(ret, c, out=out)\r\n return ret\r\n", "path": "ivy/functional/frontends/torch/reduction_ops.py"}], "after_files": [{"content": "import ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes\r\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\r\nfrom collections import namedtuple\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef dist(input, other, p=2):\r\n return ivy.vector_norm(ivy.subtract(input, other), ord=p)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef argmax(input, dim=None, keepdim=False):\r\n return ivy.argmax(input, axis=dim, keepdims=keepdim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef argmin(input, dim=None, keepdim=False):\r\n return ivy.argmin(input, axis=dim, keepdims=keepdim).astype(ivy.int64)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef amax(input, dim=None, keepdim=False, *, out=None):\r\n return ivy.max(input, axis=dim, keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef amin(input, dim=None, keepdim=False, *, out=None):\r\n return ivy.min(input, axis=dim, keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef all(input, dim=None, keepdim=False, *, out=None):\r\n input_dtype = ivy.as_ivy_dtype(input.dtype)\r\n ret = ivy.all(input, axis=dim, keepdims=keepdim, out=out)\r\n if ivy.is_uint_dtype(input_dtype):\r\n ret = ivy.astype(ret, input_dtype, out=out)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef any(input, dim=None, keepdim=False, *, out=None):\r\n input_dtype = ivy.as_ivy_dtype(input.dtype)\r\n ret = ivy.any(input, axis=dim, keepdims=keepdim, out=out)\r\n if ivy.is_uint_dtype(input_dtype):\r\n ret = ivy.astype(ret, input_dtype, out=out)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef sum(input, dim=None, keepdim=False, *, out=None):\r\n return ivy.sum(input, axis=dim, keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef mean(input, dim=None, keepdim=False, *, out=None):\r\n return ivy.mean(input, axis=dim, keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None):\r\n return ivy.nanmean(input, axis=dim, keepdims=keepdim, dtype=dtype, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef median(input, dim=None, keepdim=False, *, out=None):\r\n if dim is None:\r\n input = ivy.reshape(input, (-1,))\r\n sorted_input = ivy.sort(input)\r\n return 
sorted_input[(sorted_input.shape[0] - 1) // 2]\r\n\r\n median_tuple = namedtuple(\"median\", [\"values\", \"indices\"])\r\n\r\n if input.ndim == 0:\r\n result = median_tuple(input, ivy.array(0))\r\n else:\r\n sorted_indices = ivy.argsort(input, axis=dim)\r\n median_indices = ivy.gather(\r\n sorted_indices, (sorted_indices.shape[dim] - 1) // 2, axis=dim\r\n )\r\n median_values = ivy.take_along_axis(\r\n input, ivy.expand_dims(median_indices, axis=dim), dim\r\n ).squeeze(dim)\r\n\r\n if keepdim:\r\n median_values = ivy.expand_dims(median_values, axis=dim)\r\n median_indices = ivy.expand_dims(median_indices, axis=dim)\r\n\r\n result = median_tuple(median_values, median_indices)\r\n if out is not None:\r\n ivy.inplace_update(out[0], result.values)\r\n ivy.inplace_update(out[1], result.indices)\r\n return out\r\n return result\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef std(input, dim, unbiased, keepdim=False, *, out=None):\r\n return ivy.std(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\n@with_unsupported_dtypes(\r\n {\r\n \"1.11.0 and below\": (\r\n \"float16\",\r\n \"bfloat16\",\r\n )\r\n },\r\n \"torch\",\r\n)\r\n# TODO: the original torch.prod places * right before `dtype`\r\ndef prod(input, dim, *, keepdim=False, dtype=None):\r\n if not dtype:\r\n if \"int\" in input.dtype:\r\n dtype = ivy.int64\r\n elif \"float\" in input.dtype:\r\n dtype = ivy.float32\r\n return ivy.prod(input, axis=dim, dtype=dtype, keepdims=keepdim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef var(input, dim, unbiased, keepdim=False, *, out=None):\r\n return ivy.var(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef min(input, dim=None, keepdim=False, *, out=None):\r\n if dim is None:\r\n return ivy.min(input, axis=dim, keepdims=keepdim, out=out)\r\n elif out is not None:\r\n ivy.min(input, axis=dim, keepdims=keepdim, out=out[0])\r\n ivy.argmin(input, axis=dim, keepdims=keepdim, out=out[1])\r\n return out\r\n else:\r\n min_tuple = namedtuple(\"min\", [\"values\", \"indices\"])\r\n return min_tuple(\r\n ivy.min(input, axis=dim, keepdims=keepdim),\r\n ivy.argmin(input, axis=dim, keepdims=keepdim),\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef max(input, dim=None, keepdim=False, *, out=None):\r\n if dim is None:\r\n return ivy.max(input, axis=dim, keepdims=keepdim, out=out)\r\n elif out is not None:\r\n ivy.max(input, axis=dim, keepdims=keepdim, out=out[0])\r\n ivy.argmax(input, axis=dim, keepdims=keepdim, out=out[1])\r\n return out\r\n else:\r\n max_tuple = namedtuple(\"max\", [\"values\", \"indices\"])\r\n return max_tuple(\r\n ivy.max(input, axis=dim, keepdims=keepdim),\r\n ivy.argmax(input, axis=dim, keepdims=keepdim),\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef moveaxis(input, source, destination):\r\n return ivy.moveaxis(input, source, destination)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef std_mean(input, dim, unbiased, keepdim=False, *, out=None):\r\n temp_std = ivy.std(\r\n input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out\r\n )\r\n temp_mean = ivy.mean(input, axis=dim, keepdims=keepdim, out=out)\r\n return temp_std, temp_mean\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef var_mean(input, dim, unbiased, keepdim=False, *, out=None):\r\n temp_var = ivy.var(\r\n input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out\r\n )\r\n temp_mean = ivy.mean(input, axis=dim, keepdims=keepdim, out=out)\r\n return (temp_var, temp_mean)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef 
aminmax(input, *, dim=None, keepdim=False, out=None):\r\n minmax_tuple = namedtuple(\"minmax\", [\"min\", \"max\"])\r\n return minmax_tuple(\r\n ivy.min(input, axis=dim, keepdims=keepdim, out=out),\r\n ivy.max(input, axis=dim, keepdims=keepdim, out=out),\r\n )\r\n\r\n\r\naminmax.unsupported_dtypes = {\r\n \"torch\": (\"float16\", \"bfloat16\"),\r\n \"numpy\": (\"float16\", \"bfloat16\"),\r\n \"jax\": (\"float16\", \"bfloat16\"),\r\n \"tensorflow\": (\"float16\", \"bfloat16\"),\r\n}\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef quantile(input, q, dim=None, keepdim=False, *, interpolation=\"linear\", out=None):\r\n return ivy.quantile(\r\n input, q, axis=dim, keepdims=keepdim, interpolation=interpolation, out=out\r\n )\r\n\r\n\r\nquantile.unsupported_dtypes = {\r\n \"torch\": (\"float16\", \"bfloat16\"),\r\n \"numpy\": (\"float16\", \"bfloat16\"),\r\n \"jax\": (\"float16\", \"bfloat16\"),\r\n \"tensorflow\": (\"float16\", \"bfloat16\"),\r\n}\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef count_nonzero(input, dim=None):\r\n return ivy.count_nonzero(input, axis=dim).astype(ivy.int64)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef logsumexp(input, dim, keepdim=False, *, out=None):\r\n c = ivy.max(input, axis=dim, keepdims=True)\r\n if ivy.get_num_dims(c) > 0:\r\n c = ivy.where(ivy.isinf(c), ivy.zeros_like(c), c)\r\n elif not ivy.isinf(c):\r\n c = 0\r\n exponential = ivy.exp(input - c)\r\n sum = ivy.sum(exponential, axis=dim, keepdims=keepdim)\r\n ret = ivy.log(sum)\r\n if not keepdim:\r\n c = ivy.squeeze(c, axis=dim)\r\n ret = ivy.add(ret, c, out=out)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None):\r\n results = ivy.unique_all(input, axis=dim)\r\n\r\n fields = [\"values\"]\r\n if return_inverse:\r\n fields.append(\"inverse_indices\")\r\n if return_counts:\r\n fields.append(\"counts\")\r\n\r\n Results = namedtuple(\"Results\", fields)\r\n\r\n values = [results.values]\r\n if return_inverse:\r\n values.append(results.inverse_indices)\r\n if return_counts:\r\n values.append(results.counts)\r\n\r\n return Results(*values)\r\n", "path": "ivy/functional/frontends/torch/reduction_ops.py"}]} | 2,983 | 245 |
gh_patches_debug_20205 | rasdani/github-patches | git_diff | pytorch__ignite-2564 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SSIM update does not work for the last batch (if batch size is different)
## 🐛 Bug description

<!-- A clear and concise description of what the bug is. -->
<!-- Please, add steps on how to reproduce it. -->
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
<!-- A clear and concise description of what you expected to happen. -->
## Environment
- PyTorch Version: 1.10.1
- Ignite Version: 0.4.8
- OS: Ubuntu
- How you installed Ignite (`conda`, `pip`, source):
- Python version: 3.9
- Any other relevant information:
If the previous batch and the current batch have different sizes, then the metric update throws an error. Attaching the screenshot of the error.
Screenshot of the basic code is also attached.

Implementation of ReduceLROnPlateau scheduler
## 🚀 Feature
While there might be some workarounds for using `ReduceLROnPlateau` with ignite's optimizer, it would be nicer to have a standalone feature like other schedulers in contrib.
[Relevant issue and code sample](https://github.com/pytorch/ignite/issues/462#issuecomment-771907302).
--- END ISSUE ---
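
The feature request above mentions that workarounds exist. A minimal sketch of one such workaround — driving `torch.optim.lr_scheduler.ReduceLROnPlateau` from an ignite event handler — is shown below. It is an illustrative assumption based on the linked comment, not ignite's eventual built-in API.

```python
import torch
from ignite.engine import Engine, Events

model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
torch_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", patience=2)


def validate(engine):
    # Placeholder validation metric; a real setup would run an evaluator here.
    return torch.rand(1).item()


trainer = Engine(lambda engine, batch: None)


@trainer.on(Events.EPOCH_COMPLETED)
def step_plateau_scheduler(engine):
    val_loss = validate(engine)
    torch_scheduler.step(val_loss)  # the torch scheduler adjusts the LR directly
```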
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/metrics/ssim.py`
Content:
```
1 from typing import Callable, Sequence, Union
2
3 import torch
4 import torch.nn.functional as F
5
6 from ignite.exceptions import NotComputableError
7 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
8
9 __all__ = ["SSIM"]
10
11
12 class SSIM(Metric):
13 """
14 Computes Structual Similarity Index Measure
15
16 Args:
17 data_range: Range of the image. Typically, ``1.0`` or ``255``.
18 kernel_size: Size of the kernel. Default: (11, 11)
19 sigma: Standard deviation of the gaussian kernel.
20 Argument is used if ``gaussian=True``. Default: (1.5, 1.5)
21 k1: Parameter of SSIM. Default: 0.01
22 k2: Parameter of SSIM. Default: 0.03
23 gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel
24 output_transform: A callable that is used to transform the
25 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
26 form expected by the metric.
27 device: specifies which device updates are accumulated on. Setting the metric's
28 device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
29 default, CPU.
30
31 Examples:
32 To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
33 The output of the engine's ``process_function`` needs to be in the format of
34 ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
35
36 ``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need
37 to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape.
38
39 For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
40
41 .. include:: defaults.rst
42 :start-after: :orphan:
43
44 .. testcode::
45
46 metric = SSIM(data_range=1.0)
47 metric.attach(default_evaluator, 'ssim')
48 preds = torch.rand([4, 3, 16, 16])
49 target = preds * 0.75
50 state = default_evaluator.run([[preds, target]])
51 print(state.metrics['ssim'])
52
53 .. testoutput::
54
55 0.9218971...
56
57 .. versionadded:: 0.4.2
58 """
59
60 def __init__(
61 self,
62 data_range: Union[int, float],
63 kernel_size: Union[int, Sequence[int]] = (11, 11),
64 sigma: Union[float, Sequence[float]] = (1.5, 1.5),
65 k1: float = 0.01,
66 k2: float = 0.03,
67 gaussian: bool = True,
68 output_transform: Callable = lambda x: x,
69 device: Union[str, torch.device] = torch.device("cpu"),
70 ):
71 if isinstance(kernel_size, int):
72 self.kernel_size = [kernel_size, kernel_size] # type: Sequence[int]
73 elif isinstance(kernel_size, Sequence):
74 self.kernel_size = kernel_size
75 else:
76 raise ValueError("Argument kernel_size should be either int or a sequence of int.")
77
78 if isinstance(sigma, float):
79 self.sigma = [sigma, sigma] # type: Sequence[float]
80 elif isinstance(sigma, Sequence):
81 self.sigma = sigma
82 else:
83 raise ValueError("Argument sigma should be either float or a sequence of float.")
84
85 if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):
86 raise ValueError(f"Expected kernel_size to have odd positive number. Got {kernel_size}.")
87
88 if any(y <= 0 for y in self.sigma):
89 raise ValueError(f"Expected sigma to have positive number. Got {sigma}.")
90
91 super(SSIM, self).__init__(output_transform=output_transform, device=device)
92 self.gaussian = gaussian
93 self.c1 = (k1 * data_range) ** 2
94 self.c2 = (k2 * data_range) ** 2
95 self.pad_h = (self.kernel_size[0] - 1) // 2
96 self.pad_w = (self.kernel_size[1] - 1) // 2
97 self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
98
99 @reinit__is_reduced
100 def reset(self) -> None:
101 self._sum_of_ssim = torch.tensor(0.0, device=self._device)
102 self._num_examples = 0
103 self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
104
105 def _uniform(self, kernel_size: int) -> torch.Tensor:
106 max, min = 2.5, -2.5
107 ksize_half = (kernel_size - 1) * 0.5
108 kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)
109 for i, j in enumerate(kernel):
110 if min <= j <= max:
111 kernel[i] = 1 / (max - min)
112 else:
113 kernel[i] = 0
114
115 return kernel.unsqueeze(dim=0) # (1, kernel_size)
116
117 def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:
118 ksize_half = (kernel_size - 1) * 0.5
119 kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)
120 gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))
121 return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)
122
123 def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:
124 if self.gaussian:
125 kernel_x = self._gaussian(kernel_size[0], sigma[0])
126 kernel_y = self._gaussian(kernel_size[1], sigma[1])
127 else:
128 kernel_x = self._uniform(kernel_size[0])
129 kernel_y = self._uniform(kernel_size[1])
130
131 return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)
132
133 @reinit__is_reduced
134 def update(self, output: Sequence[torch.Tensor]) -> None:
135 y_pred, y = output[0].detach(), output[1].detach()
136
137 if y_pred.dtype != y.dtype:
138 raise TypeError(
139 f"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}."
140 )
141
142 if y_pred.shape != y.shape:
143 raise ValueError(
144 f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
145 )
146
147 if len(y_pred.shape) != 4 or len(y.shape) != 4:
148 raise ValueError(
149 f"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
150 )
151
152 channel = y_pred.size(1)
153 if len(self._kernel.shape) < 4:
154 self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)
155
156 y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
157 y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
158
159 input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y])
160 outputs = F.conv2d(input_list, self._kernel, groups=channel)
161
162 output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]
163
164 mu_pred_sq = output_list[0].pow(2)
165 mu_target_sq = output_list[1].pow(2)
166 mu_pred_target = output_list[0] * output_list[1]
167
168 sigma_pred_sq = output_list[2] - mu_pred_sq
169 sigma_target_sq = output_list[3] - mu_target_sq
170 sigma_pred_target = output_list[4] - mu_pred_target
171
172 a1 = 2 * mu_pred_target + self.c1
173 a2 = 2 * sigma_pred_target + self.c2
174 b1 = mu_pred_sq + mu_target_sq + self.c1
175 b2 = sigma_pred_sq + sigma_target_sq + self.c2
176
177 ssim_idx = (a1 * a2) / (b1 * b2)
178 self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(self._device)
179
180 self._num_examples += y.shape[0]
181
182 @sync_all_reduce("_sum_of_ssim", "_num_examples")
183 def compute(self) -> torch.Tensor:
184 if self._num_examples == 0:
185 raise NotComputableError("SSIM must have at least one example before it can be computed.")
186 return self._sum_of_ssim / self._num_examples
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -98,7 +98,7 @@
@reinit__is_reduced
def reset(self) -> None:
- self._sum_of_ssim = torch.tensor(0.0, device=self._device)
+ self._sum_of_ssim = torch.tensor(0.0, dtype=torch.float64, device=self._device)
self._num_examples = 0
self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
@@ -180,7 +180,7 @@
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_ssim", "_num_examples")
- def compute(self) -> torch.Tensor:
+ def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("SSIM must have at least one example before it can be computed.")
- return self._sum_of_ssim / self._num_examples
+ return (self._sum_of_ssim / self._num_examples).item()
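
A minimal sketch of how the patched metric above is exercised. It assumes pytorch-ignite with this patch applied; the batch sizes intentionally differ, mirroring the "last batch" situation in the report.

```python
import torch
from ignite.metrics import SSIM

metric = SSIM(data_range=1.0)
metric.reset()

for batch_size in (4, 2):  # a last batch smaller than the others
    preds = torch.rand(batch_size, 3, 16, 16)
    target = preds * 0.75
    metric.update((preds, target))

score = metric.compute()
print(type(score), score)  # with the patch, a plain Python float accumulated in float64
```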
| {"golden_diff": "diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py\n--- a/ignite/metrics/ssim.py\n+++ b/ignite/metrics/ssim.py\n@@ -98,7 +98,7 @@\n \n @reinit__is_reduced\n def reset(self) -> None:\n- self._sum_of_ssim = torch.tensor(0.0, device=self._device)\n+ self._sum_of_ssim = torch.tensor(0.0, dtype=torch.float64, device=self._device)\n self._num_examples = 0\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n \n@@ -180,7 +180,7 @@\n self._num_examples += y.shape[0]\n \n @sync_all_reduce(\"_sum_of_ssim\", \"_num_examples\")\n- def compute(self) -> torch.Tensor:\n+ def compute(self) -> float:\n if self._num_examples == 0:\n raise NotComputableError(\"SSIM must have at least one example before it can be computed.\")\n- return self._sum_of_ssim / self._num_examples\n+ return (self._sum_of_ssim / self._num_examples).item()\n", "issue": "SSIM update does not work for the last batch (if batch size is different)\n## \ud83d\udc1b Bug description\r\n\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n<!-- Please, add steps on how to reproduce it. -->\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n## Environment\r\n\r\n- PyTorch Version :1.10.1\r\n- Ignite Version ():0.4.8\r\n- OS : Ubuntu\r\n- How you installed Ignite (`conda`, `pip`, source): \r\n- Python version: 3.9\r\n- Any other relevant information:\r\n\r\nIf the previous batch and the current batch are different then it throws the error. Attaching the screen shot of error. \r\n\r\nScreenshot of the basic code is also attached.\r\n\r\n\nImplementation of ReduceLROnPlateau scheduler \n## \ud83d\ude80 Feature\r\n\r\nWhile there might be some workarounds in using `ReduceLROnPlateau` with ignite's optimizer , it would be nicer to have a standalone feature like other schedulers in contrib.\r\n\r\n[Relevant issue and code sample](https://github.com/pytorch/ignite/issues/462#issuecomment-771907302).\n", "before_files": [{"content": "from typing import Callable, Sequence, Union\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"SSIM\"]\n\n\nclass SSIM(Metric):\n \"\"\"\n Computes Structual Similarity Index Measure\n\n Args:\n data_range: Range of the image. Typically, ``1.0`` or ``255``.\n kernel_size: Size of the kernel. Default: (11, 11)\n sigma: Standard deviation of the gaussian kernel.\n Argument is used if ``gaussian=True``. Default: (1.5, 1.5)\n k1: Parameter of SSIM. Default: 0.01\n k2: Parameter of SSIM. Default: 0.03\n gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel\n output_transform: A callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By\n default, CPU.\n\n Examples:\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in the format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n\n ``y_pred`` and ``y`` can be un-normalized or normalized image tensors. 
Depending on that, the user might need\n to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape.\n\n For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n metric = SSIM(data_range=1.0)\n metric.attach(default_evaluator, 'ssim')\n preds = torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['ssim'])\n\n .. testoutput::\n\n 0.9218971...\n\n .. versionadded:: 0.4.2\n \"\"\"\n\n def __init__(\n self,\n data_range: Union[int, float],\n kernel_size: Union[int, Sequence[int]] = (11, 11),\n sigma: Union[float, Sequence[float]] = (1.5, 1.5),\n k1: float = 0.01,\n k2: float = 0.03,\n gaussian: bool = True,\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n if isinstance(kernel_size, int):\n self.kernel_size = [kernel_size, kernel_size] # type: Sequence[int]\n elif isinstance(kernel_size, Sequence):\n self.kernel_size = kernel_size\n else:\n raise ValueError(\"Argument kernel_size should be either int or a sequence of int.\")\n\n if isinstance(sigma, float):\n self.sigma = [sigma, sigma] # type: Sequence[float]\n elif isinstance(sigma, Sequence):\n self.sigma = sigma\n else:\n raise ValueError(\"Argument sigma should be either float or a sequence of float.\")\n\n if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):\n raise ValueError(f\"Expected kernel_size to have odd positive number. Got {kernel_size}.\")\n\n if any(y <= 0 for y in self.sigma):\n raise ValueError(f\"Expected sigma to have positive number. Got {sigma}.\")\n\n super(SSIM, self).__init__(output_transform=output_transform, device=device)\n self.gaussian = gaussian\n self.c1 = (k1 * data_range) ** 2\n self.c2 = (k2 * data_range) ** 2\n self.pad_h = (self.kernel_size[0] - 1) // 2\n self.pad_w = (self.kernel_size[1] - 1) // 2\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum_of_ssim = torch.tensor(0.0, device=self._device)\n self._num_examples = 0\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n def _uniform(self, kernel_size: int) -> torch.Tensor:\n max, min = 2.5, -2.5\n ksize_half = (kernel_size - 1) * 0.5\n kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n for i, j in enumerate(kernel):\n if min <= j <= max:\n kernel[i] = 1 / (max - min)\n else:\n kernel[i] = 0\n\n return kernel.unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:\n ksize_half = (kernel_size - 1) * 0.5\n kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))\n return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:\n if self.gaussian:\n kernel_x = self._gaussian(kernel_size[0], sigma[0])\n kernel_y = self._gaussian(kernel_size[1], sigma[1])\n else:\n kernel_x = self._uniform(kernel_size[0])\n kernel_y = self._uniform(kernel_size[1])\n\n return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n y_pred, y = 
output[0].detach(), output[1].detach()\n\n if y_pred.dtype != y.dtype:\n raise TypeError(\n f\"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}.\"\n )\n\n if y_pred.shape != y.shape:\n raise ValueError(\n f\"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n if len(y_pred.shape) != 4 or len(y.shape) != 4:\n raise ValueError(\n f\"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n channel = y_pred.size(1)\n if len(self._kernel.shape) < 4:\n self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)\n\n y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n\n input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y])\n outputs = F.conv2d(input_list, self._kernel, groups=channel)\n\n output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]\n\n mu_pred_sq = output_list[0].pow(2)\n mu_target_sq = output_list[1].pow(2)\n mu_pred_target = output_list[0] * output_list[1]\n\n sigma_pred_sq = output_list[2] - mu_pred_sq\n sigma_target_sq = output_list[3] - mu_target_sq\n sigma_pred_target = output_list[4] - mu_pred_target\n\n a1 = 2 * mu_pred_target + self.c1\n a2 = 2 * sigma_pred_target + self.c2\n b1 = mu_pred_sq + mu_target_sq + self.c1\n b2 = sigma_pred_sq + sigma_target_sq + self.c2\n\n ssim_idx = (a1 * a2) / (b1 * b2)\n self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(self._device)\n\n self._num_examples += y.shape[0]\n\n @sync_all_reduce(\"_sum_of_ssim\", \"_num_examples\")\n def compute(self) -> torch.Tensor:\n if self._num_examples == 0:\n raise NotComputableError(\"SSIM must have at least one example before it can be computed.\")\n return self._sum_of_ssim / self._num_examples\n", "path": "ignite/metrics/ssim.py"}], "after_files": [{"content": "from typing import Callable, Sequence, Union\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"SSIM\"]\n\n\nclass SSIM(Metric):\n \"\"\"\n Computes Structual Similarity Index Measure\n\n Args:\n data_range: Range of the image. Typically, ``1.0`` or ``255``.\n kernel_size: Size of the kernel. Default: (11, 11)\n sigma: Standard deviation of the gaussian kernel.\n Argument is used if ``gaussian=True``. Default: (1.5, 1.5)\n k1: Parameter of SSIM. Default: 0.01\n k2: Parameter of SSIM. Default: 0.03\n gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel\n output_transform: A callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By\n default, CPU.\n\n Examples:\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in the format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n\n ``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need\n to adjust ``data_range``. 
``y_pred`` and ``y`` should have the same shape.\n\n For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n metric = SSIM(data_range=1.0)\n metric.attach(default_evaluator, 'ssim')\n preds = torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['ssim'])\n\n .. testoutput::\n\n 0.9218971...\n\n .. versionadded:: 0.4.2\n \"\"\"\n\n def __init__(\n self,\n data_range: Union[int, float],\n kernel_size: Union[int, Sequence[int]] = (11, 11),\n sigma: Union[float, Sequence[float]] = (1.5, 1.5),\n k1: float = 0.01,\n k2: float = 0.03,\n gaussian: bool = True,\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n if isinstance(kernel_size, int):\n self.kernel_size = [kernel_size, kernel_size] # type: Sequence[int]\n elif isinstance(kernel_size, Sequence):\n self.kernel_size = kernel_size\n else:\n raise ValueError(\"Argument kernel_size should be either int or a sequence of int.\")\n\n if isinstance(sigma, float):\n self.sigma = [sigma, sigma] # type: Sequence[float]\n elif isinstance(sigma, Sequence):\n self.sigma = sigma\n else:\n raise ValueError(\"Argument sigma should be either float or a sequence of float.\")\n\n if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):\n raise ValueError(f\"Expected kernel_size to have odd positive number. Got {kernel_size}.\")\n\n if any(y <= 0 for y in self.sigma):\n raise ValueError(f\"Expected sigma to have positive number. Got {sigma}.\")\n\n super(SSIM, self).__init__(output_transform=output_transform, device=device)\n self.gaussian = gaussian\n self.c1 = (k1 * data_range) ** 2\n self.c2 = (k2 * data_range) ** 2\n self.pad_h = (self.kernel_size[0] - 1) // 2\n self.pad_w = (self.kernel_size[1] - 1) // 2\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum_of_ssim = torch.tensor(0.0, dtype=torch.float64, device=self._device)\n self._num_examples = 0\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n def _uniform(self, kernel_size: int) -> torch.Tensor:\n max, min = 2.5, -2.5\n ksize_half = (kernel_size - 1) * 0.5\n kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n for i, j in enumerate(kernel):\n if min <= j <= max:\n kernel[i] = 1 / (max - min)\n else:\n kernel[i] = 0\n\n return kernel.unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:\n ksize_half = (kernel_size - 1) * 0.5\n kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))\n return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:\n if self.gaussian:\n kernel_x = self._gaussian(kernel_size[0], sigma[0])\n kernel_y = self._gaussian(kernel_size[1], sigma[1])\n else:\n kernel_x = self._uniform(kernel_size[0])\n kernel_y = self._uniform(kernel_size[1])\n\n return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n y_pred, y = output[0].detach(), output[1].detach()\n\n if y_pred.dtype 
!= y.dtype:\n raise TypeError(\n f\"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}.\"\n )\n\n if y_pred.shape != y.shape:\n raise ValueError(\n f\"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n if len(y_pred.shape) != 4 or len(y.shape) != 4:\n raise ValueError(\n f\"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n channel = y_pred.size(1)\n if len(self._kernel.shape) < 4:\n self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)\n\n y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n\n input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y])\n outputs = F.conv2d(input_list, self._kernel, groups=channel)\n\n output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]\n\n mu_pred_sq = output_list[0].pow(2)\n mu_target_sq = output_list[1].pow(2)\n mu_pred_target = output_list[0] * output_list[1]\n\n sigma_pred_sq = output_list[2] - mu_pred_sq\n sigma_target_sq = output_list[3] - mu_target_sq\n sigma_pred_target = output_list[4] - mu_pred_target\n\n a1 = 2 * mu_pred_target + self.c1\n a2 = 2 * sigma_pred_target + self.c2\n b1 = mu_pred_sq + mu_target_sq + self.c1\n b2 = sigma_pred_sq + sigma_target_sq + self.c2\n\n ssim_idx = (a1 * a2) / (b1 * b2)\n self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(self._device)\n\n self._num_examples += y.shape[0]\n\n @sync_all_reduce(\"_sum_of_ssim\", \"_num_examples\")\n def compute(self) -> float:\n if self._num_examples == 0:\n raise NotComputableError(\"SSIM must have at least one example before it can be computed.\")\n return (self._sum_of_ssim / self._num_examples).item()\n", "path": "ignite/metrics/ssim.py"}]} | 3,258 | 274 |
gh_patches_debug_4674 | rasdani/github-patches | git_diff | arviz-devs__arviz-1198 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Passing axes to plot_density fails with several datasets
**Describe the bug**
I would like to pass existing axes to plot_density with the `ax` keyword. This works fine if I plot data from a single dataset. But I get an error when I try to plot several datasets at once (I think the error arises when it tries to produce a legend).
Plotting several datasets without providing axes also works fine.
**To Reproduce**
```python
import arviz
import matplotlib.pyplot as plt
test_data = arviz.load_arviz_data('centered_eight')
# This works
fig, ax = plt.subplots(3, 3)
ax1 = arviz.plot_density(data=[test_data.posterior], var_names=['mu', 'theta'], ax=ax);
# This works as well
ax2 = arviz.plot_density(data=[test_data.prior, test_data.posterior], var_names=['mu', 'theta']);
# This does not work
fig3, ax3 = plt.subplots(3, 3)
arviz.plot_density(data=[test_data.prior, test_data.posterior], var_names=['mu', 'theta'], ax=ax3);
```
This is the error I get:
```python
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-133-7feead268671> in <module>
11 # This does not work
12 fig3, ax3 = plt.subplots(3, 3)
---> 13 arviz.plot_density(data=[test_data.prior, test_data.posterior], var_names=['mu', 'theta'], ax=ax3);
~/miniconda3/envs/sunode/lib/python3.7/site-packages/arviz/plots/densityplot.py in plot_density(data, group, data_labels, var_names, transform, credible_interval, point_estimate, colors, outline, hpd_markers, shade, bw, figsize, textsize, ax, backend, backend_kwargs, show)
263 # TODO: Add backend kwargs
264 plot = get_plotting_function("plot_density", "densityplot", backend)
--> 265 ax = plot(**plot_density_kwargs)
266 return ax
~/miniconda3/envs/sunode/lib/python3.7/site-packages/arviz/plots/backends/matplotlib/densityplot.py in plot_density(ax, all_labels, to_plot, colors, bw, figsize, length_plotters, rows, cols, titlesize, xt_labelsize, linewidth, markersize, credible_interval, point_estimate, hpd_markers, outline, shade, n_data, data_labels, backend_kwargs, show)
75 if n_data > 1:
76 for m_idx, label in enumerate(data_labels):
---> 77 ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)
78 ax[0].legend(fontsize=xt_labelsize)
79
AttributeError: 'numpy.ndarray' object has no attribute 'plot'
```
**Additional context**
ArviZ version: 0.7.0
matplotlib version: 3.2.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/plots/backends/matplotlib/densityplot.py`
Content:
```
1 """Matplotlib Densityplot."""
2 import matplotlib.pyplot as plt
3 import numpy as np
4
5 from . import backend_show
6 from ....stats import hdi
7 from ...plot_utils import (
8 make_label,
9 _create_axes_grid,
10 calculate_point_estimate,
11 )
12 from ....numeric_utils import _fast_kde, get_bins
13
14
15 def plot_density(
16 ax,
17 all_labels,
18 to_plot,
19 colors,
20 bw,
21 figsize,
22 length_plotters,
23 rows,
24 cols,
25 titlesize,
26 xt_labelsize,
27 linewidth,
28 markersize,
29 hdi_prob,
30 point_estimate,
31 hdi_markers,
32 outline,
33 shade,
34 n_data,
35 data_labels,
36 backend_kwargs,
37 show,
38 ):
39 """Matplotlib densityplot."""
40 if ax is None:
41 _, ax = _create_axes_grid(
42 length_plotters,
43 rows,
44 cols,
45 figsize=figsize,
46 squeeze=False,
47 backend="matplotlib",
48 backend_kwargs=backend_kwargs,
49 )
50 else:
51 ax = np.atleast_2d(ax)
52
53 axis_map = {label: ax_ for label, ax_ in zip(all_labels, np.ravel(ax))}
54
55 for m_idx, plotters in enumerate(to_plot):
56 for var_name, selection, values in plotters:
57 label = make_label(var_name, selection)
58 _d_helper(
59 values.flatten(),
60 label,
61 colors[m_idx],
62 bw,
63 titlesize,
64 xt_labelsize,
65 linewidth,
66 markersize,
67 hdi_prob,
68 point_estimate,
69 hdi_markers,
70 outline,
71 shade,
72 axis_map[label],
73 )
74
75 if n_data > 1:
76 for m_idx, label in enumerate(data_labels):
77 ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)
78 ax[0].legend(fontsize=xt_labelsize)
79
80 if backend_show(show):
81 plt.show()
82
83 return ax
84
85
86 def _d_helper(
87 vec,
88 vname,
89 color,
90 bw,
91 titlesize,
92 xt_labelsize,
93 linewidth,
94 markersize,
95 hdi_prob,
96 point_estimate,
97 hdi_markers,
98 outline,
99 shade,
100 ax,
101 ):
102 """Plot an individual dimension.
103
104 Parameters
105 ----------
106 vec : array
107 1D array from trace
108 vname : str
109 variable name
110 color : str
111 matplotlib color
112 bw : float
113 Bandwidth scaling factor. Should be larger than 0. The higher this number the smoother the
114 KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule of thumb
115 (the default used rule by SciPy).
116 titlesize : float
117 font size for title
118 xt_labelsize : float
119 fontsize for xticks
120 linewidth : float
121 Thickness of lines
122 markersize : float
123 Size of markers
124 hdi_prob : float
125 Probability for the highest density interval. Defaults to 0.94
126 point_estimate : Optional[str]
127 Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.
128 Defaults to 'auto' i.e. it falls back to default set in rcParams.
129 shade : float
130 Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1
131 (opaque). Defaults to 0.
132 ax : matplotlib axes
133 """
134 if vec.dtype.kind == "f":
135 if hdi_prob != 1:
136 hdi_ = hdi(vec, hdi_prob, multimodal=False)
137 new_vec = vec[(vec >= hdi_[0]) & (vec <= hdi_[1])]
138 else:
139 new_vec = vec
140
141 density, xmin, xmax = _fast_kde(new_vec, bw=bw)
142 density *= hdi_prob
143 x = np.linspace(xmin, xmax, len(density))
144 ymin = density[0]
145 ymax = density[-1]
146
147 if outline:
148 ax.plot(x, density, color=color, lw=linewidth)
149 ax.plot([xmin, xmin], [-ymin / 100, ymin], color=color, ls="-", lw=linewidth)
150 ax.plot([xmax, xmax], [-ymax / 100, ymax], color=color, ls="-", lw=linewidth)
151
152 if shade:
153 ax.fill_between(x, density, color=color, alpha=shade)
154
155 else:
156 xmin, xmax = hdi(vec, hdi_prob, multimodal=False)
157 bins = get_bins(vec)
158 if outline:
159 ax.hist(vec, bins=bins, color=color, histtype="step", align="left")
160 if shade:
161 ax.hist(vec, bins=bins, color=color, alpha=shade)
162
163 if hdi_markers:
164 ax.plot(xmin, 0, hdi_markers, color=color, markeredgecolor="k", markersize=markersize)
165 ax.plot(xmax, 0, hdi_markers, color=color, markeredgecolor="k", markersize=markersize)
166
167 if point_estimate is not None:
168 est = calculate_point_estimate(point_estimate, vec, bw)
169 ax.plot(est, 0, "o", color=color, markeredgecolor="k", markersize=markersize)
170
171 ax.set_yticks([])
172 ax.set_title(vname, fontsize=titlesize, wrap=True)
173 for pos in ["left", "right", "top"]:
174 ax.spines[pos].set_visible(False)
175 ax.tick_params(labelsize=xt_labelsize)
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/plots/backends/matplotlib/densityplot.py b/arviz/plots/backends/matplotlib/densityplot.py
--- a/arviz/plots/backends/matplotlib/densityplot.py
+++ b/arviz/plots/backends/matplotlib/densityplot.py
@@ -74,8 +74,8 @@
if n_data > 1:
for m_idx, label in enumerate(data_labels):
- ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)
- ax[0].legend(fontsize=xt_labelsize)
+ ax.item(0).plot([], label=label, c=colors[m_idx], markersize=markersize)
+ ax.item(0).legend(fontsize=xt_labelsize)
if backend_show(show):
plt.show()
| {"golden_diff": "diff --git a/arviz/plots/backends/matplotlib/densityplot.py b/arviz/plots/backends/matplotlib/densityplot.py\n--- a/arviz/plots/backends/matplotlib/densityplot.py\n+++ b/arviz/plots/backends/matplotlib/densityplot.py\n@@ -74,8 +74,8 @@\n \n if n_data > 1:\n for m_idx, label in enumerate(data_labels):\n- ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)\n- ax[0].legend(fontsize=xt_labelsize)\n+ ax.item(0).plot([], label=label, c=colors[m_idx], markersize=markersize)\n+ ax.item(0).legend(fontsize=xt_labelsize)\n \n if backend_show(show):\n plt.show()\n", "issue": "Passing axes to plot_density fails with several datasets\n**Describe the bug**\r\nI would like to pass existing axes to plot_density with the `ax` keyword. This works fine if I plot data from a single dataset. But I get an error when I try to plot several datasets at once (I think the error arises when it tries to produce a legend).\r\nPlotting several datasets without providing axes also works fine.\r\n\r\n**To Reproduce**\r\n```python\r\nimport arviz\r\nimport matplotlib.pyplot as plt\r\n\r\ntest_data = arviz.load_arviz_data('centered_eight')\r\n\r\n# This works\r\nfig, ax = plt.subplots(3, 3)\r\nax1 = arviz.plot_density(data=[test_data.posterior], var_names=['mu', 'theta'], ax=ax);\r\n# This works as well\r\nax2 = arviz.plot_density(data=[test_data.prior, test_data.posterior], var_names=['mu', 'theta']);\r\n# This does not work\r\nfig3, ax3 = plt.subplots(3, 3)\r\narviz.plot_density(data=[test_data.prior, test_data.posterior], var_names=['mu', 'theta'], ax=ax3);\r\n```\r\n\r\nThis is the error I get:\r\n```python\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-133-7feead268671> in <module>\r\n 11 # This does not work\r\n 12 fig3, ax3 = plt.subplots(3, 3)\r\n---> 13 arviz.plot_density(data=[test_data.prior, test_data.posterior], var_names=['mu', 'theta'], ax=ax3);\r\n\r\n~/miniconda3/envs/sunode/lib/python3.7/site-packages/arviz/plots/densityplot.py in plot_density(data, group, data_labels, var_names, transform, credible_interval, point_estimate, colors, outline, hpd_markers, shade, bw, figsize, textsize, ax, backend, backend_kwargs, show)\r\n 263 # TODO: Add backend kwargs\r\n 264 plot = get_plotting_function(\"plot_density\", \"densityplot\", backend)\r\n--> 265 ax = plot(**plot_density_kwargs)\r\n 266 return ax\r\n\r\n~/miniconda3/envs/sunode/lib/python3.7/site-packages/arviz/plots/backends/matplotlib/densityplot.py in plot_density(ax, all_labels, to_plot, colors, bw, figsize, length_plotters, rows, cols, titlesize, xt_labelsize, linewidth, markersize, credible_interval, point_estimate, hpd_markers, outline, shade, n_data, data_labels, backend_kwargs, show)\r\n 75 if n_data > 1:\r\n 76 for m_idx, label in enumerate(data_labels):\r\n---> 77 ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)\r\n 78 ax[0].legend(fontsize=xt_labelsize)\r\n 79 \r\n\r\nAttributeError: 'numpy.ndarray' object has no attribute 'plot'\r\n```\r\n\r\n**Additional context**\r\nArviZ version: 0.7.0\r\nmatplotlib version: 3.2.1\r\n\n", "before_files": [{"content": "\"\"\"Matplotlib Densityplot.\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom . 
import backend_show\nfrom ....stats import hdi\nfrom ...plot_utils import (\n make_label,\n _create_axes_grid,\n calculate_point_estimate,\n)\nfrom ....numeric_utils import _fast_kde, get_bins\n\n\ndef plot_density(\n ax,\n all_labels,\n to_plot,\n colors,\n bw,\n figsize,\n length_plotters,\n rows,\n cols,\n titlesize,\n xt_labelsize,\n linewidth,\n markersize,\n hdi_prob,\n point_estimate,\n hdi_markers,\n outline,\n shade,\n n_data,\n data_labels,\n backend_kwargs,\n show,\n):\n \"\"\"Matplotlib densityplot.\"\"\"\n if ax is None:\n _, ax = _create_axes_grid(\n length_plotters,\n rows,\n cols,\n figsize=figsize,\n squeeze=False,\n backend=\"matplotlib\",\n backend_kwargs=backend_kwargs,\n )\n else:\n ax = np.atleast_2d(ax)\n\n axis_map = {label: ax_ for label, ax_ in zip(all_labels, np.ravel(ax))}\n\n for m_idx, plotters in enumerate(to_plot):\n for var_name, selection, values in plotters:\n label = make_label(var_name, selection)\n _d_helper(\n values.flatten(),\n label,\n colors[m_idx],\n bw,\n titlesize,\n xt_labelsize,\n linewidth,\n markersize,\n hdi_prob,\n point_estimate,\n hdi_markers,\n outline,\n shade,\n axis_map[label],\n )\n\n if n_data > 1:\n for m_idx, label in enumerate(data_labels):\n ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)\n ax[0].legend(fontsize=xt_labelsize)\n\n if backend_show(show):\n plt.show()\n\n return ax\n\n\ndef _d_helper(\n vec,\n vname,\n color,\n bw,\n titlesize,\n xt_labelsize,\n linewidth,\n markersize,\n hdi_prob,\n point_estimate,\n hdi_markers,\n outline,\n shade,\n ax,\n):\n \"\"\"Plot an individual dimension.\n\n Parameters\n ----------\n vec : array\n 1D array from trace\n vname : str\n variable name\n color : str\n matplotlib color\n bw : float\n Bandwidth scaling factor. Should be larger than 0. The higher this number the smoother the\n KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule of thumb\n (the default used rule by SciPy).\n titlesize : float\n font size for title\n xt_labelsize : float\n fontsize for xticks\n linewidth : float\n Thickness of lines\n markersize : float\n Size of markers\n hdi_prob : float\n Probability for the highest density interval. Defaults to 0.94\n point_estimate : Optional[str]\n Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.\n Defaults to 'auto' i.e. it falls back to default set in rcParams.\n shade : float\n Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1\n (opaque). 
Defaults to 0.\n ax : matplotlib axes\n \"\"\"\n if vec.dtype.kind == \"f\":\n if hdi_prob != 1:\n hdi_ = hdi(vec, hdi_prob, multimodal=False)\n new_vec = vec[(vec >= hdi_[0]) & (vec <= hdi_[1])]\n else:\n new_vec = vec\n\n density, xmin, xmax = _fast_kde(new_vec, bw=bw)\n density *= hdi_prob\n x = np.linspace(xmin, xmax, len(density))\n ymin = density[0]\n ymax = density[-1]\n\n if outline:\n ax.plot(x, density, color=color, lw=linewidth)\n ax.plot([xmin, xmin], [-ymin / 100, ymin], color=color, ls=\"-\", lw=linewidth)\n ax.plot([xmax, xmax], [-ymax / 100, ymax], color=color, ls=\"-\", lw=linewidth)\n\n if shade:\n ax.fill_between(x, density, color=color, alpha=shade)\n\n else:\n xmin, xmax = hdi(vec, hdi_prob, multimodal=False)\n bins = get_bins(vec)\n if outline:\n ax.hist(vec, bins=bins, color=color, histtype=\"step\", align=\"left\")\n if shade:\n ax.hist(vec, bins=bins, color=color, alpha=shade)\n\n if hdi_markers:\n ax.plot(xmin, 0, hdi_markers, color=color, markeredgecolor=\"k\", markersize=markersize)\n ax.plot(xmax, 0, hdi_markers, color=color, markeredgecolor=\"k\", markersize=markersize)\n\n if point_estimate is not None:\n est = calculate_point_estimate(point_estimate, vec, bw)\n ax.plot(est, 0, \"o\", color=color, markeredgecolor=\"k\", markersize=markersize)\n\n ax.set_yticks([])\n ax.set_title(vname, fontsize=titlesize, wrap=True)\n for pos in [\"left\", \"right\", \"top\"]:\n ax.spines[pos].set_visible(False)\n ax.tick_params(labelsize=xt_labelsize)\n", "path": "arviz/plots/backends/matplotlib/densityplot.py"}], "after_files": [{"content": "\"\"\"Matplotlib Densityplot.\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom . import backend_show\nfrom ....stats import hdi\nfrom ...plot_utils import (\n make_label,\n _create_axes_grid,\n calculate_point_estimate,\n)\nfrom ....numeric_utils import _fast_kde, get_bins\n\n\ndef plot_density(\n ax,\n all_labels,\n to_plot,\n colors,\n bw,\n figsize,\n length_plotters,\n rows,\n cols,\n titlesize,\n xt_labelsize,\n linewidth,\n markersize,\n hdi_prob,\n point_estimate,\n hdi_markers,\n outline,\n shade,\n n_data,\n data_labels,\n backend_kwargs,\n show,\n):\n \"\"\"Matplotlib densityplot.\"\"\"\n if ax is None:\n _, ax = _create_axes_grid(\n length_plotters,\n rows,\n cols,\n figsize=figsize,\n squeeze=False,\n backend=\"matplotlib\",\n backend_kwargs=backend_kwargs,\n )\n else:\n ax = np.atleast_2d(ax)\n\n axis_map = {label: ax_ for label, ax_ in zip(all_labels, np.ravel(ax))}\n\n for m_idx, plotters in enumerate(to_plot):\n for var_name, selection, values in plotters:\n label = make_label(var_name, selection)\n _d_helper(\n values.flatten(),\n label,\n colors[m_idx],\n bw,\n titlesize,\n xt_labelsize,\n linewidth,\n markersize,\n hdi_prob,\n point_estimate,\n hdi_markers,\n outline,\n shade,\n axis_map[label],\n )\n\n if n_data > 1:\n for m_idx, label in enumerate(data_labels):\n ax.item(0).plot([], label=label, c=colors[m_idx], markersize=markersize)\n ax.item(0).legend(fontsize=xt_labelsize)\n\n if backend_show(show):\n plt.show()\n\n return ax\n\n\ndef _d_helper(\n vec,\n vname,\n color,\n bw,\n titlesize,\n xt_labelsize,\n linewidth,\n markersize,\n hdi_prob,\n point_estimate,\n hdi_markers,\n outline,\n shade,\n ax,\n):\n \"\"\"Plot an individual dimension.\n\n Parameters\n ----------\n vec : array\n 1D array from trace\n vname : str\n variable name\n color : str\n matplotlib color\n bw : float\n Bandwidth scaling factor. Should be larger than 0. The higher this number the smoother the\n KDE will be. 
Defaults to 4.5 which is essentially the same as the Scott's rule of thumb\n (the default used rule by SciPy).\n titlesize : float\n font size for title\n xt_labelsize : float\n fontsize for xticks\n linewidth : float\n Thickness of lines\n markersize : float\n Size of markers\n hdi_prob : float\n Probability for the highest density interval. Defaults to 0.94\n point_estimate : Optional[str]\n Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.\n Defaults to 'auto' i.e. it falls back to default set in rcParams.\n shade : float\n Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1\n (opaque). Defaults to 0.\n ax : matplotlib axes\n \"\"\"\n if vec.dtype.kind == \"f\":\n if hdi_prob != 1:\n hdi_ = hdi(vec, hdi_prob, multimodal=False)\n new_vec = vec[(vec >= hdi_[0]) & (vec <= hdi_[1])]\n else:\n new_vec = vec\n\n density, xmin, xmax = _fast_kde(new_vec, bw=bw)\n density *= hdi_prob\n x = np.linspace(xmin, xmax, len(density))\n ymin = density[0]\n ymax = density[-1]\n\n if outline:\n ax.plot(x, density, color=color, lw=linewidth)\n ax.plot([xmin, xmin], [-ymin / 100, ymin], color=color, ls=\"-\", lw=linewidth)\n ax.plot([xmax, xmax], [-ymax / 100, ymax], color=color, ls=\"-\", lw=linewidth)\n\n if shade:\n ax.fill_between(x, density, color=color, alpha=shade)\n\n else:\n xmin, xmax = hdi(vec, hdi_prob, multimodal=False)\n bins = get_bins(vec)\n if outline:\n ax.hist(vec, bins=bins, color=color, histtype=\"step\", align=\"left\")\n if shade:\n ax.hist(vec, bins=bins, color=color, alpha=shade)\n\n if hdi_markers:\n ax.plot(xmin, 0, hdi_markers, color=color, markeredgecolor=\"k\", markersize=markersize)\n ax.plot(xmax, 0, hdi_markers, color=color, markeredgecolor=\"k\", markersize=markersize)\n\n if point_estimate is not None:\n est = calculate_point_estimate(point_estimate, vec, bw)\n ax.plot(est, 0, \"o\", color=color, markeredgecolor=\"k\", markersize=markersize)\n\n ax.set_yticks([])\n ax.set_title(vname, fontsize=titlesize, wrap=True)\n for pos in [\"left\", \"right\", \"top\"]:\n ax.spines[pos].set_visible(False)\n ax.tick_params(labelsize=xt_labelsize)\n", "path": "arviz/plots/backends/matplotlib/densityplot.py"}]} | 2,581 | 179 |
gh_patches_debug_3388 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-9057 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo (?) in Aggr.Aggregation's description
### 📚 Describe the documentation issue
First of all, I would like to thank everyone who has helped me over the last few weeks to get going with Pytorch-Geometric. I really appreciate it.
There might be a minor typo in the documentation regarding the abstract class `aggr.Aggregation`.
https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.aggr.Aggregation.html#torch_geometric.nn.aggr.Aggregation
See second code box:
```python
# Feature matrix holding 10 elements with 64 features each:
x = torch.randn(10, 64)
# Define the boundary indices for three sets:
ptr = torch.tensor([0, 4, 7, 10])
output = aggr(x, ptr=ptr) # Output shape: [4, 64]
```
### Suggest a potential alternative/fix
I believe it should be `# Output shape: [3, 64]` in the last line. It says *three sets* just above. The previous example on indices also has output shape `[3, 64]`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch_geometric/nn/aggr/base.py`
Content:
```
1 from typing import Final, Optional, Tuple
2
3 import torch
4 from torch import Tensor
5
6 from torch_geometric.experimental import disable_dynamic_shapes
7 from torch_geometric.utils import scatter, segment, to_dense_batch
8
9
10 class Aggregation(torch.nn.Module):
11 r"""An abstract base class for implementing custom aggregations.
12
13 Aggregation can be either performed via an :obj:`index` vector, which
14 defines the mapping from input elements to their location in the output:
15
16 |
17
18 .. image:: https://raw.githubusercontent.com/rusty1s/pytorch_scatter/
19 master/docs/source/_figures/add.svg?sanitize=true
20 :align: center
21 :width: 400px
22
23 |
24
25 Notably, :obj:`index` does not have to be sorted (for most aggregation
26 operators):
27
28 .. code-block::
29
30 # Feature matrix holding 10 elements with 64 features each:
31 x = torch.randn(10, 64)
32
33 # Assign each element to one of three sets:
34 index = torch.tensor([0, 0, 1, 0, 2, 0, 2, 1, 0, 2])
35
36 output = aggr(x, index) # Output shape: [3, 64]
37
38 Alternatively, aggregation can be achieved via a "compressed" index vector
39 called :obj:`ptr`. Here, elements within the same set need to be grouped
40 together in the input, and :obj:`ptr` defines their boundaries:
41
42 .. code-block::
43
44 # Feature matrix holding 10 elements with 64 features each:
45 x = torch.randn(10, 64)
46
47 # Define the boundary indices for three sets:
48 ptr = torch.tensor([0, 4, 7, 10])
49
50 output = aggr(x, ptr=ptr) # Output shape: [4, 64]
51
52 Note that at least one of :obj:`index` or :obj:`ptr` must be defined.
53
54 Shapes:
55 - **input:**
56 node features :math:`(*, |\mathcal{V}|, F_{in})` or edge features
57 :math:`(*, |\mathcal{E}|, F_{in})`,
58 index vector :math:`(|\mathcal{V}|)` or :math:`(|\mathcal{E}|)`,
59 - **output:** graph features :math:`(*, |\mathcal{G}|, F_{out})` or
60 node features :math:`(*, |\mathcal{V}|, F_{out})`
61 """
62 def __init__(self) -> None:
63 super().__init__()
64
65 self._deterministic: Final[bool] = (
66 torch.are_deterministic_algorithms_enabled()
67 or torch.is_deterministic_algorithms_warn_only_enabled())
68
69 def forward(
70 self,
71 x: Tensor,
72 index: Optional[Tensor] = None,
73 ptr: Optional[Tensor] = None,
74 dim_size: Optional[int] = None,
75 dim: int = -2,
76 max_num_elements: Optional[int] = None,
77 ) -> Tensor:
78 r"""Forward pass.
79
80 Args:
81 x (torch.Tensor): The source tensor.
82 index (torch.Tensor, optional): The indices of elements for
83 applying the aggregation.
84 One of :obj:`index` or :obj:`ptr` must be defined.
85 (default: :obj:`None`)
86 ptr (torch.Tensor, optional): If given, computes the aggregation
87 based on sorted inputs in CSR representation.
88 One of :obj:`index` or :obj:`ptr` must be defined.
89 (default: :obj:`None`)
90 dim_size (int, optional): The size of the output tensor at
91 dimension :obj:`dim` after aggregation. (default: :obj:`None`)
92 dim (int, optional): The dimension in which to aggregate.
93 (default: :obj:`-2`)
94 max_num_elements: (int, optional): The maximum number of elements
95 within a single aggregation group. (default: :obj:`None`)
96 """
97 pass
98
99 def reset_parameters(self):
100 r"""Resets all learnable parameters of the module."""
101 pass
102
103 @disable_dynamic_shapes(required_args=['dim_size'])
104 def __call__(
105 self,
106 x: Tensor,
107 index: Optional[Tensor] = None,
108 ptr: Optional[Tensor] = None,
109 dim_size: Optional[int] = None,
110 dim: int = -2,
111 **kwargs,
112 ) -> Tensor:
113
114 if dim >= x.dim() or dim < -x.dim():
115 raise ValueError(f"Encountered invalid dimension '{dim}' of "
116 f"source tensor with {x.dim()} dimensions")
117
118 if index is None and ptr is None:
119 index = x.new_zeros(x.size(dim), dtype=torch.long)
120
121 if ptr is not None:
122 if dim_size is None:
123 dim_size = ptr.numel() - 1
124 elif dim_size != ptr.numel() - 1:
125 raise ValueError(f"Encountered invalid 'dim_size' (got "
126 f"'{dim_size}' but expected "
127 f"'{ptr.numel() - 1}')")
128
129 if index is not None and dim_size is None:
130 dim_size = int(index.max()) + 1 if index.numel() > 0 else 0
131
132 try:
133 return super().__call__(x, index=index, ptr=ptr, dim_size=dim_size,
134 dim=dim, **kwargs)
135 except (IndexError, RuntimeError) as e:
136 if index is not None:
137 if index.numel() > 0 and dim_size <= int(index.max()):
138 raise ValueError(f"Encountered invalid 'dim_size' (got "
139 f"'{dim_size}' but expected "
140 f">= '{int(index.max()) + 1}')")
141 raise e
142
143 def __repr__(self) -> str:
144 return f'{self.__class__.__name__}()'
145
146 # Assertions ##############################################################
147
148 def assert_index_present(self, index: Optional[Tensor]):
149 # TODO Currently, not all aggregators support `ptr`. This assert helps
150 # to ensure that we require `index` to be passed to the computation:
151 if index is None:
152 raise NotImplementedError(
153 "Aggregation requires 'index' to be specified")
154
155 def assert_sorted_index(self, index: Optional[Tensor]):
156 if index is not None and not torch.all(index[:-1] <= index[1:]):
157 raise ValueError("Can not perform aggregation since the 'index' "
158 "tensor is not sorted. Specifically, if you use "
159 "this aggregation as part of 'MessagePassing`, "
160 "ensure that 'edge_index' is sorted by "
161 "destination nodes, e.g., by calling "
162 "`data.sort(sort_by_row=False)`")
163
164 def assert_two_dimensional_input(self, x: Tensor, dim: int):
165 if x.dim() != 2:
166 raise ValueError(f"Aggregation requires two-dimensional inputs "
167 f"(got '{x.dim()}')")
168
169 if dim not in [-2, 0]:
170 raise ValueError(f"Aggregation needs to perform aggregation in "
171 f"first dimension (got '{dim}')")
172
173 # Helper methods ##########################################################
174
175 def reduce(self, x: Tensor, index: Optional[Tensor] = None,
176 ptr: Optional[Tensor] = None, dim_size: Optional[int] = None,
177 dim: int = -2, reduce: str = 'sum') -> Tensor:
178
179 if ptr is not None:
180 if index is None or self._deterministic:
181 ptr = expand_left(ptr, dim, dims=x.dim())
182 return segment(x, ptr, reduce=reduce)
183
184 if index is None:
185 raise RuntimeError("Aggregation requires 'index' to be specified")
186
187 return scatter(x, index, dim, dim_size, reduce)
188
189 def to_dense_batch(
190 self,
191 x: Tensor,
192 index: Optional[Tensor] = None,
193 ptr: Optional[Tensor] = None,
194 dim_size: Optional[int] = None,
195 dim: int = -2,
196 fill_value: float = 0.0,
197 max_num_elements: Optional[int] = None,
198 ) -> Tuple[Tensor, Tensor]:
199
200 # TODO Currently, `to_dense_batch` can only operate on `index`:
201 self.assert_index_present(index)
202 self.assert_sorted_index(index)
203 self.assert_two_dimensional_input(x, dim)
204
205 return to_dense_batch(
206 x,
207 index,
208 batch_size=dim_size,
209 fill_value=fill_value,
210 max_num_nodes=max_num_elements,
211 )
212
213
214 ###############################################################################
215
216
217 def expand_left(ptr: Tensor, dim: int, dims: int) -> Tensor:
218 for _ in range(dims + dim if dim < 0 else dim):
219 ptr = ptr.unsqueeze(0)
220 return ptr
221
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py
--- a/torch_geometric/nn/aggr/base.py
+++ b/torch_geometric/nn/aggr/base.py
@@ -47,7 +47,7 @@
# Define the boundary indices for three sets:
ptr = torch.tensor([0, 4, 7, 10])
- output = aggr(x, ptr=ptr) # Output shape: [4, 64]
+ output = aggr(x, ptr=ptr) # Output shape: [3, 64]
Note that at least one of :obj:`index` or :obj:`ptr` must be defined.
| {"golden_diff": "diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py\n--- a/torch_geometric/nn/aggr/base.py\n+++ b/torch_geometric/nn/aggr/base.py\n@@ -47,7 +47,7 @@\n # Define the boundary indices for three sets:\n ptr = torch.tensor([0, 4, 7, 10])\n \n- output = aggr(x, ptr=ptr) # Output shape: [4, 64]\n+ output = aggr(x, ptr=ptr) # Output shape: [3, 64]\n \n Note that at least one of :obj:`index` or :obj:`ptr` must be defined.\n", "issue": "Typo (?) in Aggr.Aggregation's description\n### \ud83d\udcda Describe the documentation issue\r\n\r\nFirst of all, I would like to thank everyone who has helped me in the last weeks to get going with Pytorch-Geometric. I really appreciate it.\r\n\r\nThere might be a minor typo in the documentation regarding the abstract class `aggr.Aggregation`. \r\n\r\nhttps://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.aggr.Aggregation.html#torch_geometric.nn.aggr.Aggregation\r\n\r\nSee second code box:\r\n\r\n```python\r\n# Feature matrix holding 10 elements with 64 features each:\r\nx = torch.randn(10, 64)\r\n\r\n# Define the boundary indices for three sets:\r\nptr = torch.tensor([0, 4, 7, 10])\r\n\r\noutput = aggr(x, ptr=ptr) # Output shape: [4, 64]\r\n```\r\n\r\n\r\n### Suggest a potential alternative/fix\r\n\r\nI believe it should be `# Output shape: [3, 64]` in the last line. It says *three sets* just above. The previous example on indices also has output shape `[3, 64]`.\n", "before_files": [{"content": "from typing import Final, Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_geometric.experimental import disable_dynamic_shapes\nfrom torch_geometric.utils import scatter, segment, to_dense_batch\n\n\nclass Aggregation(torch.nn.Module):\n r\"\"\"An abstract base class for implementing custom aggregations.\n\n Aggregation can be either performed via an :obj:`index` vector, which\n defines the mapping from input elements to their location in the output:\n\n |\n\n .. image:: https://raw.githubusercontent.com/rusty1s/pytorch_scatter/\n master/docs/source/_figures/add.svg?sanitize=true\n :align: center\n :width: 400px\n\n |\n\n Notably, :obj:`index` does not have to be sorted (for most aggregation\n operators):\n\n .. code-block::\n\n # Feature matrix holding 10 elements with 64 features each:\n x = torch.randn(10, 64)\n\n # Assign each element to one of three sets:\n index = torch.tensor([0, 0, 1, 0, 2, 0, 2, 1, 0, 2])\n\n output = aggr(x, index) # Output shape: [3, 64]\n\n Alternatively, aggregation can be achieved via a \"compressed\" index vector\n called :obj:`ptr`. Here, elements within the same set need to be grouped\n together in the input, and :obj:`ptr` defines their boundaries:\n\n .. 
code-block::\n\n # Feature matrix holding 10 elements with 64 features each:\n x = torch.randn(10, 64)\n\n # Define the boundary indices for three sets:\n ptr = torch.tensor([0, 4, 7, 10])\n\n output = aggr(x, ptr=ptr) # Output shape: [4, 64]\n\n Note that at least one of :obj:`index` or :obj:`ptr` must be defined.\n\n Shapes:\n - **input:**\n node features :math:`(*, |\\mathcal{V}|, F_{in})` or edge features\n :math:`(*, |\\mathcal{E}|, F_{in})`,\n index vector :math:`(|\\mathcal{V}|)` or :math:`(|\\mathcal{E}|)`,\n - **output:** graph features :math:`(*, |\\mathcal{G}|, F_{out})` or\n node features :math:`(*, |\\mathcal{V}|, F_{out})`\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n\n self._deterministic: Final[bool] = (\n torch.are_deterministic_algorithms_enabled()\n or torch.is_deterministic_algorithms_warn_only_enabled())\n\n def forward(\n self,\n x: Tensor,\n index: Optional[Tensor] = None,\n ptr: Optional[Tensor] = None,\n dim_size: Optional[int] = None,\n dim: int = -2,\n max_num_elements: Optional[int] = None,\n ) -> Tensor:\n r\"\"\"Forward pass.\n\n Args:\n x (torch.Tensor): The source tensor.\n index (torch.Tensor, optional): The indices of elements for\n applying the aggregation.\n One of :obj:`index` or :obj:`ptr` must be defined.\n (default: :obj:`None`)\n ptr (torch.Tensor, optional): If given, computes the aggregation\n based on sorted inputs in CSR representation.\n One of :obj:`index` or :obj:`ptr` must be defined.\n (default: :obj:`None`)\n dim_size (int, optional): The size of the output tensor at\n dimension :obj:`dim` after aggregation. (default: :obj:`None`)\n dim (int, optional): The dimension in which to aggregate.\n (default: :obj:`-2`)\n max_num_elements: (int, optional): The maximum number of elements\n within a single aggregation group. (default: :obj:`None`)\n \"\"\"\n pass\n\n def reset_parameters(self):\n r\"\"\"Resets all learnable parameters of the module.\"\"\"\n pass\n\n @disable_dynamic_shapes(required_args=['dim_size'])\n def __call__(\n self,\n x: Tensor,\n index: Optional[Tensor] = None,\n ptr: Optional[Tensor] = None,\n dim_size: Optional[int] = None,\n dim: int = -2,\n **kwargs,\n ) -> Tensor:\n\n if dim >= x.dim() or dim < -x.dim():\n raise ValueError(f\"Encountered invalid dimension '{dim}' of \"\n f\"source tensor with {x.dim()} dimensions\")\n\n if index is None and ptr is None:\n index = x.new_zeros(x.size(dim), dtype=torch.long)\n\n if ptr is not None:\n if dim_size is None:\n dim_size = ptr.numel() - 1\n elif dim_size != ptr.numel() - 1:\n raise ValueError(f\"Encountered invalid 'dim_size' (got \"\n f\"'{dim_size}' but expected \"\n f\"'{ptr.numel() - 1}')\")\n\n if index is not None and dim_size is None:\n dim_size = int(index.max()) + 1 if index.numel() > 0 else 0\n\n try:\n return super().__call__(x, index=index, ptr=ptr, dim_size=dim_size,\n dim=dim, **kwargs)\n except (IndexError, RuntimeError) as e:\n if index is not None:\n if index.numel() > 0 and dim_size <= int(index.max()):\n raise ValueError(f\"Encountered invalid 'dim_size' (got \"\n f\"'{dim_size}' but expected \"\n f\">= '{int(index.max()) + 1}')\")\n raise e\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}()'\n\n # Assertions ##############################################################\n\n def assert_index_present(self, index: Optional[Tensor]):\n # TODO Currently, not all aggregators support `ptr`. 
This assert helps\n # to ensure that we require `index` to be passed to the computation:\n if index is None:\n raise NotImplementedError(\n \"Aggregation requires 'index' to be specified\")\n\n def assert_sorted_index(self, index: Optional[Tensor]):\n if index is not None and not torch.all(index[:-1] <= index[1:]):\n raise ValueError(\"Can not perform aggregation since the 'index' \"\n \"tensor is not sorted. Specifically, if you use \"\n \"this aggregation as part of 'MessagePassing`, \"\n \"ensure that 'edge_index' is sorted by \"\n \"destination nodes, e.g., by calling \"\n \"`data.sort(sort_by_row=False)`\")\n\n def assert_two_dimensional_input(self, x: Tensor, dim: int):\n if x.dim() != 2:\n raise ValueError(f\"Aggregation requires two-dimensional inputs \"\n f\"(got '{x.dim()}')\")\n\n if dim not in [-2, 0]:\n raise ValueError(f\"Aggregation needs to perform aggregation in \"\n f\"first dimension (got '{dim}')\")\n\n # Helper methods ##########################################################\n\n def reduce(self, x: Tensor, index: Optional[Tensor] = None,\n ptr: Optional[Tensor] = None, dim_size: Optional[int] = None,\n dim: int = -2, reduce: str = 'sum') -> Tensor:\n\n if ptr is not None:\n if index is None or self._deterministic:\n ptr = expand_left(ptr, dim, dims=x.dim())\n return segment(x, ptr, reduce=reduce)\n\n if index is None:\n raise RuntimeError(\"Aggregation requires 'index' to be specified\")\n\n return scatter(x, index, dim, dim_size, reduce)\n\n def to_dense_batch(\n self,\n x: Tensor,\n index: Optional[Tensor] = None,\n ptr: Optional[Tensor] = None,\n dim_size: Optional[int] = None,\n dim: int = -2,\n fill_value: float = 0.0,\n max_num_elements: Optional[int] = None,\n ) -> Tuple[Tensor, Tensor]:\n\n # TODO Currently, `to_dense_batch` can only operate on `index`:\n self.assert_index_present(index)\n self.assert_sorted_index(index)\n self.assert_two_dimensional_input(x, dim)\n\n return to_dense_batch(\n x,\n index,\n batch_size=dim_size,\n fill_value=fill_value,\n max_num_nodes=max_num_elements,\n )\n\n\n###############################################################################\n\n\ndef expand_left(ptr: Tensor, dim: int, dims: int) -> Tensor:\n for _ in range(dims + dim if dim < 0 else dim):\n ptr = ptr.unsqueeze(0)\n return ptr\n", "path": "torch_geometric/nn/aggr/base.py"}], "after_files": [{"content": "from typing import Final, Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_geometric.experimental import disable_dynamic_shapes\nfrom torch_geometric.utils import scatter, segment, to_dense_batch\n\n\nclass Aggregation(torch.nn.Module):\n r\"\"\"An abstract base class for implementing custom aggregations.\n\n Aggregation can be either performed via an :obj:`index` vector, which\n defines the mapping from input elements to their location in the output:\n\n |\n\n .. image:: https://raw.githubusercontent.com/rusty1s/pytorch_scatter/\n master/docs/source/_figures/add.svg?sanitize=true\n :align: center\n :width: 400px\n\n |\n\n Notably, :obj:`index` does not have to be sorted (for most aggregation\n operators):\n\n .. code-block::\n\n # Feature matrix holding 10 elements with 64 features each:\n x = torch.randn(10, 64)\n\n # Assign each element to one of three sets:\n index = torch.tensor([0, 0, 1, 0, 2, 0, 2, 1, 0, 2])\n\n output = aggr(x, index) # Output shape: [3, 64]\n\n Alternatively, aggregation can be achieved via a \"compressed\" index vector\n called :obj:`ptr`. 
Here, elements within the same set need to be grouped\n together in the input, and :obj:`ptr` defines their boundaries:\n\n .. code-block::\n\n # Feature matrix holding 10 elements with 64 features each:\n x = torch.randn(10, 64)\n\n # Define the boundary indices for three sets:\n ptr = torch.tensor([0, 4, 7, 10])\n\n output = aggr(x, ptr=ptr) # Output shape: [3, 64]\n\n Note that at least one of :obj:`index` or :obj:`ptr` must be defined.\n\n Shapes:\n - **input:**\n node features :math:`(*, |\\mathcal{V}|, F_{in})` or edge features\n :math:`(*, |\\mathcal{E}|, F_{in})`,\n index vector :math:`(|\\mathcal{V}|)` or :math:`(|\\mathcal{E}|)`,\n - **output:** graph features :math:`(*, |\\mathcal{G}|, F_{out})` or\n node features :math:`(*, |\\mathcal{V}|, F_{out})`\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n\n self._deterministic: Final[bool] = (\n torch.are_deterministic_algorithms_enabled()\n or torch.is_deterministic_algorithms_warn_only_enabled())\n\n def forward(\n self,\n x: Tensor,\n index: Optional[Tensor] = None,\n ptr: Optional[Tensor] = None,\n dim_size: Optional[int] = None,\n dim: int = -2,\n max_num_elements: Optional[int] = None,\n ) -> Tensor:\n r\"\"\"Forward pass.\n\n Args:\n x (torch.Tensor): The source tensor.\n index (torch.Tensor, optional): The indices of elements for\n applying the aggregation.\n One of :obj:`index` or :obj:`ptr` must be defined.\n (default: :obj:`None`)\n ptr (torch.Tensor, optional): If given, computes the aggregation\n based on sorted inputs in CSR representation.\n One of :obj:`index` or :obj:`ptr` must be defined.\n (default: :obj:`None`)\n dim_size (int, optional): The size of the output tensor at\n dimension :obj:`dim` after aggregation. (default: :obj:`None`)\n dim (int, optional): The dimension in which to aggregate.\n (default: :obj:`-2`)\n max_num_elements: (int, optional): The maximum number of elements\n within a single aggregation group. (default: :obj:`None`)\n \"\"\"\n pass\n\n def reset_parameters(self):\n r\"\"\"Resets all learnable parameters of the module.\"\"\"\n pass\n\n @disable_dynamic_shapes(required_args=['dim_size'])\n def __call__(\n self,\n x: Tensor,\n index: Optional[Tensor] = None,\n ptr: Optional[Tensor] = None,\n dim_size: Optional[int] = None,\n dim: int = -2,\n **kwargs,\n ) -> Tensor:\n\n if dim >= x.dim() or dim < -x.dim():\n raise ValueError(f\"Encountered invalid dimension '{dim}' of \"\n f\"source tensor with {x.dim()} dimensions\")\n\n if index is None and ptr is None:\n index = x.new_zeros(x.size(dim), dtype=torch.long)\n\n if ptr is not None:\n if dim_size is None:\n dim_size = ptr.numel() - 1\n elif dim_size != ptr.numel() - 1:\n raise ValueError(f\"Encountered invalid 'dim_size' (got \"\n f\"'{dim_size}' but expected \"\n f\"'{ptr.numel() - 1}')\")\n\n if index is not None and dim_size is None:\n dim_size = int(index.max()) + 1 if index.numel() > 0 else 0\n\n try:\n return super().__call__(x, index=index, ptr=ptr, dim_size=dim_size,\n dim=dim, **kwargs)\n except (IndexError, RuntimeError) as e:\n if index is not None:\n if index.numel() > 0 and dim_size <= int(index.max()):\n raise ValueError(f\"Encountered invalid 'dim_size' (got \"\n f\"'{dim_size}' but expected \"\n f\">= '{int(index.max()) + 1}')\")\n raise e\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}()'\n\n # Assertions ##############################################################\n\n def assert_index_present(self, index: Optional[Tensor]):\n # TODO Currently, not all aggregators support `ptr`. 
This assert helps\n # to ensure that we require `index` to be passed to the computation:\n if index is None:\n raise NotImplementedError(\n \"Aggregation requires 'index' to be specified\")\n\n def assert_sorted_index(self, index: Optional[Tensor]):\n if index is not None and not torch.all(index[:-1] <= index[1:]):\n raise ValueError(\"Can not perform aggregation since the 'index' \"\n \"tensor is not sorted. Specifically, if you use \"\n \"this aggregation as part of 'MessagePassing`, \"\n \"ensure that 'edge_index' is sorted by \"\n \"destination nodes, e.g., by calling \"\n \"`data.sort(sort_by_row=False)`\")\n\n def assert_two_dimensional_input(self, x: Tensor, dim: int):\n if x.dim() != 2:\n raise ValueError(f\"Aggregation requires two-dimensional inputs \"\n f\"(got '{x.dim()}')\")\n\n if dim not in [-2, 0]:\n raise ValueError(f\"Aggregation needs to perform aggregation in \"\n f\"first dimension (got '{dim}')\")\n\n # Helper methods ##########################################################\n\n def reduce(self, x: Tensor, index: Optional[Tensor] = None,\n ptr: Optional[Tensor] = None, dim_size: Optional[int] = None,\n dim: int = -2, reduce: str = 'sum') -> Tensor:\n\n if ptr is not None:\n if index is None or self._deterministic:\n ptr = expand_left(ptr, dim, dims=x.dim())\n return segment(x, ptr, reduce=reduce)\n\n if index is None:\n raise RuntimeError(\"Aggregation requires 'index' to be specified\")\n\n return scatter(x, index, dim, dim_size, reduce)\n\n def to_dense_batch(\n self,\n x: Tensor,\n index: Optional[Tensor] = None,\n ptr: Optional[Tensor] = None,\n dim_size: Optional[int] = None,\n dim: int = -2,\n fill_value: float = 0.0,\n max_num_elements: Optional[int] = None,\n ) -> Tuple[Tensor, Tensor]:\n\n # TODO Currently, `to_dense_batch` can only operate on `index`:\n self.assert_index_present(index)\n self.assert_sorted_index(index)\n self.assert_two_dimensional_input(x, dim)\n\n return to_dense_batch(\n x,\n index,\n batch_size=dim_size,\n fill_value=fill_value,\n max_num_nodes=max_num_elements,\n )\n\n\n###############################################################################\n\n\ndef expand_left(ptr: Tensor, dim: int, dims: int) -> Tensor:\n for _ in range(dims + dim if dim < 0 else dim):\n ptr = ptr.unsqueeze(0)\n return ptr\n", "path": "torch_geometric/nn/aggr/base.py"}]} | 3,015 | 166 |
gh_patches_debug_7483 | rasdani/github-patches | git_diff | akvo__akvo-rsr-4639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Indicator update user should not change when an update is rejected or approved
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/indicator_period_data.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 import json
8 import os
9
10 from akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment, Project
11 from akvo.rest.models import TastyTokenAuthentication, JWTAuthentication
12 from akvo.rsr.models.result.utils import QUANTITATIVE, PERCENTAGE_MEASURE
13
14 from ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,
15 IndicatorPeriodDataCommentSerializer)
16 from ..viewsets import PublicProjectViewSet
17
18 from django.shortcuts import get_object_or_404
19 from django.http import HttpResponseBadRequest, HttpResponseForbidden
20 from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION
21 from django.contrib.contenttypes.models import ContentType
22 from rest_framework import status
23 from rest_framework.authentication import SessionAuthentication
24 from rest_framework.decorators import api_view, authentication_classes
25 from rest_framework.response import Response
26 from rest_framework.utils.encoders import JSONEncoder
27
28
29 class IndicatorPeriodDataViewSet(PublicProjectViewSet):
30 """
31 """
32 queryset = IndicatorPeriodData.objects.select_related('user', 'approved_by').all()
33 serializer_class = IndicatorPeriodDataSerializer
34
35 project_relation = 'period__indicator__result__project__'
36
37 def get_queryset(self):
38 queryset = super(IndicatorPeriodDataViewSet, self).get_queryset()
39 return IndicatorPeriodData.get_user_viewable_updates(
40 queryset, self.request.user
41 )
42
43 def perform_create(self, serializer):
44 serializer.save(user=self.request.user)
45
46
47 class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):
48 """
49 """
50 authentication_classes = (SessionAuthentication, TastyTokenAuthentication, JWTAuthentication)
51
52 queryset = IndicatorPeriodData.objects.select_related(
53 'period',
54 'user',
55 'approved_by',
56 ).prefetch_related(
57 'comments',
58 'disaggregations',
59 ).all()
60 serializer_class = IndicatorPeriodDataFrameworkSerializer
61 project_relation = 'period__indicator__result__project__'
62
63 def get_queryset(self):
64 queryset = getattr(self, '_c_queryset', None)
65 if queryset is None:
66 queryset = super(IndicatorPeriodDataFrameworkViewSet, self).get_queryset()
67 queryset = IndicatorPeriodData.get_user_viewable_updates(
68 queryset, self.request.user
69 )
70 self._c_queryset = queryset
71
72 return queryset
73
74 def perform_create(self, serializer):
75 data = {key: value for key, value in serializer.validated_data.items() if key not in ['period', 'files', 'photos']}
76 if len(serializer._disaggregations_data) > 0:
77 data['disaggregations'] = [
78 {key: value for key, value in dsg.items() if key in ['id', 'dimension_value', 'value', 'numerator', 'denominator']}
79 for dsg in serializer._disaggregations_data
80 ]
81 user = self.request.user
82 serializer.save(user=user)
83 instance = serializer.instance
84 log_data = {'audit_trail': True, 'data': data}
85 LogEntry.objects.log_action(
86 user_id=user.id,
87 content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,
88 object_id=instance.id,
89 object_repr=str(instance),
90 action_flag=ADDITION,
91 change_message=json.dumps(log_data, cls=JSONEncoder)
92 )
93
94 def perform_update(self, serializer):
95 instance = serializer.instance
96 data = {
97 key: value
98 for key, value in serializer.validated_data.items()
99 if key not in ['period', 'files', 'photos'] and (key == 'comments' or getattr(instance, key) != value)
100 }
101 if len(serializer._disaggregations_data) > 0:
102 indicator = instance.period.indicator
103 is_percentage = indicator.type == QUANTITATIVE and indicator.measure == PERCENTAGE_MEASURE
104 dsg_attrs = ['id', 'dimension_value', 'numerator', 'denominator'] if is_percentage else ['id', 'dimension_value', 'value']
105 data['disaggregations'] = [
106 {key: value for key, value in dsg.items() if key in dsg_attrs}
107 for dsg in serializer._disaggregations_data
108 ]
109 user = self.request.user
110 serializer.save(user=user)
111 log_data = {'audit_trail': True, 'data': data}
112 LogEntry.objects.log_action(
113 user_id=user.id,
114 content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,
115 object_id=instance.id,
116 object_repr=str(instance),
117 action_flag=CHANGE,
118 change_message=json.dumps(log_data, cls=JSONEncoder)
119 )
120
121 def perform_destroy(self, instance):
122 object_id = instance.id
123 object_repr = str(instance)
124 super().perform_destroy(instance)
125 LogEntry.objects.log_action(
126 user_id=self.request.user.id,
127 content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,
128 object_id=object_id,
129 object_repr=object_repr,
130 action_flag=DELETION,
131 change_message=json.dumps({'audit_trail': True})
132 )
133
134
135 class IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):
136 """
137 """
138 authentication_classes = (SessionAuthentication, TastyTokenAuthentication, JWTAuthentication)
139
140 # TODO: Is there more optimization possible?
141 queryset = IndicatorPeriodDataComment.objects.select_related(
142 'user'
143 ).prefetch_related(
144 'user__employers', 'user__employers__organisation'
145 )
146 serializer_class = IndicatorPeriodDataCommentSerializer
147 project_relation = 'data__period__indicator__result__project__'
148
149 def perform_create(self, serializer):
150 serializer.save(user=self.request.user)
151
152
153 @api_view(['POST', 'DELETE'])
154 @authentication_classes([SessionAuthentication, TastyTokenAuthentication, JWTAuthentication])
155 def period_update_files(request, update_pk, file_pk=None):
156 update = get_object_or_404(IndicatorPeriodData, pk=update_pk)
157 user = request.user
158 if not user.has_perm('rsr.change_indicatorperioddata', update):
159 return Response({'error': 'User has no permission to add/remove files'}, status=status.HTTP_403_FORBIDDEN)
160
161 if request.method == 'POST' and not file_pk:
162 serializer = IndicatorPeriodDataFrameworkSerializer(instance=update, data=request.data, partial=True)
163 serializer.is_valid(raise_exception=True)
164 files = [f"Uploaded file \"{file.name}\"" for file in serializer.validated_data.get('files', [])]
165 serializer.save(user=user)
166 log_data = {'audit_trail': True, 'data': {'files': files}}
167 LogEntry.objects.log_action(
168 user_id=user.id,
169 content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,
170 object_id=update.id,
171 object_repr=str(update),
172 action_flag=CHANGE,
173 change_message=json.dumps(log_data)
174 )
175 return Response(serializer.data['file_set'])
176
177 if request.method == 'DELETE' and file_pk:
178 file = update.indicatorperioddatafile_set.get(pk=file_pk)
179 filename = os.path.basename(file.file.name)
180 file.delete()
181 update.user = user
182 update.save(update_fields=['user'])
183 log_data = {'audit_trail': True, 'data': {'files': [f"Removed file \"{filename}\""]}}
184 LogEntry.objects.log_action(
185 user_id=user.id,
186 content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,
187 object_id=update.id,
188 object_repr=str(update),
189 action_flag=CHANGE,
190 change_message=json.dumps(log_data)
191 )
192 return Response(status=status.HTTP_204_NO_CONTENT)
193
194 return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
195
196
197 @api_view(['POST', 'DELETE'])
198 @authentication_classes([SessionAuthentication, TastyTokenAuthentication, JWTAuthentication])
199 def period_update_photos(request, update_pk, photo_pk=None):
200 update = get_object_or_404(IndicatorPeriodData, pk=update_pk)
201 user = request.user
202 if user != update.user:
203 return Response({'error': 'User has no permission to add/remove photos'}, status=status.HTTP_403_FORBIDDEN)
204
205 if request.method == 'POST' and not photo_pk:
206 serializer = IndicatorPeriodDataFrameworkSerializer(instance=update, data=request.data, partial=True)
207 serializer.is_valid(raise_exception=True)
208 serializer.save(user=user)
209 return Response(serializer.data['photo_set'])
210
211 if request.method == 'DELETE' and photo_pk:
212 photo = update.indicatorperioddataphoto_set.get(pk=photo_pk)
213 photo.delete()
214 return Response(status=status.HTTP_204_NO_CONTENT)
215
216 return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
217
218
219 @api_view(['POST', 'DELETE'])
220 def indicator_upload_file(request, pk=None):
221 """
222 Special API call for directly uploading a file.
223
224 :param request; A Django request object.
225 :param pk; The primary key of an IndicatorPeriodData instance.
226 """
227 # Permissions
228 user = getattr(request, 'user', None)
229 if not user:
230 return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)
231 # TODO: Check if user is allowed to upload a file
232 # if not user.has_perm('rsr.change_project', update.period.indicator.result.project):
233 # return Response({'error': 'User has no permission to place an update'},
234 # status=status.HTTP_403_FORBIDDEN)
235
236 update = IndicatorPeriodData.objects.get(pk=pk)
237 if request.method == 'DELETE':
238 try:
239 if request.data['type'] == 'photo':
240 update.photo = ''
241 update.save(update_fields=['photo'])
242 return Response({}, status=status.HTTP_204_NO_CONTENT)
243 elif request.data['type'] == 'file':
244 update.file = ''
245 update.save(update_fields=['file'])
246 return Response({}, status=status.HTTP_204_NO_CONTENT)
247 except Exception as e:
248 return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)
249 else: # POST
250 upload_file = request.data['file']
251 try:
252 file_type = request.POST.copy()['type']
253 if file_type == 'photo':
254 update.photo = upload_file
255 update.save(update_fields=['photo'])
256 # Add photo member to be able to distinguish from file URL in new results version
257 # while keeping the old API
258 return Response({'file': update.photo.url, 'photo': update.photo.url})
259 elif file_type == 'file':
260 update.file = upload_file
261 update.save(update_fields=['file'])
262 return Response({'file': update.file.url})
263 except Exception as e:
264 return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)
265
266
267 @api_view(['POST'])
268 @authentication_classes([SessionAuthentication, TastyTokenAuthentication])
269 def set_updates_status(request, project_pk):
270 """Bulk update IndicatorPeriodData.status attributes of a project.
271 """
272 update_ids = request.data.get('updates', [])
273 status = request.data.get('status', None)
274 if len(update_ids) < 1 or status is None:
275 return HttpResponseBadRequest()
276 user = request.user
277 project = get_object_or_404(Project, pk=project_pk)
278 if not user.has_perm('rsr.change_project', project):
279 return HttpResponseForbidden()
280 IndicatorPeriodData.objects\
281 .filter(id__in=update_ids, period__indicator__result__project=project)\
282 .update(status=status)
283 log_data = {'audit_trail': True, 'data': {'status': status}}
284 for update_id in update_ids:
285 LogEntry.objects.log_action(
286 user_id=user.id,
287 content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,
288 object_id=update_id,
289 object_repr='IndicatorPeriodData',
290 action_flag=CHANGE,
291 change_message=json.dumps(log_data)
292 )
293 return Response({'success': True})
294
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py
--- a/akvo/rest/views/indicator_period_data.py
+++ b/akvo/rest/views/indicator_period_data.py
@@ -107,7 +107,9 @@
for dsg in serializer._disaggregations_data
]
user = self.request.user
- serializer.save(user=user)
+ status = data.get('status', None)
+ if status != 'R' and status != 'A':
+ serializer.save(user=user)
log_data = {'audit_trail': True, 'data': data}
LogEntry.objects.log_action(
user_id=user.id,
| {"golden_diff": "diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py\n--- a/akvo/rest/views/indicator_period_data.py\n+++ b/akvo/rest/views/indicator_period_data.py\n@@ -107,7 +107,9 @@\n for dsg in serializer._disaggregations_data\n ]\n user = self.request.user\n- serializer.save(user=user)\n+ status = data.get('status', None)\n+ if status != 'R' and status != 'A':\n+ serializer.save(user=user)\n log_data = {'audit_trail': True, 'data': data}\n LogEntry.objects.log_action(\n user_id=user.id,\n", "issue": "Indicator update user should not change when update rejected or approved\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport json\nimport os\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment, Project\nfrom akvo.rest.models import TastyTokenAuthentication, JWTAuthentication\nfrom akvo.rsr.models.result.utils import QUANTITATIVE, PERCENTAGE_MEASURE\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponseBadRequest, HttpResponseForbidden\nfrom django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION\nfrom django.contrib.contenttypes.models import ContentType\nfrom rest_framework import status\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.decorators import api_view, authentication_classes\nfrom rest_framework.response import Response\nfrom rest_framework.utils.encoders import JSONEncoder\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.select_related('user', 'approved_by').all()\n serializer_class = IndicatorPeriodDataSerializer\n\n project_relation = 'period__indicator__result__project__'\n\n def get_queryset(self):\n queryset = super(IndicatorPeriodDataViewSet, self).get_queryset()\n return IndicatorPeriodData.get_user_viewable_updates(\n queryset, self.request.user\n )\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n authentication_classes = (SessionAuthentication, TastyTokenAuthentication, JWTAuthentication)\n\n queryset = IndicatorPeriodData.objects.select_related(\n 'period',\n 'user',\n 'approved_by',\n ).prefetch_related(\n 'comments',\n 'disaggregations',\n ).all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n project_relation = 'period__indicator__result__project__'\n\n def get_queryset(self):\n queryset = getattr(self, '_c_queryset', None)\n if queryset is None:\n queryset = super(IndicatorPeriodDataFrameworkViewSet, self).get_queryset()\n queryset = IndicatorPeriodData.get_user_viewable_updates(\n queryset, self.request.user\n )\n self._c_queryset = queryset\n\n return queryset\n\n def perform_create(self, serializer):\n data = {key: value for key, value in serializer.validated_data.items() if key not in ['period', 'files', 'photos']}\n if len(serializer._disaggregations_data) > 0:\n data['disaggregations'] = [\n {key: value for key, value in dsg.items() if 
key in ['id', 'dimension_value', 'value', 'numerator', 'denominator']}\n for dsg in serializer._disaggregations_data\n ]\n user = self.request.user\n serializer.save(user=user)\n instance = serializer.instance\n log_data = {'audit_trail': True, 'data': data}\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=instance.id,\n object_repr=str(instance),\n action_flag=ADDITION,\n change_message=json.dumps(log_data, cls=JSONEncoder)\n )\n\n def perform_update(self, serializer):\n instance = serializer.instance\n data = {\n key: value\n for key, value in serializer.validated_data.items()\n if key not in ['period', 'files', 'photos'] and (key == 'comments' or getattr(instance, key) != value)\n }\n if len(serializer._disaggregations_data) > 0:\n indicator = instance.period.indicator\n is_percentage = indicator.type == QUANTITATIVE and indicator.measure == PERCENTAGE_MEASURE\n dsg_attrs = ['id', 'dimension_value', 'numerator', 'denominator'] if is_percentage else ['id', 'dimension_value', 'value']\n data['disaggregations'] = [\n {key: value for key, value in dsg.items() if key in dsg_attrs}\n for dsg in serializer._disaggregations_data\n ]\n user = self.request.user\n serializer.save(user=user)\n log_data = {'audit_trail': True, 'data': data}\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=instance.id,\n object_repr=str(instance),\n action_flag=CHANGE,\n change_message=json.dumps(log_data, cls=JSONEncoder)\n )\n\n def perform_destroy(self, instance):\n object_id = instance.id\n object_repr = str(instance)\n super().perform_destroy(instance)\n LogEntry.objects.log_action(\n user_id=self.request.user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=object_id,\n object_repr=object_repr,\n action_flag=DELETION,\n change_message=json.dumps({'audit_trail': True})\n )\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n authentication_classes = (SessionAuthentication, TastyTokenAuthentication, JWTAuthentication)\n\n # TODO: Is there more optimization possible?\n queryset = IndicatorPeriodDataComment.objects.select_related(\n 'user'\n ).prefetch_related(\n 'user__employers', 'user__employers__organisation'\n )\n serializer_class = IndicatorPeriodDataCommentSerializer\n project_relation = 'data__period__indicator__result__project__'\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\n@api_view(['POST', 'DELETE'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication, JWTAuthentication])\ndef period_update_files(request, update_pk, file_pk=None):\n update = get_object_or_404(IndicatorPeriodData, pk=update_pk)\n user = request.user\n if not user.has_perm('rsr.change_indicatorperioddata', update):\n return Response({'error': 'User has no permission to add/remove files'}, status=status.HTTP_403_FORBIDDEN)\n\n if request.method == 'POST' and not file_pk:\n serializer = IndicatorPeriodDataFrameworkSerializer(instance=update, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n files = [f\"Uploaded file \\\"{file.name}\\\"\" for file in serializer.validated_data.get('files', [])]\n serializer.save(user=user)\n log_data = {'audit_trail': True, 'data': {'files': files}}\n LogEntry.objects.log_action(\n user_id=user.id,\n 
content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=update.id,\n object_repr=str(update),\n action_flag=CHANGE,\n change_message=json.dumps(log_data)\n )\n return Response(serializer.data['file_set'])\n\n if request.method == 'DELETE' and file_pk:\n file = update.indicatorperioddatafile_set.get(pk=file_pk)\n filename = os.path.basename(file.file.name)\n file.delete()\n update.user = user\n update.save(update_fields=['user'])\n log_data = {'audit_trail': True, 'data': {'files': [f\"Removed file \\\"{filename}\\\"\"]}}\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=update.id,\n object_repr=str(update),\n action_flag=CHANGE,\n change_message=json.dumps(log_data)\n )\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n@api_view(['POST', 'DELETE'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication, JWTAuthentication])\ndef period_update_photos(request, update_pk, photo_pk=None):\n update = get_object_or_404(IndicatorPeriodData, pk=update_pk)\n user = request.user\n if user != update.user:\n return Response({'error': 'User has no permission to add/remove photos'}, status=status.HTTP_403_FORBIDDEN)\n\n if request.method == 'POST' and not photo_pk:\n serializer = IndicatorPeriodDataFrameworkSerializer(instance=update, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save(user=user)\n return Response(serializer.data['photo_set'])\n\n if request.method == 'DELETE' and photo_pk:\n photo = update.indicatorperioddataphoto_set.get(pk=photo_pk)\n photo.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n@api_view(['POST', 'DELETE'])\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n # Permissions\n user = getattr(request, 'user', None)\n if not user:\n return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n # TODO: Check if user is allowed to upload a file\n # if not user.has_perm('rsr.change_project', update.period.indicator.result.project):\n # return Response({'error': 'User has no permission to place an update'},\n # status=status.HTTP_403_FORBIDDEN)\n\n update = IndicatorPeriodData.objects.get(pk=pk)\n if request.method == 'DELETE':\n try:\n if request.data['type'] == 'photo':\n update.photo = ''\n update.save(update_fields=['photo'])\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n elif request.data['type'] == 'file':\n update.file = ''\n update.save(update_fields=['file'])\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n else: # POST\n upload_file = request.data['file']\n try:\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n # Add photo member to be able to distinguish from file URL in new results version\n # while keeping the old API\n return Response({'file': update.photo.url, 'photo': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n except 
Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['POST'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication])\ndef set_updates_status(request, project_pk):\n \"\"\"Bulk update IndicatorPeriodData.status attributes of a project.\n \"\"\"\n update_ids = request.data.get('updates', [])\n status = request.data.get('status', None)\n if len(update_ids) < 1 or status is None:\n return HttpResponseBadRequest()\n user = request.user\n project = get_object_or_404(Project, pk=project_pk)\n if not user.has_perm('rsr.change_project', project):\n return HttpResponseForbidden()\n IndicatorPeriodData.objects\\\n .filter(id__in=update_ids, period__indicator__result__project=project)\\\n .update(status=status)\n log_data = {'audit_trail': True, 'data': {'status': status}}\n for update_id in update_ids:\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=update_id,\n object_repr='IndicatorPeriodData',\n action_flag=CHANGE,\n change_message=json.dumps(log_data)\n )\n return Response({'success': True})\n", "path": "akvo/rest/views/indicator_period_data.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport json\nimport os\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment, Project\nfrom akvo.rest.models import TastyTokenAuthentication, JWTAuthentication\nfrom akvo.rsr.models.result.utils import QUANTITATIVE, PERCENTAGE_MEASURE\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponseBadRequest, HttpResponseForbidden\nfrom django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION\nfrom django.contrib.contenttypes.models import ContentType\nfrom rest_framework import status\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.decorators import api_view, authentication_classes\nfrom rest_framework.response import Response\nfrom rest_framework.utils.encoders import JSONEncoder\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.select_related('user', 'approved_by').all()\n serializer_class = IndicatorPeriodDataSerializer\n\n project_relation = 'period__indicator__result__project__'\n\n def get_queryset(self):\n queryset = super(IndicatorPeriodDataViewSet, self).get_queryset()\n return IndicatorPeriodData.get_user_viewable_updates(\n queryset, self.request.user\n )\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n authentication_classes = (SessionAuthentication, TastyTokenAuthentication, JWTAuthentication)\n\n queryset = IndicatorPeriodData.objects.select_related(\n 'period',\n 'user',\n 'approved_by',\n ).prefetch_related(\n 'comments',\n 'disaggregations',\n ).all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n project_relation = 'period__indicator__result__project__'\n\n def 
get_queryset(self):\n queryset = getattr(self, '_c_queryset', None)\n if queryset is None:\n queryset = super(IndicatorPeriodDataFrameworkViewSet, self).get_queryset()\n queryset = IndicatorPeriodData.get_user_viewable_updates(\n queryset, self.request.user\n )\n self._c_queryset = queryset\n\n return queryset\n\n def perform_create(self, serializer):\n data = {key: value for key, value in serializer.validated_data.items() if key not in ['period', 'files', 'photos']}\n if len(serializer._disaggregations_data) > 0:\n data['disaggregations'] = [\n {key: value for key, value in dsg.items() if key in ['id', 'dimension_value', 'value', 'numerator', 'denominator']}\n for dsg in serializer._disaggregations_data\n ]\n user = self.request.user\n serializer.save(user=user)\n instance = serializer.instance\n log_data = {'audit_trail': True, 'data': data}\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=instance.id,\n object_repr=str(instance),\n action_flag=ADDITION,\n change_message=json.dumps(log_data, cls=JSONEncoder)\n )\n\n def perform_update(self, serializer):\n instance = serializer.instance\n data = {\n key: value\n for key, value in serializer.validated_data.items()\n if key not in ['period', 'files', 'photos'] and (key == 'comments' or getattr(instance, key) != value)\n }\n if len(serializer._disaggregations_data) > 0:\n indicator = instance.period.indicator\n is_percentage = indicator.type == QUANTITATIVE and indicator.measure == PERCENTAGE_MEASURE\n dsg_attrs = ['id', 'dimension_value', 'numerator', 'denominator'] if is_percentage else ['id', 'dimension_value', 'value']\n data['disaggregations'] = [\n {key: value for key, value in dsg.items() if key in dsg_attrs}\n for dsg in serializer._disaggregations_data\n ]\n user = self.request.user\n status = data.get('status', None)\n if status != 'R' and status != 'A':\n serializer.save(user=user)\n log_data = {'audit_trail': True, 'data': data}\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=instance.id,\n object_repr=str(instance),\n action_flag=CHANGE,\n change_message=json.dumps(log_data, cls=JSONEncoder)\n )\n\n def perform_destroy(self, instance):\n object_id = instance.id\n object_repr = str(instance)\n super().perform_destroy(instance)\n LogEntry.objects.log_action(\n user_id=self.request.user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=object_id,\n object_repr=object_repr,\n action_flag=DELETION,\n change_message=json.dumps({'audit_trail': True})\n )\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n authentication_classes = (SessionAuthentication, TastyTokenAuthentication, JWTAuthentication)\n\n # TODO: Is there more optimization possible?\n queryset = IndicatorPeriodDataComment.objects.select_related(\n 'user'\n ).prefetch_related(\n 'user__employers', 'user__employers__organisation'\n )\n serializer_class = IndicatorPeriodDataCommentSerializer\n project_relation = 'data__period__indicator__result__project__'\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\n@api_view(['POST', 'DELETE'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication, JWTAuthentication])\ndef period_update_files(request, update_pk, file_pk=None):\n update = get_object_or_404(IndicatorPeriodData, pk=update_pk)\n user = request.user\n if not 
user.has_perm('rsr.change_indicatorperioddata', update):\n return Response({'error': 'User has no permission to add/remove files'}, status=status.HTTP_403_FORBIDDEN)\n\n if request.method == 'POST' and not file_pk:\n serializer = IndicatorPeriodDataFrameworkSerializer(instance=update, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n files = [f\"Uploaded file \\\"{file.name}\\\"\" for file in serializer.validated_data.get('files', [])]\n serializer.save(user=user)\n log_data = {'audit_trail': True, 'data': {'files': files}}\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=update.id,\n object_repr=str(update),\n action_flag=CHANGE,\n change_message=json.dumps(log_data)\n )\n return Response(serializer.data['file_set'])\n\n if request.method == 'DELETE' and file_pk:\n file = update.indicatorperioddatafile_set.get(pk=file_pk)\n filename = os.path.basename(file.file.name)\n file.delete()\n update.user = user\n update.save(update_fields=['user'])\n log_data = {'audit_trail': True, 'data': {'files': [f\"Removed file \\\"{filename}\\\"\"]}}\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=update.id,\n object_repr=str(update),\n action_flag=CHANGE,\n change_message=json.dumps(log_data)\n )\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n@api_view(['POST', 'DELETE'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication, JWTAuthentication])\ndef period_update_photos(request, update_pk, photo_pk=None):\n update = get_object_or_404(IndicatorPeriodData, pk=update_pk)\n user = request.user\n if user != update.user:\n return Response({'error': 'User has no permission to add/remove photos'}, status=status.HTTP_403_FORBIDDEN)\n\n if request.method == 'POST' and not photo_pk:\n serializer = IndicatorPeriodDataFrameworkSerializer(instance=update, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save(user=user)\n return Response(serializer.data['photo_set'])\n\n if request.method == 'DELETE' and photo_pk:\n photo = update.indicatorperioddataphoto_set.get(pk=photo_pk)\n photo.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n@api_view(['POST', 'DELETE'])\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n # Permissions\n user = getattr(request, 'user', None)\n if not user:\n return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n # TODO: Check if user is allowed to upload a file\n # if not user.has_perm('rsr.change_project', update.period.indicator.result.project):\n # return Response({'error': 'User has no permission to place an update'},\n # status=status.HTTP_403_FORBIDDEN)\n\n update = IndicatorPeriodData.objects.get(pk=pk)\n if request.method == 'DELETE':\n try:\n if request.data['type'] == 'photo':\n update.photo = ''\n update.save(update_fields=['photo'])\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n elif request.data['type'] == 'file':\n update.file = ''\n update.save(update_fields=['file'])\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n except Exception as 
e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n else: # POST\n upload_file = request.data['file']\n try:\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n # Add photo member to be able to distinguish from file URL in new results version\n # while keeping the old API\n return Response({'file': update.photo.url, 'photo': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['POST'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication])\ndef set_updates_status(request, project_pk):\n \"\"\"Bulk update IndicatorPeriodData.status attributes of a project.\n \"\"\"\n update_ids = request.data.get('updates', [])\n status = request.data.get('status', None)\n if len(update_ids) < 1 or status is None:\n return HttpResponseBadRequest()\n user = request.user\n project = get_object_or_404(Project, pk=project_pk)\n if not user.has_perm('rsr.change_project', project):\n return HttpResponseForbidden()\n IndicatorPeriodData.objects\\\n .filter(id__in=update_ids, period__indicator__result__project=project)\\\n .update(status=status)\n log_data = {'audit_trail': True, 'data': {'status': status}}\n for update_id in update_ids:\n LogEntry.objects.log_action(\n user_id=user.id,\n content_type_id=ContentType.objects.get_for_model(IndicatorPeriodData).id,\n object_id=update_id,\n object_repr='IndicatorPeriodData',\n action_flag=CHANGE,\n change_message=json.dumps(log_data)\n )\n return Response({'success': True})\n", "path": "akvo/rest/views/indicator_period_data.py"}]} | 3,679 | 156 |
gh_patches_debug_1319 | rasdani/github-patches | git_diff | zenml-io__zenml-2271 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update `sklearn` Integration to Support Versions >1.3.0 and Resolve MLflow Autologging Issues
## Open Source Contributors Welcomed!
Please comment below if you would like to work on this issue!
### Contact Details [Optional]
[email protected]
### What happened?
The current ZenML Sklearn integration is restricted to versions of Sklearn <1.3.0, as defined in `src/zenml/integrations/sklearn/__init__.py`. However, the release of Sklearn 1.3.0 necessitates an update to this constraint. Additionally, this Sklearn version upgrade appears to cause issues with MLflow autologging, likely due to compatibility conflicts.
### Task Description
Update the Sklearn integration in ZenML to support Sklearn versions >1.3.0. Additionally, identify and resolve any issues arising in MLflow autologging due to this version update.
### Expected Outcome
- The Sklearn integration in ZenML should allow for the use of Sklearn versions >1.3.0.
- Any compatibility issues, especially with MLflow autologging, should be identified and resolved.
- Ensure that all tests, including CI pipelines, pass with the updated Sklearn version.
### Steps to Implement
- Modify the Sklearn version constraint in src/zenml/integrations/sklearn/__init__.py to allow for versions >1.3.0.
- Investigate and identify the root cause of the issues with MLflow autologging when using Sklearn 1.3.0.
- Implement necessary fixes or updates to ensure compatibility with the new Sklearn version.
- Thoroughly test the changes, especially focusing on MLflow autologging functionality.
- Update documentation and examples as necessary to reflect the support for the new Sklearn version.
### Additional Context
This update is crucial for keeping ZenML compatible with the latest machine learning tools and libraries, ensuring that users can leverage the newest features and improvements in Sklearn.
### Code of Conduct
- [ ] I agree to follow this project's Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/zenml/integrations/sklearn/__init__.py`
Content:
```
1 # Copyright (c) ZenML GmbH 2021. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at:
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
12 # or implied. See the License for the specific language governing
13 # permissions and limitations under the License.
14 """Initialization of the sklearn integration."""
15
16 from zenml.integrations.constants import SKLEARN
17 from zenml.integrations.integration import Integration
18
19
20 class SklearnIntegration(Integration):
21 """Definition of sklearn integration for ZenML."""
22
23 NAME = SKLEARN
24 REQUIREMENTS = ["scikit-learn<1.3"]
25
26 @classmethod
27 def activate(cls) -> None:
28 """Activates the integration."""
29 from zenml.integrations.sklearn import materializers # noqa
30
31
32 SklearnIntegration.check_installation()
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/zenml/integrations/sklearn/__init__.py b/src/zenml/integrations/sklearn/__init__.py
--- a/src/zenml/integrations/sklearn/__init__.py
+++ b/src/zenml/integrations/sklearn/__init__.py
@@ -21,7 +21,7 @@
"""Definition of sklearn integration for ZenML."""
NAME = SKLEARN
- REQUIREMENTS = ["scikit-learn<1.3"]
+ REQUIREMENTS = ["scikit-learn>1.3"]
@classmethod
def activate(cls) -> None:
| {"golden_diff": "diff --git a/src/zenml/integrations/sklearn/__init__.py b/src/zenml/integrations/sklearn/__init__.py\n--- a/src/zenml/integrations/sklearn/__init__.py\n+++ b/src/zenml/integrations/sklearn/__init__.py\n@@ -21,7 +21,7 @@\n \"\"\"Definition of sklearn integration for ZenML.\"\"\"\n \n NAME = SKLEARN\n- REQUIREMENTS = [\"scikit-learn<1.3\"]\n+ REQUIREMENTS = [\"scikit-learn>1.3\"]\n \n @classmethod\n def activate(cls) -> None:\n", "issue": "Update `sklearn` Integration to Support Versions >1.3.0 and Resolve MLflow Autologging Issues\n## Open Source Contributors Welcomed!\r\nPlease comment below if you would like to work on this issue!\r\n\r\n### Contact Details [Optional]\r\n\r\[email protected]\r\n\r\n### What happened?\r\nThe current ZenML Sklearn integration is restricted to versions of Sklearn <1.3.0, as defined in `src/zenml/integrations/sklearn/__init__.py`. However, the release of Sklearn 1.3.0 necessitates an update to this constraint. Additionally, this Sklearn version upgrade appears to cause issues with MLflow autologging, likely due to compatibility conflicts.\r\n\r\n### Task Description\r\nUpdate the Sklearn integration in ZenML to support Sklearn versions >1.3.0. Additionally, identify and resolve any issues arising in MLflow autologging due to this version update.\r\n\r\n### Expected Outcome\r\n- The Sklearn integration in ZenML should allow for the use of Sklearn versions >1.3.0.\r\n- Any compatibility issues, especially with MLflow autologging, should be identified and resolved.\r\n- Ensure that all tests, including CI pipelines, pass with the updated Sklearn version.\r\n\r\n### Steps to Implement\r\n\r\n- Modify the Sklearn version constraint in src/zenml/integrations/sklearn/__init__.py to allow for versions >1.3.0.\r\n- Investigate and identify the root cause of the issues with MLflow autologging when using Sklearn 1.3.0.\r\n- Implement necessary fixes or updates to ensure compatibility with the new Sklearn version.\r\n- Thoroughly test the changes, especially focusing on MLflow autologging functionality.\r\n- Update documentation and examples as necessary to reflect the support for the new Sklearn version.\r\n\r\n### Additional Context\r\nThis update is crucial for keeping ZenML compatible with the latest machine learning tools and libraries, ensuring that users can leverage the newest features and improvements in Sklearn.\r\n\r\n### Code of Conduct\r\n- [ ] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Initialization of the sklearn integration.\"\"\"\n\nfrom zenml.integrations.constants import SKLEARN\nfrom zenml.integrations.integration import Integration\n\n\nclass SklearnIntegration(Integration):\n \"\"\"Definition of sklearn integration for ZenML.\"\"\"\n\n NAME = SKLEARN\n REQUIREMENTS = [\"scikit-learn<1.3\"]\n\n @classmethod\n def activate(cls) -> None:\n \"\"\"Activates the integration.\"\"\"\n from zenml.integrations.sklearn import materializers # noqa\n\n\nSklearnIntegration.check_installation()\n", "path": "src/zenml/integrations/sklearn/__init__.py"}], "after_files": [{"content": "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Initialization of the sklearn integration.\"\"\"\n\nfrom zenml.integrations.constants import SKLEARN\nfrom zenml.integrations.integration import Integration\n\n\nclass SklearnIntegration(Integration):\n \"\"\"Definition of sklearn integration for ZenML.\"\"\"\n\n NAME = SKLEARN\n REQUIREMENTS = [\"scikit-learn>1.3\"]\n\n @classmethod\n def activate(cls) -> None:\n \"\"\"Activates the integration.\"\"\"\n from zenml.integrations.sklearn import materializers # noqa\n\n\nSklearnIntegration.check_installation()\n", "path": "src/zenml/integrations/sklearn/__init__.py"}]} | 1,017 | 141 |
gh_patches_debug_1995 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-1641 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RedhatRelease parser failed to parse minor release version in some scenarios
In a few cases where the redhat_release content is similar to the example below, the RedhatRelease parser fails to extract the minor version from it
Run:
```
>>> from insights.parsers.redhat_release import RedhatRelease
>>> from insights.tests import context_wrap
>>> RedhatRelease(context_wrap("Red Hat Enterprise Linux release 7.5-0.14")).major
7
>>> RedhatRelease(context_wrap("Red Hat Enterprise Linux release 7.5-0.14")).minor
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/insights/insights-core/insights/parsers/redhat_release.py", line 59, in minor
return int(s[1])
ValueError: invalid literal for int() with base 10: '5-0'
>>> RedhatRelease(context_wrap("Red Hat Enterprise Linux release 7.5-0.14")).version
'7.5-0.14'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/parsers/redhat_release.py`
Content:
```
1 """
2 redhat-release - File ``/etc/redhat-release``
3 =============================================
4
5 This module provides plugins access to file ``/etc/redhat-release``
6
7 Typical content of file ``/etc/redhat-release`` is::
8
9 Red Hat Enterprise Linux Server release 7.2 (Maipo)
10
11 This module parses the file content and stores data in the dict ``self.parsed``.
12 The version info can also be get via ``obj.major`` and ``obj.minor``.
13 Property ``is_rhel`` and ``is_hypervisor`` specifies the host type.
14
15 Examples:
16 >>> rh_rls_content = '''
17 ... Red Hat Enterprise Linux Server release 7.2 (Maipo)
18 ... '''.strip()
19 >>> from insights.tests import context_wrap
20 >>> shared = {RedhatRelease: RedhatRelease(context_wrap(rh_rls_content))}
21 >>> release = shared[RedhatRelease]
22 >>> assert release.raw == rh_rls_content
23 >>> assert release.major == 7
24 >>> assert release.minor == 2
25 >>> assert release.version == "7.2"
26 >>> assert release.is_rhel
27 >>> assert release.product == "Red Hat Enterprise Linux Server"
28 """
29 from .. import Parser, parser
30 from ..specs import Specs
31
32
33 @parser(Specs.redhat_release)
34 class RedhatRelease(Parser):
35 """Parses the content of file ``/etc/redhat-release``."""
36
37 def parse_content(self, content):
38 self.raw = content[0]
39 product, _, version_name = [v.strip() for v in content[0].partition("release")]
40 version_name_split = [v.strip() for v in version_name.split(None, 1)]
41 code_name = (version_name_split[1].strip("()")
42 if len(version_name_split) > 1 else None)
43 self.parsed = {
44 "product": product,
45 "version": version_name_split[0],
46 "code_name": code_name
47 }
48
49 @property
50 def major(self):
51 """int: the major version of this OS."""
52 return int(self.parsed["version"].split(".")[0])
53
54 @property
55 def minor(self):
56 """int: the minor version of this OS."""
57 s = self.parsed["version"].split(".")
58 if len(s) > 1:
59 return int(s[1])
60
61 @property
62 def version(self):
63 """string: version of this OS."""
64 return self.parsed["version"]
65
66 @property
67 def is_rhel(self):
68 """bool: True if this OS belong to RHEL, else False."""
69 return "Red Hat Enterprise Linux" in self.parsed["product"]
70
71 @property
72 def product(self):
73 """string: product of this OS."""
74 return self.parsed["product"]
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/parsers/redhat_release.py b/insights/parsers/redhat_release.py
--- a/insights/parsers/redhat_release.py
+++ b/insights/parsers/redhat_release.py
@@ -54,7 +54,7 @@
@property
def minor(self):
"""int: the minor version of this OS."""
- s = self.parsed["version"].split(".")
+ s = self.parsed["version"].split("-", 1)[0].split(".")
if len(s) > 1:
return int(s[1])
| {"golden_diff": "diff --git a/insights/parsers/redhat_release.py b/insights/parsers/redhat_release.py\n--- a/insights/parsers/redhat_release.py\n+++ b/insights/parsers/redhat_release.py\n@@ -54,7 +54,7 @@\n @property\n def minor(self):\n \"\"\"int: the minor version of this OS.\"\"\"\n- s = self.parsed[\"version\"].split(\".\")\n+ s = self.parsed[\"version\"].split(\"-\", 1)[0].split(\".\")\n if len(s) > 1:\n return int(s[1])\n", "issue": "RedhatRelease parser failed to parse minor release version in some scenarios\nIn few cases where redhat_release content is something similar to below, RedhatRelease parser fails to get the minor version extracted from it\r\n\r\nRun:\r\n```\r\n>>> from insights.parsers.redhat_release import RedhatRelease\r\n>>> from insights.tests import context_wrap\r\n>>> RedhatRelease(context_wrap(\"Red Hat Enterprise Linux release 7.5-0.14\")).major\r\n7\r\n>>> RedhatRelease(context_wrap(\"Red Hat Enterprise Linux release 7.5-0.14\")).minor\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/opt/insights/insights-core/insights/parsers/redhat_release.py\", line 59, in minor\r\n return int(s[1])\r\nValueError: invalid literal for int() with base 10: '5-0'\r\n>>> RedhatRelease(context_wrap(\"Red Hat Enterprise Linux release 7.5-0.14\")).version\r\n'7.5-0.14'\r\n```\n", "before_files": [{"content": "\"\"\"\nredhat-release - File ``/etc/redhat-release``\n=============================================\n\nThis module provides plugins access to file ``/etc/redhat-release``\n\nTypical content of file ``/etc/redhat-release`` is::\n\n Red Hat Enterprise Linux Server release 7.2 (Maipo)\n\nThis module parses the file content and stores data in the dict ``self.parsed``.\nThe version info can also be get via ``obj.major`` and ``obj.minor``.\nProperty ``is_rhel`` and ``is_hypervisor`` specifies the host type.\n\nExamples:\n >>> rh_rls_content = '''\n ... Red Hat Enterprise Linux Server release 7.2 (Maipo)\n ... '''.strip()\n >>> from insights.tests import context_wrap\n >>> shared = {RedhatRelease: RedhatRelease(context_wrap(rh_rls_content))}\n >>> release = shared[RedhatRelease]\n >>> assert release.raw == rh_rls_content\n >>> assert release.major == 7\n >>> assert release.minor == 2\n >>> assert release.version == \"7.2\"\n >>> assert release.is_rhel\n >>> assert release.product == \"Red Hat Enterprise Linux Server\"\n\"\"\"\nfrom .. 
import Parser, parser\nfrom ..specs import Specs\n\n\n@parser(Specs.redhat_release)\nclass RedhatRelease(Parser):\n \"\"\"Parses the content of file ``/etc/redhat-release``.\"\"\"\n\n def parse_content(self, content):\n self.raw = content[0]\n product, _, version_name = [v.strip() for v in content[0].partition(\"release\")]\n version_name_split = [v.strip() for v in version_name.split(None, 1)]\n code_name = (version_name_split[1].strip(\"()\")\n if len(version_name_split) > 1 else None)\n self.parsed = {\n \"product\": product,\n \"version\": version_name_split[0],\n \"code_name\": code_name\n }\n\n @property\n def major(self):\n \"\"\"int: the major version of this OS.\"\"\"\n return int(self.parsed[\"version\"].split(\".\")[0])\n\n @property\n def minor(self):\n \"\"\"int: the minor version of this OS.\"\"\"\n s = self.parsed[\"version\"].split(\".\")\n if len(s) > 1:\n return int(s[1])\n\n @property\n def version(self):\n \"\"\"string: version of this OS.\"\"\"\n return self.parsed[\"version\"]\n\n @property\n def is_rhel(self):\n \"\"\"bool: True if this OS belong to RHEL, else False.\"\"\"\n return \"Red Hat Enterprise Linux\" in self.parsed[\"product\"]\n\n @property\n def product(self):\n \"\"\"string: product of this OS.\"\"\"\n return self.parsed[\"product\"]\n", "path": "insights/parsers/redhat_release.py"}], "after_files": [{"content": "\"\"\"\nredhat-release - File ``/etc/redhat-release``\n=============================================\n\nThis module provides plugins access to file ``/etc/redhat-release``\n\nTypical content of file ``/etc/redhat-release`` is::\n\n Red Hat Enterprise Linux Server release 7.2 (Maipo)\n\nThis module parses the file content and stores data in the dict ``self.parsed``.\nThe version info can also be get via ``obj.major`` and ``obj.minor``.\nProperty ``is_rhel`` and ``is_hypervisor`` specifies the host type.\n\nExamples:\n >>> rh_rls_content = '''\n ... Red Hat Enterprise Linux Server release 7.2 (Maipo)\n ... '''.strip()\n >>> from insights.tests import context_wrap\n >>> shared = {RedhatRelease: RedhatRelease(context_wrap(rh_rls_content))}\n >>> release = shared[RedhatRelease]\n >>> assert release.raw == rh_rls_content\n >>> assert release.major == 7\n >>> assert release.minor == 2\n >>> assert release.version == \"7.2\"\n >>> assert release.is_rhel\n >>> assert release.product == \"Red Hat Enterprise Linux Server\"\n\"\"\"\nfrom .. 
import Parser, parser\nfrom ..specs import Specs\n\n\n@parser(Specs.redhat_release)\nclass RedhatRelease(Parser):\n \"\"\"Parses the content of file ``/etc/redhat-release``.\"\"\"\n\n def parse_content(self, content):\n self.raw = content[0]\n product, _, version_name = [v.strip() for v in content[0].partition(\"release\")]\n version_name_split = [v.strip() for v in version_name.split(None, 1)]\n code_name = (version_name_split[1].strip(\"()\")\n if len(version_name_split) > 1 else None)\n self.parsed = {\n \"product\": product,\n \"version\": version_name_split[0],\n \"code_name\": code_name\n }\n\n @property\n def major(self):\n \"\"\"int: the major version of this OS.\"\"\"\n return int(self.parsed[\"version\"].split(\".\")[0])\n\n @property\n def minor(self):\n \"\"\"int: the minor version of this OS.\"\"\"\n s = self.parsed[\"version\"].split(\"-\", 1)[0].split(\".\")\n if len(s) > 1:\n return int(s[1])\n\n @property\n def version(self):\n \"\"\"string: version of this OS.\"\"\"\n return self.parsed[\"version\"]\n\n @property\n def is_rhel(self):\n \"\"\"bool: True if this OS belong to RHEL, else False.\"\"\"\n return \"Red Hat Enterprise Linux\" in self.parsed[\"product\"]\n\n @property\n def product(self):\n \"\"\"string: product of this OS.\"\"\"\n return self.parsed[\"product\"]\n", "path": "insights/parsers/redhat_release.py"}]} | 1,228 | 129 |
gh_patches_debug_10667 | rasdani/github-patches | git_diff | saleor__saleor-730 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
no_query_found method is missing.
Hello,
the user gets an error if he submits the search form without a query.
There is not "no_query_found" method inside SearchForm.
https://github.com/mirumee/saleor/blob/35956e20a85b66abc95e08491f38fa69d74d5f12/saleor/search/views.py#L28
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/search/views.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django.core.paginator import Paginator, InvalidPage
4 from django.conf import settings
5 from django.http import Http404
6 from django.shortcuts import render
7 from .forms import SearchForm
8 from ..product.utils import products_with_details
9
10
11 def paginate_results(results, get_data, paginate_by=25):
12 paginator = Paginator(results, paginate_by)
13 page_number = get_data.get('page', 1)
14 try:
15 page = paginator.page(page_number)
16 except InvalidPage:
17 raise Http404('No such page!')
18 return page
19
20
21 def search(request):
22 form = SearchForm(data=request.GET or None)
23 if form.is_valid():
24 visible_products = products_with_details(request.user)
25 results = form.search(model_or_queryset=visible_products)
26 page = paginate_results(results, request.GET, settings.PAGINATE_BY)
27 else:
28 page = form.no_query_found()
29 query = form.cleaned_data['q']
30 ctx = {
31 'query': query,
32 'results': page,
33 'query_string': '?q=%s' % query}
34 return render(request, 'search/results.html', ctx)
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/search/views.py b/saleor/search/views.py
--- a/saleor/search/views.py
+++ b/saleor/search/views.py
@@ -23,10 +23,11 @@
if form.is_valid():
visible_products = products_with_details(request.user)
results = form.search(model_or_queryset=visible_products)
- page = paginate_results(results, request.GET, settings.PAGINATE_BY)
+ query = form.cleaned_data.get('q', '')
else:
- page = form.no_query_found()
- query = form.cleaned_data['q']
+ results = []
+ query = ''
+ page = paginate_results(results, request.GET, settings.PAGINATE_BY)
ctx = {
'query': query,
'results': page,
| {"golden_diff": "diff --git a/saleor/search/views.py b/saleor/search/views.py\n--- a/saleor/search/views.py\n+++ b/saleor/search/views.py\n@@ -23,10 +23,11 @@\n if form.is_valid():\n visible_products = products_with_details(request.user)\n results = form.search(model_or_queryset=visible_products)\n- page = paginate_results(results, request.GET, settings.PAGINATE_BY)\n+ query = form.cleaned_data.get('q', '')\n else:\n- page = form.no_query_found()\n- query = form.cleaned_data['q']\n+ results = []\n+ query = ''\n+ page = paginate_results(results, request.GET, settings.PAGINATE_BY)\n ctx = {\n 'query': query,\n 'results': page,\n", "issue": "no_query_found method is missing.\nHello,\r\nthe user gets an error if he submits the search form without a query.\r\nThere is not \"no_query_found\" method inside SearchForm.\r\n\r\nhttps://github.com/mirumee/saleor/blob/35956e20a85b66abc95e08491f38fa69d74d5f12/saleor/search/views.py#L28\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.core.paginator import Paginator, InvalidPage\nfrom django.conf import settings\nfrom django.http import Http404\nfrom django.shortcuts import render\nfrom .forms import SearchForm\nfrom ..product.utils import products_with_details\n\n\ndef paginate_results(results, get_data, paginate_by=25):\n paginator = Paginator(results, paginate_by)\n page_number = get_data.get('page', 1)\n try:\n page = paginator.page(page_number)\n except InvalidPage:\n raise Http404('No such page!')\n return page\n\n\ndef search(request):\n form = SearchForm(data=request.GET or None)\n if form.is_valid():\n visible_products = products_with_details(request.user)\n results = form.search(model_or_queryset=visible_products)\n page = paginate_results(results, request.GET, settings.PAGINATE_BY)\n else:\n page = form.no_query_found()\n query = form.cleaned_data['q']\n ctx = {\n 'query': query,\n 'results': page,\n 'query_string': '?q=%s' % query}\n return render(request, 'search/results.html', ctx)\n", "path": "saleor/search/views.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.core.paginator import Paginator, InvalidPage\nfrom django.conf import settings\nfrom django.http import Http404\nfrom django.shortcuts import render\nfrom .forms import SearchForm\nfrom ..product.utils import products_with_details\n\n\ndef paginate_results(results, get_data, paginate_by=25):\n paginator = Paginator(results, paginate_by)\n page_number = get_data.get('page', 1)\n try:\n page = paginator.page(page_number)\n except InvalidPage:\n raise Http404('No such page!')\n return page\n\n\ndef search(request):\n form = SearchForm(data=request.GET or None)\n if form.is_valid():\n visible_products = products_with_details(request.user)\n results = form.search(model_or_queryset=visible_products)\n query = form.cleaned_data.get('q', '')\n else:\n results = []\n query = ''\n page = paginate_results(results, request.GET, settings.PAGINATE_BY)\n ctx = {\n 'query': query,\n 'results': page,\n 'query_string': '?q=%s' % query}\n return render(request, 'search/results.html', ctx)\n", "path": "saleor/search/views.py"}]} | 668 | 174 |
gh_patches_debug_11214 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2737 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py`
Content:
```
1 from dataclasses import dataclass
2 from torch.fx.node import Node
3 from torch.fx.graph import Graph
4 from torch.fx.graph_module import GraphModule
5 from collections import OrderedDict as ODict
6 from typing import List, OrderedDict, Union, Any
7 from colossalai.fx.passes.utils import get_node_module
8
9 __all__ = ['LiveVariable', 'LiveVariableVector', 'LiveStage', 'GraphAnalyser']
10
11
12 @dataclass
13 class LiveVariable:
14 """
15 LiveVariable is a data structure to store the meta information of a variable for liveness analysis.
16 """
17 name: str
18 node: Node
19 is_inplace: bool
20
21
22 class LiveVariableVector(list):
23 """
24 LiveVariableVector is a data structure to store the list of LiveVariable objects.
25 """
26
27 def exists(self, name) -> bool:
28 """
29 Check if a variable has already existed in the current list by name.
30 """
31 for var in self:
32 if name == var.name:
33 return True
34 return False
35
36 def get(self, name) -> LiveVariable:
37 for var in self:
38 if name == var.name:
39 return var
40 raise KeyError(f"Variable {name} is not found")
41
42 def copy(self) -> "LiveVariableVector":
43 """
44 Create a copy of this vector
45 """
46 vector = LiveVariableVector()
47 for var in self:
48 vector.append(var)
49 return vector
50
51
52 @dataclass
53 class LiveStage:
54 """
55 LiveStage is a data structure to record the living variables at this current node.
56 """
57 name: str
58 node: Node
59 all_live_vars: LiveVariableVector
60 unique_live_vars: LiveVariableVector
61
62
63 class GraphAnalyser:
64
65 def __init__(self, gm: GraphModule):
66 self._gm = gm
67 self._graph = gm.graph
68
69 @property
70 def gm(self) -> GraphModule:
71 """
72 Return the GraphModule object associated with this analyser.
73 """
74 return self._gm
75
76 @property
77 def graph(self) -> Graph:
78 """
79 Return the Graph object associated with this analyser.
80 """
81 return self._graph
82
83 def liveness_analysis(self) -> List[LiveStage]:
84 """
85 Analyse the graph to obtain the variable liveness information. This function returns
86 an ordered dictionary where the key is the compute stage ID and the value is a LivenessStage object.
87 """
88 compute_nodes = self.graph.nodes
89 liveness_list = []
90
91 # checked: record all variables created since the first stage
92 # all: record the live variables only exist until the current stage.
93 # this can be different from the `checked list`` as some varialbes may be destroyed prior to this stage.
94 # unique: record the unique live variables only exist until the current stage.
95 # this is different from `all list` as some variables are duplicated.
96 checked_variables = LiveVariableVector()
97 all_live_variables = LiveVariableVector()
98 unique_live_vars = LiveVariableVector()
99
100 for idx, node in enumerate(compute_nodes):
101 #############################
102 # find new living variables #
103 #############################
104 # detect whether the current op is an in-place op
105 # if it is an in-place op, we would deem it as a duplciate var
106 is_inplace = False
107 if node.op == 'call_function':
108 # check if this is an inplace op such as torch.nn.functional.relu(x, inplace=True)
109 if node.kwargs.get('inplace', False):
110 is_inplace = True
111 elif node.op == 'call_module':
112 # to check if this is an inplace op such as torch.nn.Relu(inplace=True)
113 module = get_node_module(node)
114 if getattr(module, 'inplace', False):
115 is_inplace = True
116
117 # add the output var
118 meta = getattr(node, '_meta_data', None)
119 live_var = LiveVariable(name=node.name, node=node, is_inplace=is_inplace)
120 if not is_inplace:
121 unique_live_vars.append(live_var)
122 checked_variables.append(live_var)
123 all_live_variables.append(live_var)
124
125 # check if any input is not checked yet
126 for arg in node.args:
127 if not isinstance(arg, Node):
128 continue
129 arg_name = arg.name
130 if not checked_variables.exists(arg_name):
131 live_var_from_arg = LiveVariable(name=arg_name, node=node, is_inplace=False)
132 all_live_variables.append(live_var_from_arg)
133 checked_variables.append(live_var_from_arg)
134 unique_live_vars.append(live_var_from_arg)
135
136 # TODO: add the logic to remove live variables
137 # this should be completed if we are able to trace the backward compute graph
138
139 # add this stage to liveness dict
140 stage = LiveStage(name=node.name,
141 node=node,
142 all_live_vars=all_live_variables.copy(),
143 unique_live_vars=unique_live_vars.copy())
144 # if a LiveStage is covered by another LiveStage, we just keep the larger one.
145 replace = False
146 for index, prev_stage in enumerate(liveness_list):
147 all_covered = True
148 for ele in prev_stage.unique_live_vars:
149 if ele not in stage.unique_live_vars:
150 all_covered = False
151 break
152 if all_covered:
153 replace = True
154 break
155 if replace:
156 liveness_list[index] = stage
157 else:
158 liveness_list.append(stage)
159
160 return liveness_list
161
162 def get_alias_set(self):
163 pass
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py b/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py
--- a/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py
+++ b/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py
@@ -1,9 +1,11 @@
+from collections import OrderedDict as ODict
from dataclasses import dataclass
-from torch.fx.node import Node
+from typing import Any, List, OrderedDict, Union
+
from torch.fx.graph import Graph
from torch.fx.graph_module import GraphModule
-from collections import OrderedDict as ODict
-from typing import List, OrderedDict, Union, Any
+from torch.fx.node import Node
+
from colossalai.fx.passes.utils import get_node_module
__all__ = ['LiveVariable', 'LiveVariableVector', 'LiveStage', 'GraphAnalyser']
| {"golden_diff": "diff --git a/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py b/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py\n--- a/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py\n+++ b/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py\n@@ -1,9 +1,11 @@\n+from collections import OrderedDict as ODict\n from dataclasses import dataclass\n-from torch.fx.node import Node\n+from typing import Any, List, OrderedDict, Union\n+\n from torch.fx.graph import Graph\n from torch.fx.graph_module import GraphModule\n-from collections import OrderedDict as ODict\n-from typing import List, OrderedDict, Union, Any\n+from torch.fx.node import Node\n+\n from colossalai.fx.passes.utils import get_node_module\n \n __all__ = ['LiveVariable', 'LiveVariableVector', 'LiveStage', 'GraphAnalyser']\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom torch.fx.node import Node\nfrom torch.fx.graph import Graph\nfrom torch.fx.graph_module import GraphModule\nfrom collections import OrderedDict as ODict\nfrom typing import List, OrderedDict, Union, Any\nfrom colossalai.fx.passes.utils import get_node_module\n\n__all__ = ['LiveVariable', 'LiveVariableVector', 'LiveStage', 'GraphAnalyser']\n\n\n@dataclass\nclass LiveVariable:\n \"\"\"\n LiveVariable is a data structure to store the meta information of a variable for liveness analysis.\n \"\"\"\n name: str\n node: Node\n is_inplace: bool\n\n\nclass LiveVariableVector(list):\n \"\"\"\n LiveVariableVector is a data structure to store the list of LiveVariable objects.\n \"\"\"\n\n def exists(self, name) -> bool:\n \"\"\"\n Check if a variable has already existed in the current list by name.\n \"\"\"\n for var in self:\n if name == var.name:\n return True\n return False\n\n def get(self, name) -> LiveVariable:\n for var in self:\n if name == var.name:\n return var\n raise KeyError(f\"Variable {name} is not found\")\n\n def copy(self) -> \"LiveVariableVector\":\n \"\"\"\n Create a copy of this vector\n \"\"\"\n vector = LiveVariableVector()\n for var in self:\n vector.append(var)\n return vector\n\n\n@dataclass\nclass LiveStage:\n \"\"\"\n LiveStage is a data structure to record the living variables at this current node.\n \"\"\"\n name: str\n node: Node\n all_live_vars: LiveVariableVector\n unique_live_vars: LiveVariableVector\n\n\nclass GraphAnalyser:\n\n def __init__(self, gm: GraphModule):\n self._gm = gm\n self._graph = gm.graph\n\n @property\n def gm(self) -> GraphModule:\n \"\"\"\n Return the GraphModule object associated with this analyser.\n \"\"\"\n return self._gm\n\n @property\n def graph(self) -> Graph:\n \"\"\"\n Return the Graph object associated with this analyser.\n \"\"\"\n return self._graph\n\n def liveness_analysis(self) -> List[LiveStage]:\n \"\"\"\n Analyse the graph to obtain the variable liveness information. 
This function returns\n an ordered dictionary where the key is the compute stage ID and the value is a LivenessStage object.\n \"\"\"\n compute_nodes = self.graph.nodes\n liveness_list = []\n\n # checked: record all variables created since the first stage\n # all: record the live variables only exist until the current stage.\n # this can be different from the `checked list`` as some varialbes may be destroyed prior to this stage.\n # unique: record the unique live variables only exist until the current stage.\n # this is different from `all list` as some variables are duplicated.\n checked_variables = LiveVariableVector()\n all_live_variables = LiveVariableVector()\n unique_live_vars = LiveVariableVector()\n\n for idx, node in enumerate(compute_nodes):\n #############################\n # find new living variables #\n #############################\n # detect whether the current op is an in-place op\n # if it is an in-place op, we would deem it as a duplciate var\n is_inplace = False\n if node.op == 'call_function':\n # check if this is an inplace op such as torch.nn.functional.relu(x, inplace=True)\n if node.kwargs.get('inplace', False):\n is_inplace = True\n elif node.op == 'call_module':\n # to check if this is an inplace op such as torch.nn.Relu(inplace=True)\n module = get_node_module(node)\n if getattr(module, 'inplace', False):\n is_inplace = True\n\n # add the output var\n meta = getattr(node, '_meta_data', None)\n live_var = LiveVariable(name=node.name, node=node, is_inplace=is_inplace)\n if not is_inplace:\n unique_live_vars.append(live_var)\n checked_variables.append(live_var)\n all_live_variables.append(live_var)\n\n # check if any input is not checked yet\n for arg in node.args:\n if not isinstance(arg, Node):\n continue\n arg_name = arg.name\n if not checked_variables.exists(arg_name):\n live_var_from_arg = LiveVariable(name=arg_name, node=node, is_inplace=False)\n all_live_variables.append(live_var_from_arg)\n checked_variables.append(live_var_from_arg)\n unique_live_vars.append(live_var_from_arg)\n\n # TODO: add the logic to remove live variables\n # this should be completed if we are able to trace the backward compute graph\n\n # add this stage to liveness dict\n stage = LiveStage(name=node.name,\n node=node,\n all_live_vars=all_live_variables.copy(),\n unique_live_vars=unique_live_vars.copy())\n # if a LiveStage is covered by another LiveStage, we just keep the larger one.\n replace = False\n for index, prev_stage in enumerate(liveness_list):\n all_covered = True\n for ele in prev_stage.unique_live_vars:\n if ele not in stage.unique_live_vars:\n all_covered = False\n break\n if all_covered:\n replace = True\n break\n if replace:\n liveness_list[index] = stage\n else:\n liveness_list.append(stage)\n\n return liveness_list\n\n def get_alias_set(self):\n pass\n", "path": "colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py"}], "after_files": [{"content": "from collections import OrderedDict as ODict\nfrom dataclasses import dataclass\nfrom typing import Any, List, OrderedDict, Union\n\nfrom torch.fx.graph import Graph\nfrom torch.fx.graph_module import GraphModule\nfrom torch.fx.node import Node\n\nfrom colossalai.fx.passes.utils import get_node_module\n\n__all__ = ['LiveVariable', 'LiveVariableVector', 'LiveStage', 'GraphAnalyser']\n\n\n@dataclass\nclass LiveVariable:\n \"\"\"\n LiveVariable is a data structure to store the meta information of a variable for liveness analysis.\n \"\"\"\n name: str\n node: Node\n is_inplace: bool\n\n\nclass 
LiveVariableVector(list):\n \"\"\"\n LiveVariableVector is a data structure to store the list of LiveVariable objects.\n \"\"\"\n\n def exists(self, name) -> bool:\n \"\"\"\n Check if a variable has already existed in the current list by name.\n \"\"\"\n for var in self:\n if name == var.name:\n return True\n return False\n\n def get(self, name) -> LiveVariable:\n for var in self:\n if name == var.name:\n return var\n raise KeyError(f\"Variable {name} is not found\")\n\n def copy(self) -> \"LiveVariableVector\":\n \"\"\"\n Create a copy of this vector\n \"\"\"\n vector = LiveVariableVector()\n for var in self:\n vector.append(var)\n return vector\n\n\n@dataclass\nclass LiveStage:\n \"\"\"\n LiveStage is a data structure to record the living variables at this current node.\n \"\"\"\n name: str\n node: Node\n all_live_vars: LiveVariableVector\n unique_live_vars: LiveVariableVector\n\n\nclass GraphAnalyser:\n\n def __init__(self, gm: GraphModule):\n self._gm = gm\n self._graph = gm.graph\n\n @property\n def gm(self) -> GraphModule:\n \"\"\"\n Return the GraphModule object associated with this analyser.\n \"\"\"\n return self._gm\n\n @property\n def graph(self) -> Graph:\n \"\"\"\n Return the Graph object associated with this analyser.\n \"\"\"\n return self._graph\n\n def liveness_analysis(self) -> List[LiveStage]:\n \"\"\"\n Analyse the graph to obtain the variable liveness information. This function returns\n an ordered dictionary where the key is the compute stage ID and the value is a LivenessStage object.\n \"\"\"\n compute_nodes = self.graph.nodes\n liveness_list = []\n\n # checked: record all variables created since the first stage\n # all: record the live variables only exist until the current stage.\n # this can be different from the `checked list`` as some varialbes may be destroyed prior to this stage.\n # unique: record the unique live variables only exist until the current stage.\n # this is different from `all list` as some variables are duplicated.\n checked_variables = LiveVariableVector()\n all_live_variables = LiveVariableVector()\n unique_live_vars = LiveVariableVector()\n\n for idx, node in enumerate(compute_nodes):\n #############################\n # find new living variables #\n #############################\n # detect whether the current op is an in-place op\n # if it is an in-place op, we would deem it as a duplciate var\n is_inplace = False\n if node.op == 'call_function':\n # check if this is an inplace op such as torch.nn.functional.relu(x, inplace=True)\n if node.kwargs.get('inplace', False):\n is_inplace = True\n elif node.op == 'call_module':\n # to check if this is an inplace op such as torch.nn.Relu(inplace=True)\n module = get_node_module(node)\n if getattr(module, 'inplace', False):\n is_inplace = True\n\n # add the output var\n meta = getattr(node, '_meta_data', None)\n live_var = LiveVariable(name=node.name, node=node, is_inplace=is_inplace)\n if not is_inplace:\n unique_live_vars.append(live_var)\n checked_variables.append(live_var)\n all_live_variables.append(live_var)\n\n # check if any input is not checked yet\n for arg in node.args:\n if not isinstance(arg, Node):\n continue\n arg_name = arg.name\n if not checked_variables.exists(arg_name):\n live_var_from_arg = LiveVariable(name=arg_name, node=node, is_inplace=False)\n all_live_variables.append(live_var_from_arg)\n checked_variables.append(live_var_from_arg)\n unique_live_vars.append(live_var_from_arg)\n\n # TODO: add the logic to remove live variables\n # this should be completed if we are able to 
trace the backward compute graph\n\n # add this stage to liveness dict\n stage = LiveStage(name=node.name,\n node=node,\n all_live_vars=all_live_variables.copy(),\n unique_live_vars=unique_live_vars.copy())\n # if a LiveStage is covered by another LiveStage, we just keep the larger one.\n replace = False\n for index, prev_stage in enumerate(liveness_list):\n all_covered = True\n for ele in prev_stage.unique_live_vars:\n if ele not in stage.unique_live_vars:\n all_covered = False\n break\n if all_covered:\n replace = True\n break\n if replace:\n liveness_list[index] = stage\n else:\n liveness_list.append(stage)\n\n return liveness_list\n\n def get_alias_set(self):\n pass\n", "path": "colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py"}]} | 1,872 | 197 |
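For orientation, `GraphAnalyser` above only needs a `GraphModule`, so it can be exercised on a tiny traced model. A sketch assuming `torch` is installed and the colossalai build still ships this deprecated module (the toy model is illustrative):

```python
import torch
from torch.fx import symbolic_trace

from colossalai.auto_parallel.tensor_shard.deprecated.graph_analysis import GraphAnalyser


class TwoOps(torch.nn.Module):
    def forward(self, x):
        y = torch.relu(x)      # call_function node, no inplace kwarg
        return y + x


gm = symbolic_trace(TwoOps())
stages = GraphAnalyser(gm).liveness_analysis()
for stage in stages:
    print(stage.name, [var.name for var in stage.unique_live_vars])
```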
gh_patches_debug_36427 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1116 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Type resolver not called when extending from pydantic model
I have 2 types, Category and Articles, and I want the Category type to return its related Articles (I have tried with other simpler types too), but when defining a resolver in the Category type it's not being called:
```python
def articles_helper(root):
return [] # <-whatever I return here, it always shows null
@strawberry.experimental.pydantic.type(
model=Category,
fields=[
'title',
'description',
'content',
'template',
'slug',
'date_published',
'date_updated',
])
class CategoryType(Category):
id: int
articles: Optional[List[ArticleType]] = strawberry.field(resolver=articles_helper) # <- resolver not called
```
Query type:
```python
@strawberry.type()
class Query:
category: Optional[CategoryType] = field(resolver=get_category) # this resolver is OK
```
Always returns `null`:
```
Query:
{
category(categoryId: 1) {
id
title
articles {
title
}
}
}
Response:
{
"data": {
"category": {
"id": 1,
"title": "test",
"articles": null <- always
}
}
}
```
**It works when I don't extend from a pydantic model**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/experimental/pydantic/object_type.py`
Content:
```
1 import builtins
2 import dataclasses
3 from functools import partial
4 from typing import Any, Dict, List, Optional, Type
5
6 from pydantic import BaseModel
7 from pydantic.fields import ModelField
8
9 from strawberry.arguments import UNSET
10 from strawberry.experimental.pydantic.conversion import (
11 convert_pydantic_model_to_strawberry_class,
12 )
13 from strawberry.experimental.pydantic.fields import get_basic_type
14 from strawberry.field import StrawberryField
15 from strawberry.object_type import _process_type
16 from strawberry.types.types import FederationTypeParams, TypeDefinition
17
18 from .exceptions import MissingFieldsListError, UnregisteredTypeException
19
20
21 def replace_pydantic_types(type_: Any):
22 if hasattr(type_, "__args__"):
23 new_type = type_.copy_with(
24 tuple(replace_pydantic_types(t) for t in type_.__args__)
25 )
26
27 if isinstance(new_type, TypeDefinition):
28 # TODO: Not sure if this is necessary. No coverage in tests
29 # TODO: Unnecessary with StrawberryObject
30
31 new_type = builtins.type(
32 new_type.name,
33 (),
34 {"_type_definition": new_type},
35 )
36
37 return new_type
38
39 if issubclass(type_, BaseModel):
40 if hasattr(type_, "_strawberry_type"):
41 return type_._strawberry_type
42 else:
43 raise UnregisteredTypeException(type_)
44
45 return type_
46
47
48 def get_type_for_field(field: ModelField):
49 type_ = field.outer_type_
50 type_ = get_basic_type(type_)
51 type_ = replace_pydantic_types(type_)
52
53 if not field.required:
54 type_ = Optional[type_]
55
56 return type_
57
58
59 def type(
60 model: Type[BaseModel],
61 *,
62 fields: List[str],
63 name: Optional[str] = None,
64 is_input: bool = False,
65 is_interface: bool = False,
66 description: Optional[str] = None,
67 federation: Optional[FederationTypeParams] = None,
68 ):
69 def wrap(cls):
70 if not fields:
71 raise MissingFieldsListError(model)
72
73 model_fields = model.__fields__
74 fields_set = set(fields)
75
76 all_fields = [
77 (
78 name,
79 get_type_for_field(field),
80 StrawberryField(
81 python_name=field.name,
82 graphql_name=field.alias if field.has_alias else None,
83 default=field.default if not field.required else UNSET,
84 default_factory=(
85 field.default_factory if field.default_factory else UNSET
86 ),
87 type_annotation=get_type_for_field(field),
88 ),
89 )
90 for name, field in model_fields.items()
91 if name in fields_set
92 ]
93
94 cls_annotations = getattr(cls, "__annotations__", {})
95 all_fields.extend(
96 (
97 (
98 name,
99 type_,
100 StrawberryField(
101 python_name=name,
102 graphql_name=None,
103 type_annotation=type_,
104 # we need a default value when adding additional fields
105 # on top of a type generated from Pydantic, this is because
106 # Pydantic Optional fields always have None as default value
107 # which breaks dataclasses generation; as we can't define
108 # a field without a default value after one with a default value
109 # adding fields at the beginning won't work as we will also
110 # support default values on them (so the problem will be just
111 # shifted around)
112 default=None,
113 ),
114 )
115 for name, type_ in cls_annotations.items()
116 )
117 )
118
119 cls = dataclasses.make_dataclass(
120 cls.__name__,
121 all_fields,
122 )
123
124 _process_type(
125 cls,
126 name=name,
127 is_input=is_input,
128 is_interface=is_interface,
129 description=description,
130 federation=federation,
131 )
132
133 model._strawberry_type = cls # type: ignore
134
135 def from_pydantic(instance: Any, extra: Dict[str, Any] = None) -> Any:
136 return convert_pydantic_model_to_strawberry_class(
137 cls=cls, model_instance=instance, extra=extra
138 )
139
140 def to_pydantic(self) -> Any:
141 instance_kwargs = dataclasses.asdict(self)
142
143 return model(**instance_kwargs)
144
145 cls.from_pydantic = staticmethod(from_pydantic)
146 cls.to_pydantic = to_pydantic
147
148 return cls
149
150 return wrap
151
152
153 input = partial(type, is_input=True)
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/experimental/pydantic/object_type.py b/strawberry/experimental/pydantic/object_type.py
--- a/strawberry/experimental/pydantic/object_type.py
+++ b/strawberry/experimental/pydantic/object_type.py
@@ -12,7 +12,8 @@
)
from strawberry.experimental.pydantic.fields import get_basic_type
from strawberry.field import StrawberryField
-from strawberry.object_type import _process_type
+from strawberry.object_type import _process_type, _wrap_dataclass
+from strawberry.types.type_resolver import _get_fields
from strawberry.types.types import FederationTypeParams, TypeDefinition
from .exceptions import MissingFieldsListError, UnregisteredTypeException
@@ -91,34 +92,36 @@
if name in fields_set
]
- cls_annotations = getattr(cls, "__annotations__", {})
+ wrapped = _wrap_dataclass(cls)
+ extra_fields = _get_fields(wrapped)
+
all_fields.extend(
(
(
- name,
- type_,
- StrawberryField(
- python_name=name,
- graphql_name=None,
- type_annotation=type_,
- # we need a default value when adding additional fields
- # on top of a type generated from Pydantic, this is because
- # Pydantic Optional fields always have None as default value
- # which breaks dataclasses generation; as we can't define
- # a field without a default value after one with a default value
- # adding fields at the beginning won't work as we will also
- # support default values on them (so the problem will be just
- # shifted around)
- default=None,
- ),
+ field.name,
+ field.type,
+ field,
)
- for name, type_ in cls_annotations.items()
+ for field in extra_fields
)
)
+ # Sort fields so that fields with missing defaults go first
+ # because dataclasses require that fields with no defaults are defined
+ # first
+ missing_default = []
+ has_default = []
+ for field in all_fields:
+ if field[2].default is dataclasses.MISSING:
+ missing_default.append(field)
+ else:
+ has_default.append(field)
+
+ sorted_fields = missing_default + has_default
+
cls = dataclasses.make_dataclass(
cls.__name__,
- all_fields,
+ sorted_fields,
)
_process_type(
| {"golden_diff": "diff --git a/strawberry/experimental/pydantic/object_type.py b/strawberry/experimental/pydantic/object_type.py\n--- a/strawberry/experimental/pydantic/object_type.py\n+++ b/strawberry/experimental/pydantic/object_type.py\n@@ -12,7 +12,8 @@\n )\n from strawberry.experimental.pydantic.fields import get_basic_type\n from strawberry.field import StrawberryField\n-from strawberry.object_type import _process_type\n+from strawberry.object_type import _process_type, _wrap_dataclass\n+from strawberry.types.type_resolver import _get_fields\n from strawberry.types.types import FederationTypeParams, TypeDefinition\n \n from .exceptions import MissingFieldsListError, UnregisteredTypeException\n@@ -91,34 +92,36 @@\n if name in fields_set\n ]\n \n- cls_annotations = getattr(cls, \"__annotations__\", {})\n+ wrapped = _wrap_dataclass(cls)\n+ extra_fields = _get_fields(wrapped)\n+\n all_fields.extend(\n (\n (\n- name,\n- type_,\n- StrawberryField(\n- python_name=name,\n- graphql_name=None,\n- type_annotation=type_,\n- # we need a default value when adding additional fields\n- # on top of a type generated from Pydantic, this is because\n- # Pydantic Optional fields always have None as default value\n- # which breaks dataclasses generation; as we can't define\n- # a field without a default value after one with a default value\n- # adding fields at the beginning won't work as we will also\n- # support default values on them (so the problem will be just\n- # shifted around)\n- default=None,\n- ),\n+ field.name,\n+ field.type,\n+ field,\n )\n- for name, type_ in cls_annotations.items()\n+ for field in extra_fields\n )\n )\n \n+ # Sort fields so that fields with missing defaults go first\n+ # because dataclasses require that fields with no defaults are defined\n+ # first\n+ missing_default = []\n+ has_default = []\n+ for field in all_fields:\n+ if field[2].default is dataclasses.MISSING:\n+ missing_default.append(field)\n+ else:\n+ has_default.append(field)\n+\n+ sorted_fields = missing_default + has_default\n+\n cls = dataclasses.make_dataclass(\n cls.__name__,\n- all_fields,\n+ sorted_fields,\n )\n \n _process_type(\n", "issue": "Type resolver not called when extending from pydantic model\nI have 2 types, Category and Articles, and I want the Category type to return its related Articles(I have tried with other simpler types too), but when defining a resolver in the Category type its not being called:\r\n\r\n```python\r\ndef articles_helper(root): \r\n return [] # <-whatever I return here, it always shows null\r\n \r\n \r\[email protected](\r\n model=Category,\r\n fields=[\r\n 'title',\r\n 'description',\r\n 'content',\r\n 'template',\r\n 'slug',\r\n 'date_published',\r\n 'date_updated',\r\n ])\r\nclass CategoryType(Category):\r\n id: int\r\n articles: Optional[List[ArticleType]] = strawberry.field(resolver=articles_helper) # <- resolver not called\r\n``` \r\n\r\nQuery type:\r\n```python\r\[email protected]()\r\nclass Query:\r\n category: Optional[CategoryType] = field(resolver=get_category) # this resolver is OK\r\n```\r\n\r\nAlways returns `null`:\r\n```\r\nQuery:\r\n\r\n{\r\n category(categoryId: 1) {\r\n id\r\n title\r\n articles {\r\n title\r\n }\r\n }\r\n}\r\n\r\nResponse:\r\n\r\n{\r\n \"data\": {\r\n \"category\": {\r\n \"id\": 1,\r\n \"title\": \"test\",\r\n \"articles\": null <- always\r\n }\r\n }\r\n}\r\n```\r\n\r\n**I works when I don't extend from a pydantic model**\n", "before_files": [{"content": "import builtins\nimport dataclasses\nfrom functools import partial\nfrom typing 
import Any, Dict, List, Optional, Type\n\nfrom pydantic import BaseModel\nfrom pydantic.fields import ModelField\n\nfrom strawberry.arguments import UNSET\nfrom strawberry.experimental.pydantic.conversion import (\n convert_pydantic_model_to_strawberry_class,\n)\nfrom strawberry.experimental.pydantic.fields import get_basic_type\nfrom strawberry.field import StrawberryField\nfrom strawberry.object_type import _process_type\nfrom strawberry.types.types import FederationTypeParams, TypeDefinition\n\nfrom .exceptions import MissingFieldsListError, UnregisteredTypeException\n\n\ndef replace_pydantic_types(type_: Any):\n if hasattr(type_, \"__args__\"):\n new_type = type_.copy_with(\n tuple(replace_pydantic_types(t) for t in type_.__args__)\n )\n\n if isinstance(new_type, TypeDefinition):\n # TODO: Not sure if this is necessary. No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n\n new_type = builtins.type(\n new_type.name,\n (),\n {\"_type_definition\": new_type},\n )\n\n return new_type\n\n if issubclass(type_, BaseModel):\n if hasattr(type_, \"_strawberry_type\"):\n return type_._strawberry_type\n else:\n raise UnregisteredTypeException(type_)\n\n return type_\n\n\ndef get_type_for_field(field: ModelField):\n type_ = field.outer_type_\n type_ = get_basic_type(type_)\n type_ = replace_pydantic_types(type_)\n\n if not field.required:\n type_ = Optional[type_]\n\n return type_\n\n\ndef type(\n model: Type[BaseModel],\n *,\n fields: List[str],\n name: Optional[str] = None,\n is_input: bool = False,\n is_interface: bool = False,\n description: Optional[str] = None,\n federation: Optional[FederationTypeParams] = None,\n):\n def wrap(cls):\n if not fields:\n raise MissingFieldsListError(model)\n\n model_fields = model.__fields__\n fields_set = set(fields)\n\n all_fields = [\n (\n name,\n get_type_for_field(field),\n StrawberryField(\n python_name=field.name,\n graphql_name=field.alias if field.has_alias else None,\n default=field.default if not field.required else UNSET,\n default_factory=(\n field.default_factory if field.default_factory else UNSET\n ),\n type_annotation=get_type_for_field(field),\n ),\n )\n for name, field in model_fields.items()\n if name in fields_set\n ]\n\n cls_annotations = getattr(cls, \"__annotations__\", {})\n all_fields.extend(\n (\n (\n name,\n type_,\n StrawberryField(\n python_name=name,\n graphql_name=None,\n type_annotation=type_,\n # we need a default value when adding additional fields\n # on top of a type generated from Pydantic, this is because\n # Pydantic Optional fields always have None as default value\n # which breaks dataclasses generation; as we can't define\n # a field without a default value after one with a default value\n # adding fields at the beginning won't work as we will also\n # support default values on them (so the problem will be just\n # shifted around)\n default=None,\n ),\n )\n for name, type_ in cls_annotations.items()\n )\n )\n\n cls = dataclasses.make_dataclass(\n cls.__name__,\n all_fields,\n )\n\n _process_type(\n cls,\n name=name,\n is_input=is_input,\n is_interface=is_interface,\n description=description,\n federation=federation,\n )\n\n model._strawberry_type = cls # type: ignore\n\n def from_pydantic(instance: Any, extra: Dict[str, Any] = None) -> Any:\n return convert_pydantic_model_to_strawberry_class(\n cls=cls, model_instance=instance, extra=extra\n )\n\n def to_pydantic(self) -> Any:\n instance_kwargs = dataclasses.asdict(self)\n\n return model(**instance_kwargs)\n\n cls.from_pydantic = 
staticmethod(from_pydantic)\n cls.to_pydantic = to_pydantic\n\n return cls\n\n return wrap\n\n\ninput = partial(type, is_input=True)\n", "path": "strawberry/experimental/pydantic/object_type.py"}], "after_files": [{"content": "import builtins\nimport dataclasses\nfrom functools import partial\nfrom typing import Any, Dict, List, Optional, Type\n\nfrom pydantic import BaseModel\nfrom pydantic.fields import ModelField\n\nfrom strawberry.arguments import UNSET\nfrom strawberry.experimental.pydantic.conversion import (\n convert_pydantic_model_to_strawberry_class,\n)\nfrom strawberry.experimental.pydantic.fields import get_basic_type\nfrom strawberry.field import StrawberryField\nfrom strawberry.object_type import _process_type, _wrap_dataclass\nfrom strawberry.types.type_resolver import _get_fields\nfrom strawberry.types.types import FederationTypeParams, TypeDefinition\n\nfrom .exceptions import MissingFieldsListError, UnregisteredTypeException\n\n\ndef replace_pydantic_types(type_: Any):\n if hasattr(type_, \"__args__\"):\n new_type = type_.copy_with(\n tuple(replace_pydantic_types(t) for t in type_.__args__)\n )\n\n if isinstance(new_type, TypeDefinition):\n # TODO: Not sure if this is necessary. No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n\n new_type = builtins.type(\n new_type.name,\n (),\n {\"_type_definition\": new_type},\n )\n\n return new_type\n\n if issubclass(type_, BaseModel):\n if hasattr(type_, \"_strawberry_type\"):\n return type_._strawberry_type\n else:\n raise UnregisteredTypeException(type_)\n\n return type_\n\n\ndef get_type_for_field(field: ModelField):\n type_ = field.outer_type_\n type_ = get_basic_type(type_)\n type_ = replace_pydantic_types(type_)\n\n if not field.required:\n type_ = Optional[type_]\n\n return type_\n\n\ndef type(\n model: Type[BaseModel],\n *,\n fields: List[str],\n name: Optional[str] = None,\n is_input: bool = False,\n is_interface: bool = False,\n description: Optional[str] = None,\n federation: Optional[FederationTypeParams] = None,\n):\n def wrap(cls):\n if not fields:\n raise MissingFieldsListError(model)\n\n model_fields = model.__fields__\n fields_set = set(fields)\n\n all_fields = [\n (\n name,\n get_type_for_field(field),\n StrawberryField(\n python_name=field.name,\n graphql_name=field.alias if field.has_alias else None,\n default=field.default if not field.required else UNSET,\n default_factory=(\n field.default_factory if field.default_factory else UNSET\n ),\n type_annotation=get_type_for_field(field),\n ),\n )\n for name, field in model_fields.items()\n if name in fields_set\n ]\n\n wrapped = _wrap_dataclass(cls)\n extra_fields = _get_fields(wrapped)\n\n all_fields.extend(\n (\n (\n field.name,\n field.type,\n field,\n )\n for field in extra_fields\n )\n )\n\n # Sort fields so that fields with missing defaults go first\n # because dataclasses require that fields with no defaults are defined\n # first\n missing_default = []\n has_default = []\n for field in all_fields:\n if field[2].default is dataclasses.MISSING:\n missing_default.append(field)\n else:\n has_default.append(field)\n\n sorted_fields = missing_default + has_default\n\n cls = dataclasses.make_dataclass(\n cls.__name__,\n sorted_fields,\n )\n\n _process_type(\n cls,\n name=name,\n is_input=is_input,\n is_interface=is_interface,\n description=description,\n federation=federation,\n )\n\n model._strawberry_type = cls # type: ignore\n\n def from_pydantic(instance: Any, extra: Dict[str, Any] = None) -> Any:\n return 
convert_pydantic_model_to_strawberry_class(\n cls=cls, model_instance=instance, extra=extra\n )\n\n def to_pydantic(self) -> Any:\n instance_kwargs = dataclasses.asdict(self)\n\n return model(**instance_kwargs)\n\n cls.from_pydantic = staticmethod(from_pydantic)\n cls.to_pydantic = to_pydantic\n\n return cls\n\n return wrap\n\n\ninput = partial(type, is_input=True)\n", "path": "strawberry/experimental/pydantic/object_type.py"}]} | 1,870 | 549 |
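The user-visible effect of the patch is that extra fields declared on a pydantic-backed type keep their resolvers instead of collapsing to `null`. A trimmed-down sketch of the reproduction from the issue (a list of strings stands in for `ArticleType`, and the introspection attribute names follow the strawberry code shown here, so they may differ in other versions):

```python
from typing import List, Optional

import strawberry
from pydantic import BaseModel


class Category(BaseModel):
    title: str


def articles_helper(root) -> List[str]:
    return ["first article", "second article"]  # placeholder payload


@strawberry.experimental.pydantic.type(model=Category, fields=["title"])
class CategoryType:
    articles: Optional[List[str]] = strawberry.field(resolver=articles_helper)


# After the fix, the extra field is registered together with its resolver.
for field in CategoryType._type_definition.fields:
    print(field.python_name, field.base_resolver is not None)
```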
gh_patches_debug_39393 | rasdani/github-patches | git_diff | conda__conda-7827 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fix `conda env create` using remote env file
Fix `conda env create mfix/mfix-18.1.0-osx64` in master. Broken in https://github.com/conda/conda/pull/7385.
xref: https://github.com/conda/conda/issues/7111
Can also remove the deprecation warnings now (they were apparently placed in the wrong code location to begin with).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_env/specs/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (C) 2012 Anaconda, Inc
3 # SPDX-License-Identifier: BSD-3-Clause
4
5 import os
6
7 from .binstar import BinstarSpec
8 from .notebook import NotebookSpec
9 from .requirements import RequirementsSpec
10 from .yaml_file import YamlFileSpec
11 from ..exceptions import (EnvironmentFileExtensionNotValid, EnvironmentFileNotFound,
12 SpecNotFound)
13
14
15 def detect(**kwargs):
16 # Check file existence
17 filename = kwargs.get('filename')
18 if filename and not os.path.isfile(filename):
19 raise EnvironmentFileNotFound(filename=filename)
20
21 # Check extensions
22 all_valid_exts = YamlFileSpec.extensions.union(RequirementsSpec.extensions)
23 fname, ext = os.path.splitext(filename)
24 if ext == '' or ext not in all_valid_exts:
25 raise EnvironmentFileExtensionNotValid(filename)
26 elif ext in YamlFileSpec.extensions:
27 specs = [YamlFileSpec]
28 elif ext in RequirementsSpec.extensions:
29 specs = [RequirementsSpec]
30 else:
31 specs = [NotebookSpec, BinstarSpec]
32
33 # Check specifications
34 for SpecClass in specs:
35 spec = SpecClass(**kwargs)
36 if spec.can_handle():
37 return spec
38
39 raise SpecNotFound(build_message(specs))
40
41
42 def build_message(specs):
43 binstar_spec = next((spec for spec in specs if isinstance(spec, BinstarSpec)), None)
44 if binstar_spec:
45 return binstar_spec.msg
46 else:
47 return "\n".join([s.msg for s in specs if s.msg is not None])
48
```
Path: `conda_env/specs/binstar.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (C) 2012 Anaconda, Inc
3 # SPDX-License-Identifier: BSD-3-Clause
4 import re
5
6 from conda.models.version import normalized_version
7 from .. import env
8 from ..exceptions import EnvironmentFileNotDownloaded
9
10 try:
11 from binstar_client import errors
12 from binstar_client.utils import get_server_api
13 except ImportError:
14 get_server_api = None
15
16 ENVIRONMENT_TYPE = 'env'
17 # TODO: isolate binstar related code into conda_env.utils.binstar
18
19
20 class BinstarSpec(object):
21 """
22 spec = BinstarSpec('darth/deathstar')
23 spec.can_handle() # => True / False
24 spec.environment # => YAML string
25 spec.msg # => Error messages
26 :raises: EnvironmentFileDoesNotExist, EnvironmentFileNotDownloaded
27 """
28
29 _environment = None
30 _username = None
31 _packagename = None
32 _package = None
33 _file_data = None
34 msg = None
35
36 def __init__(self, name=None, **kwargs):
37 self.name = name
38 self.quiet = False
39 if get_server_api is not None:
40 self.binstar = get_server_api()
41 else:
42 self.binstar = None
43
44 def can_handle(self):
45 result = self._can_handle()
46 if result:
47 print("WARNING: Binstar environments are deprecated and scheduled to be "
48 "removed in conda 4.5. See conda issue #5843 at "
49 "https://github.com/conda/conda/pull/5843 for more information.")
50 return result
51
52 def _can_handle(self):
53 """
54 Validates loader can process environment definition.
55 :return: True or False
56 """
57 # TODO: log information about trying to find the package in binstar.org
58 if self.valid_name():
59 if self.binstar is None:
60 self.msg = ("Anaconda Client is required to interact with anaconda.org or an "
61 "Anaconda API. Please run `conda install anaconda-client`.")
62 return False
63 return self.package is not None and self.valid_package()
64 return False
65
66 def valid_name(self):
67 """
68 Validates name
69 :return: True or False
70 """
71 if re.match("^(.+)/(.+)$", str(self.name)) is not None:
72 return True
73 elif self.name is None:
74 self.msg = "Can't process without a name"
75 else:
76 self.msg = "Invalid name, try the format: user/package"
77 return False
78
79 def valid_package(self):
80 """
81 Returns True if package has an environment file
82 :return: True or False
83 """
84 return len(self.file_data) > 0
85
86 @property
87 def file_data(self):
88 if self._file_data is None:
89 self._file_data = [data
90 for data in self.package['files']
91 if data['type'] == ENVIRONMENT_TYPE]
92 return self._file_data
93
94 @property
95 def environment(self):
96 """
97 :raises: EnvironmentFileNotDownloaded
98 """
99 if self._environment is None:
100 versions = [{'normalized': normalized_version(d['version']), 'original': d['version']}
101 for d in self.file_data]
102 latest_version = max(versions, key=lambda x: x['normalized'])['original']
103 file_data = [data
104 for data in self.package['files']
105 if data['version'] == latest_version]
106 req = self.binstar.download(self.username, self.packagename, latest_version,
107 file_data[0]['basename'])
108 if req is None:
109 raise EnvironmentFileNotDownloaded(self.username, self.packagename)
110 self._environment = req.text
111 return env.from_yaml(self._environment)
112
113 @property
114 def package(self):
115 if self._package is None:
116 try:
117 self._package = self.binstar.package(self.username, self.packagename)
118 except errors.NotFound:
119 self.msg = "{} was not found on anaconda.org.\n"\
120 "You may need to be logged in. Try running:\n"\
121 " anaconda login".format(self.name)
122 return self._package
123
124 @property
125 def username(self):
126 if self._username is None:
127 self._username = self.parse()[0]
128 return self._username
129
130 @property
131 def packagename(self):
132 if self._packagename is None:
133 self._packagename = self.parse()[1]
134 return self._packagename
135
136 def parse(self):
137 """Parse environment definition handle"""
138 return self.name.split('/', 1)
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_env/specs/__init__.py b/conda_env/specs/__init__.py
--- a/conda_env/specs/__init__.py
+++ b/conda_env/specs/__init__.py
@@ -13,35 +13,42 @@
def detect(**kwargs):
- # Check file existence
filename = kwargs.get('filename')
- if filename and not os.path.isfile(filename):
- raise EnvironmentFileNotFound(filename=filename)
+ remote_definition = kwargs.get('name')
# Check extensions
all_valid_exts = YamlFileSpec.extensions.union(RequirementsSpec.extensions)
fname, ext = os.path.splitext(filename)
- if ext == '' or ext not in all_valid_exts:
- raise EnvironmentFileExtensionNotValid(filename)
- elif ext in YamlFileSpec.extensions:
- specs = [YamlFileSpec]
- elif ext in RequirementsSpec.extensions:
- specs = [RequirementsSpec]
+
+ # First check if file exists and test the known valid extension for specs
+ file_exists = filename and os.path.isfile(filename)
+ if file_exists:
+ if ext == '' or ext not in all_valid_exts:
+ raise EnvironmentFileExtensionNotValid(filename)
+ elif ext in YamlFileSpec.extensions:
+ specs = [YamlFileSpec]
+ elif ext in RequirementsSpec.extensions:
+ specs = [RequirementsSpec]
else:
specs = [NotebookSpec, BinstarSpec]
# Check specifications
+ spec_instances = []
for SpecClass in specs:
spec = SpecClass(**kwargs)
+ spec_instances.append(spec)
if spec.can_handle():
return spec
- raise SpecNotFound(build_message(specs))
+ if not file_exists and remote_definition is None:
+ raise EnvironmentFileNotFound(filename=filename)
+ else:
+ raise SpecNotFound(build_message(spec_instances))
-def build_message(specs):
- binstar_spec = next((spec for spec in specs if isinstance(spec, BinstarSpec)), None)
+def build_message(spec_instances):
+ binstar_spec = next((s for s in spec_instances if isinstance(s, BinstarSpec)), None)
if binstar_spec:
return binstar_spec.msg
else:
- return "\n".join([s.msg for s in specs if s.msg is not None])
+ return "\n".join([s.msg for s in spec_instances if s.msg is not None])
diff --git a/conda_env/specs/binstar.py b/conda_env/specs/binstar.py
--- a/conda_env/specs/binstar.py
+++ b/conda_env/specs/binstar.py
@@ -43,10 +43,6 @@
def can_handle(self):
result = self._can_handle()
- if result:
- print("WARNING: Binstar environments are deprecated and scheduled to be "
- "removed in conda 4.5. See conda issue #5843 at "
- "https://github.com/conda/conda/pull/5843 for more information.")
return result
def _can_handle(self):
@@ -60,6 +56,7 @@
self.msg = ("Anaconda Client is required to interact with anaconda.org or an "
"Anaconda API. Please run `conda install anaconda-client`.")
return False
+
return self.package is not None and self.valid_package()
return False
| {"golden_diff": "diff --git a/conda_env/specs/__init__.py b/conda_env/specs/__init__.py\n--- a/conda_env/specs/__init__.py\n+++ b/conda_env/specs/__init__.py\n@@ -13,35 +13,42 @@\n \n \n def detect(**kwargs):\n- # Check file existence\n filename = kwargs.get('filename')\n- if filename and not os.path.isfile(filename):\n- raise EnvironmentFileNotFound(filename=filename)\n+ remote_definition = kwargs.get('name')\n \n # Check extensions\n all_valid_exts = YamlFileSpec.extensions.union(RequirementsSpec.extensions)\n fname, ext = os.path.splitext(filename)\n- if ext == '' or ext not in all_valid_exts:\n- raise EnvironmentFileExtensionNotValid(filename)\n- elif ext in YamlFileSpec.extensions:\n- specs = [YamlFileSpec]\n- elif ext in RequirementsSpec.extensions:\n- specs = [RequirementsSpec]\n+\n+ # First check if file exists and test the known valid extension for specs\n+ file_exists = filename and os.path.isfile(filename)\n+ if file_exists:\n+ if ext == '' or ext not in all_valid_exts:\n+ raise EnvironmentFileExtensionNotValid(filename)\n+ elif ext in YamlFileSpec.extensions:\n+ specs = [YamlFileSpec]\n+ elif ext in RequirementsSpec.extensions:\n+ specs = [RequirementsSpec]\n else:\n specs = [NotebookSpec, BinstarSpec]\n \n # Check specifications\n+ spec_instances = []\n for SpecClass in specs:\n spec = SpecClass(**kwargs)\n+ spec_instances.append(spec)\n if spec.can_handle():\n return spec\n \n- raise SpecNotFound(build_message(specs))\n+ if not file_exists and remote_definition is None:\n+ raise EnvironmentFileNotFound(filename=filename)\n+ else:\n+ raise SpecNotFound(build_message(spec_instances))\n \n \n-def build_message(specs):\n- binstar_spec = next((spec for spec in specs if isinstance(spec, BinstarSpec)), None)\n+def build_message(spec_instances):\n+ binstar_spec = next((s for s in spec_instances if isinstance(s, BinstarSpec)), None)\n if binstar_spec:\n return binstar_spec.msg\n else:\n- return \"\\n\".join([s.msg for s in specs if s.msg is not None])\n+ return \"\\n\".join([s.msg for s in spec_instances if s.msg is not None])\ndiff --git a/conda_env/specs/binstar.py b/conda_env/specs/binstar.py\n--- a/conda_env/specs/binstar.py\n+++ b/conda_env/specs/binstar.py\n@@ -43,10 +43,6 @@\n \n def can_handle(self):\n result = self._can_handle()\n- if result:\n- print(\"WARNING: Binstar environments are deprecated and scheduled to be \"\n- \"removed in conda 4.5. See conda issue #5843 at \"\n- \"https://github.com/conda/conda/pull/5843 for more information.\")\n return result\n \n def _can_handle(self):\n@@ -60,6 +56,7 @@\n self.msg = (\"Anaconda Client is required to interact with anaconda.org or an \"\n \"Anaconda API. Please run `conda install anaconda-client`.\")\n return False\n+\n return self.package is not None and self.valid_package()\n return False\n", "issue": "fix 'conda env create` using remote env file\nFix `conda env create mfix/mfix-18.1.0-osx64` in master. 
Broken in https://github.com/conda/conda/pull/7385.\r\n\r\nxref: https://github.com/conda/conda/issues/7111\r\n\r\nCan also remove the deprecation warnings now (they were apparently placed in the wrong code location to begin with).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport os\n\nfrom .binstar import BinstarSpec\nfrom .notebook import NotebookSpec\nfrom .requirements import RequirementsSpec\nfrom .yaml_file import YamlFileSpec\nfrom ..exceptions import (EnvironmentFileExtensionNotValid, EnvironmentFileNotFound,\n SpecNotFound)\n\n\ndef detect(**kwargs):\n # Check file existence\n filename = kwargs.get('filename')\n if filename and not os.path.isfile(filename):\n raise EnvironmentFileNotFound(filename=filename)\n\n # Check extensions\n all_valid_exts = YamlFileSpec.extensions.union(RequirementsSpec.extensions)\n fname, ext = os.path.splitext(filename)\n if ext == '' or ext not in all_valid_exts:\n raise EnvironmentFileExtensionNotValid(filename)\n elif ext in YamlFileSpec.extensions:\n specs = [YamlFileSpec]\n elif ext in RequirementsSpec.extensions:\n specs = [RequirementsSpec]\n else:\n specs = [NotebookSpec, BinstarSpec]\n\n # Check specifications\n for SpecClass in specs:\n spec = SpecClass(**kwargs)\n if spec.can_handle():\n return spec\n\n raise SpecNotFound(build_message(specs))\n\n\ndef build_message(specs):\n binstar_spec = next((spec for spec in specs if isinstance(spec, BinstarSpec)), None)\n if binstar_spec:\n return binstar_spec.msg\n else:\n return \"\\n\".join([s.msg for s in specs if s.msg is not None])\n", "path": "conda_env/specs/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\nimport re\n\nfrom conda.models.version import normalized_version\nfrom .. import env\nfrom ..exceptions import EnvironmentFileNotDownloaded\n\ntry:\n from binstar_client import errors\n from binstar_client.utils import get_server_api\nexcept ImportError:\n get_server_api = None\n\nENVIRONMENT_TYPE = 'env'\n# TODO: isolate binstar related code into conda_env.utils.binstar\n\n\nclass BinstarSpec(object):\n \"\"\"\n spec = BinstarSpec('darth/deathstar')\n spec.can_handle() # => True / False\n spec.environment # => YAML string\n spec.msg # => Error messages\n :raises: EnvironmentFileDoesNotExist, EnvironmentFileNotDownloaded\n \"\"\"\n\n _environment = None\n _username = None\n _packagename = None\n _package = None\n _file_data = None\n msg = None\n\n def __init__(self, name=None, **kwargs):\n self.name = name\n self.quiet = False\n if get_server_api is not None:\n self.binstar = get_server_api()\n else:\n self.binstar = None\n\n def can_handle(self):\n result = self._can_handle()\n if result:\n print(\"WARNING: Binstar environments are deprecated and scheduled to be \"\n \"removed in conda 4.5. See conda issue #5843 at \"\n \"https://github.com/conda/conda/pull/5843 for more information.\")\n return result\n\n def _can_handle(self):\n \"\"\"\n Validates loader can process environment definition.\n :return: True or False\n \"\"\"\n # TODO: log information about trying to find the package in binstar.org\n if self.valid_name():\n if self.binstar is None:\n self.msg = (\"Anaconda Client is required to interact with anaconda.org or an \"\n \"Anaconda API. 
Please run `conda install anaconda-client`.\")\n return False\n return self.package is not None and self.valid_package()\n return False\n\n def valid_name(self):\n \"\"\"\n Validates name\n :return: True or False\n \"\"\"\n if re.match(\"^(.+)/(.+)$\", str(self.name)) is not None:\n return True\n elif self.name is None:\n self.msg = \"Can't process without a name\"\n else:\n self.msg = \"Invalid name, try the format: user/package\"\n return False\n\n def valid_package(self):\n \"\"\"\n Returns True if package has an environment file\n :return: True or False\n \"\"\"\n return len(self.file_data) > 0\n\n @property\n def file_data(self):\n if self._file_data is None:\n self._file_data = [data\n for data in self.package['files']\n if data['type'] == ENVIRONMENT_TYPE]\n return self._file_data\n\n @property\n def environment(self):\n \"\"\"\n :raises: EnvironmentFileNotDownloaded\n \"\"\"\n if self._environment is None:\n versions = [{'normalized': normalized_version(d['version']), 'original': d['version']}\n for d in self.file_data]\n latest_version = max(versions, key=lambda x: x['normalized'])['original']\n file_data = [data\n for data in self.package['files']\n if data['version'] == latest_version]\n req = self.binstar.download(self.username, self.packagename, latest_version,\n file_data[0]['basename'])\n if req is None:\n raise EnvironmentFileNotDownloaded(self.username, self.packagename)\n self._environment = req.text\n return env.from_yaml(self._environment)\n\n @property\n def package(self):\n if self._package is None:\n try:\n self._package = self.binstar.package(self.username, self.packagename)\n except errors.NotFound:\n self.msg = \"{} was not found on anaconda.org.\\n\"\\\n \"You may need to be logged in. Try running:\\n\"\\\n \" anaconda login\".format(self.name)\n return self._package\n\n @property\n def username(self):\n if self._username is None:\n self._username = self.parse()[0]\n return self._username\n\n @property\n def packagename(self):\n if self._packagename is None:\n self._packagename = self.parse()[1]\n return self._packagename\n\n def parse(self):\n \"\"\"Parse environment definition handle\"\"\"\n return self.name.split('/', 1)\n", "path": "conda_env/specs/binstar.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport os\n\nfrom .binstar import BinstarSpec\nfrom .notebook import NotebookSpec\nfrom .requirements import RequirementsSpec\nfrom .yaml_file import YamlFileSpec\nfrom ..exceptions import (EnvironmentFileExtensionNotValid, EnvironmentFileNotFound,\n SpecNotFound)\n\n\ndef detect(**kwargs):\n filename = kwargs.get('filename')\n remote_definition = kwargs.get('name')\n\n # Check extensions\n all_valid_exts = YamlFileSpec.extensions.union(RequirementsSpec.extensions)\n fname, ext = os.path.splitext(filename)\n\n # First check if file exists and test the known valid extension for specs\n file_exists = filename and os.path.isfile(filename)\n if file_exists:\n if ext == '' or ext not in all_valid_exts:\n raise EnvironmentFileExtensionNotValid(filename)\n elif ext in YamlFileSpec.extensions:\n specs = [YamlFileSpec]\n elif ext in RequirementsSpec.extensions:\n specs = [RequirementsSpec]\n else:\n specs = [NotebookSpec, BinstarSpec]\n\n # Check specifications\n spec_instances = []\n for SpecClass in specs:\n spec = SpecClass(**kwargs)\n spec_instances.append(spec)\n if spec.can_handle():\n return spec\n\n if not file_exists and remote_definition is None:\n raise 
EnvironmentFileNotFound(filename=filename)\n else:\n raise SpecNotFound(build_message(spec_instances))\n\n\ndef build_message(spec_instances):\n binstar_spec = next((s for s in spec_instances if isinstance(s, BinstarSpec)), None)\n if binstar_spec:\n return binstar_spec.msg\n else:\n return \"\\n\".join([s.msg for s in spec_instances if s.msg is not None])\n", "path": "conda_env/specs/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\nimport re\n\nfrom conda.models.version import normalized_version\nfrom .. import env\nfrom ..exceptions import EnvironmentFileNotDownloaded\n\ntry:\n from binstar_client import errors\n from binstar_client.utils import get_server_api\nexcept ImportError:\n get_server_api = None\n\nENVIRONMENT_TYPE = 'env'\n# TODO: isolate binstar related code into conda_env.utils.binstar\n\n\nclass BinstarSpec(object):\n \"\"\"\n spec = BinstarSpec('darth/deathstar')\n spec.can_handle() # => True / False\n spec.environment # => YAML string\n spec.msg # => Error messages\n :raises: EnvironmentFileDoesNotExist, EnvironmentFileNotDownloaded\n \"\"\"\n\n _environment = None\n _username = None\n _packagename = None\n _package = None\n _file_data = None\n msg = None\n\n def __init__(self, name=None, **kwargs):\n self.name = name\n self.quiet = False\n if get_server_api is not None:\n self.binstar = get_server_api()\n else:\n self.binstar = None\n\n def can_handle(self):\n result = self._can_handle()\n return result\n\n def _can_handle(self):\n \"\"\"\n Validates loader can process environment definition.\n :return: True or False\n \"\"\"\n # TODO: log information about trying to find the package in binstar.org\n if self.valid_name():\n if self.binstar is None:\n self.msg = (\"Anaconda Client is required to interact with anaconda.org or an \"\n \"Anaconda API. 
Please run `conda install anaconda-client`.\")\n return False\n\n return self.package is not None and self.valid_package()\n return False\n\n def valid_name(self):\n \"\"\"\n Validates name\n :return: True or False\n \"\"\"\n if re.match(\"^(.+)/(.+)$\", str(self.name)) is not None:\n return True\n elif self.name is None:\n self.msg = \"Can't process without a name\"\n else:\n self.msg = \"Invalid name, try the format: user/package\"\n return False\n\n def valid_package(self):\n \"\"\"\n Returns True if package has an environment file\n :return: True or False\n \"\"\"\n return len(self.file_data) > 0\n\n @property\n def file_data(self):\n if self._file_data is None:\n self._file_data = [data\n for data in self.package['files']\n if data['type'] == ENVIRONMENT_TYPE]\n return self._file_data\n\n @property\n def environment(self):\n \"\"\"\n :raises: EnvironmentFileNotDownloaded\n \"\"\"\n if self._environment is None:\n versions = [{'normalized': normalized_version(d['version']), 'original': d['version']}\n for d in self.file_data]\n latest_version = max(versions, key=lambda x: x['normalized'])['original']\n file_data = [data\n for data in self.package['files']\n if data['version'] == latest_version]\n req = self.binstar.download(self.username, self.packagename, latest_version,\n file_data[0]['basename'])\n if req is None:\n raise EnvironmentFileNotDownloaded(self.username, self.packagename)\n self._environment = req.text\n return env.from_yaml(self._environment)\n\n @property\n def package(self):\n if self._package is None:\n try:\n self._package = self.binstar.package(self.username, self.packagename)\n except errors.NotFound:\n self.msg = \"{} was not found on anaconda.org.\\n\"\\\n \"You may need to be logged in. Try running:\\n\"\\\n \" anaconda login\".format(self.name)\n return self._package\n\n @property\n def username(self):\n if self._username is None:\n self._username = self.parse()[0]\n return self._username\n\n @property\n def packagename(self):\n if self._packagename is None:\n self._packagename = self.parse()[1]\n return self._packagename\n\n def parse(self):\n \"\"\"Parse environment definition handle\"\"\"\n return self.name.split('/', 1)\n", "path": "conda_env/specs/binstar.py"}]} | 2,147 | 763 |
gh_patches_debug_17800 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3618 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider western_family is broken
During the global build at 2021-08-04-14-42-45, spider **western_family** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/logs/western_family.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/western_family.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/western_family.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/western_family.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3
4 from locations.items import GeojsonPointItem
5
6
7 class WesternFamilySpider(scrapy.Spider):
8
9 name = "western_family"
10 item_attributes = {"brand": "Western Family"}
11 allowed_domains = ["www.westernfamily.com"]
12 start_urls = (
13 "http://www.westernfamily.com/wp-admin/admin-ajax.php?action=store_search&lat=45.5230622&lng=-122.67648159999999&max_results=2500&search_radius=50000&autoload=1",
14 )
15
16 def parse(self, response):
17 results = response.json()
18 for data in results:
19 properties = {
20 "ref": data["id"],
21 "name": data["store"],
22 "lat": data["lat"],
23 "lon": data["lng"],
24 "addr_full": data["address"],
25 "city": data["city"],
26 "state": data["state"],
27 "postcode": data["zip"],
28 "country": data["country"],
29 "phone": data["phone"],
30 "website": data["url"],
31 }
32
33 yield GeojsonPointItem(**properties)
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/western_family.py b/locations/spiders/western_family.py
deleted file mode 100644
--- a/locations/spiders/western_family.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# -*- coding: utf-8 -*-
-import scrapy
-
-from locations.items import GeojsonPointItem
-
-
-class WesternFamilySpider(scrapy.Spider):
-
- name = "western_family"
- item_attributes = {"brand": "Western Family"}
- allowed_domains = ["www.westernfamily.com"]
- start_urls = (
- "http://www.westernfamily.com/wp-admin/admin-ajax.php?action=store_search&lat=45.5230622&lng=-122.67648159999999&max_results=2500&search_radius=50000&autoload=1",
- )
-
- def parse(self, response):
- results = response.json()
- for data in results:
- properties = {
- "ref": data["id"],
- "name": data["store"],
- "lat": data["lat"],
- "lon": data["lng"],
- "addr_full": data["address"],
- "city": data["city"],
- "state": data["state"],
- "postcode": data["zip"],
- "country": data["country"],
- "phone": data["phone"],
- "website": data["url"],
- }
-
- yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/western_family.py b/locations/spiders/western_family.py\ndeleted file mode 100644\n--- a/locations/spiders/western_family.py\n+++ /dev/null\n@@ -1,33 +0,0 @@\n-# -*- coding: utf-8 -*-\n-import scrapy\n-\n-from locations.items import GeojsonPointItem\n-\n-\n-class WesternFamilySpider(scrapy.Spider):\n-\n- name = \"western_family\"\n- item_attributes = {\"brand\": \"Western Family\"}\n- allowed_domains = [\"www.westernfamily.com\"]\n- start_urls = (\n- \"http://www.westernfamily.com/wp-admin/admin-ajax.php?action=store_search&lat=45.5230622&lng=-122.67648159999999&max_results=2500&search_radius=50000&autoload=1\",\n- )\n-\n- def parse(self, response):\n- results = response.json()\n- for data in results:\n- properties = {\n- \"ref\": data[\"id\"],\n- \"name\": data[\"store\"],\n- \"lat\": data[\"lat\"],\n- \"lon\": data[\"lng\"],\n- \"addr_full\": data[\"address\"],\n- \"city\": data[\"city\"],\n- \"state\": data[\"state\"],\n- \"postcode\": data[\"zip\"],\n- \"country\": data[\"country\"],\n- \"phone\": data[\"phone\"],\n- \"website\": data[\"url\"],\n- }\n-\n- yield GeojsonPointItem(**properties)\n", "issue": "Spider western_family is broken\nDuring the global build at 2021-08-04-14-42-45, spider **western_family** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/logs/western_family.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/western_family.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/western_family.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass WesternFamilySpider(scrapy.Spider):\n\n name = \"western_family\"\n item_attributes = {\"brand\": \"Western Family\"}\n allowed_domains = [\"www.westernfamily.com\"]\n start_urls = (\n \"http://www.westernfamily.com/wp-admin/admin-ajax.php?action=store_search&lat=45.5230622&lng=-122.67648159999999&max_results=2500&search_radius=50000&autoload=1\",\n )\n\n def parse(self, response):\n results = response.json()\n for data in results:\n properties = {\n \"ref\": data[\"id\"],\n \"name\": data[\"store\"],\n \"lat\": data[\"lat\"],\n \"lon\": data[\"lng\"],\n \"addr_full\": data[\"address\"],\n \"city\": data[\"city\"],\n \"state\": data[\"state\"],\n \"postcode\": data[\"zip\"],\n \"country\": data[\"country\"],\n \"phone\": data[\"phone\"],\n \"website\": data[\"url\"],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/western_family.py"}], "after_files": [{"content": null, "path": "locations/spiders/western_family.py"}]} | 777 | 357 |
gh_patches_debug_49884 | rasdani/github-patches | git_diff | scikit-hep__awkward-895 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in `identifier.py`
https://github.com/scikit-hep/awkward-1.0/blob/a0ec3bcacacc81a47fe61a1d99b0bc512a8bb3cf/src/awkward/_v2/identifier.py#L30
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/awkward/_v2/identifier.py`
Content:
```
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 from __future__ import absolute_import
4
5 import awkward as ak
6
7 np = ak.nplike.NumpyMetadata.instance()
8
9
10 class Identifier(object):
11 _numrefs = 0
12
13 @staticmethod
14 def newref():
15 out = Identifier._numrefs
16 Identifier._numrefs += 1
17 return out
18
19 def __init__(self, ref, fieldloc, data):
20 self._ref = ref
21 self._fieldloc = fieldloc
22 if not isinstance(fieldloc, dict) or not all(
23 isinstance(k, int) and isinstance(v, str) for k, v in fieldloc.items()
24 ):
25 raise TypeError("Identifier fieldloc must be a dict of int -> str")
26 self._nplike = ak.nplike.of(data)
27
28 self._data = self._nplike.asarray(data, order="C")
29 if len(self._data.shape) != 2:
30 raise TypeError("Identifer data must be 2-dimensional")
31
32 # TypeError for unsupported types?
33 self._T = self._data.dtype
34 if self._T not in (np.dtype(np.int32), np.dtype(np.int64)):
35 raise TypeError("Identifier data must be int32, int64")
36
37 @classmethod
38 # cpp takes width, length?
39 def zeros(cls, ref, fieldloc, length, width, nplike, dtype):
40 return Identifier(ref, fieldloc, nplike.zeros((length, width), dtype=dtype))
41
42 @classmethod
43 def empty(cls, ref, fieldloc, length, width, nplike, dtype):
44 return Identifier(ref, fieldloc, nplike.empty((length, width), dtype=dtype))
45
46 @property
47 def ref(self):
48 return self._ref
49
50 @property
51 def filedloc(self):
52 return self._fieldloc
53
54 @property
55 def data(self):
56 return self._data
57
58 @property
59 def nplike(self):
60 return self._nplike
61
62 def __len__(self):
63 return len(self._data)
64
65 def width(self):
66 return self._data.shape[1]
67
68 def to64(self):
69 return Identifier(self._ref, self._fieldloc, self._data.astype(np.int64))
70
71 def __getitem__(self, where):
72 return self._data[where]
73
74 def __copy__(self):
75 return Identifier(self._ref, self._fieldloc, self._data.copy())
76
77 def __repr__(self):
78 return self._repr("", "", "")
79
80 def _repr(self, indent, pre, post):
81 out = [indent, pre, "<Identifier ref=" + repr(str(self._ref)) + " fieldloc="]
82 out.append(repr(str(self._fieldloc)))
83 out.append(" length=")
84 out.append(repr(str(len(self._data))))
85 out.append(" width=")
86 out.append(repr(str(self._data.shape[1])))
87 out.append(" at=")
88 out.append(repr(hex(self._data.ctypes.data)))
89 out.append(">\n")
90 out.append(indent + " ")
91 out.append(
92 self._nplike.array_str(self._data, max_line_width=30).replace(
93 "\n", "\n" + indent + " "
94 )
95 )
96 out.append("\n")
97 out.append(indent)
98 out.append("</Identifier>")
99 out.append(post)
100 return "".join(out)
101
102 def convert_to(self, nplike):
103 return Identifier(self._ref, self._fieldloc, nplike.asarray(self._data))
104
105 def referentially_equal(self, other):
106 return (
107 self._ref == other._ref
108 and self._fieldloc == other._fieldloc
109 and self._data.ctypes.data == other._data.ctypes.data
110 and self._data.shape == other._data.shape
111 and self._data.strides == other._data.strides
112 and self._data.dtype == other._data.dtype
113 )
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/awkward/_v2/identifier.py b/src/awkward/_v2/identifier.py
--- a/src/awkward/_v2/identifier.py
+++ b/src/awkward/_v2/identifier.py
@@ -27,7 +27,7 @@
self._data = self._nplike.asarray(data, order="C")
if len(self._data.shape) != 2:
- raise TypeError("Identifer data must be 2-dimensional")
+ raise TypeError("Identifier data must be 2-dimensional")
# TypeError for unsupported types?
self._T = self._data.dtype
| {"golden_diff": "diff --git a/src/awkward/_v2/identifier.py b/src/awkward/_v2/identifier.py\n--- a/src/awkward/_v2/identifier.py\n+++ b/src/awkward/_v2/identifier.py\n@@ -27,7 +27,7 @@\n \n self._data = self._nplike.asarray(data, order=\"C\")\n if len(self._data.shape) != 2:\n- raise TypeError(\"Identifer data must be 2-dimensional\")\n+ raise TypeError(\"Identifier data must be 2-dimensional\")\n \n # TypeError for unsupported types?\n self._T = self._data.dtype\n", "issue": "Typo in `identifier.py`\nhttps://github.com/scikit-hep/awkward-1.0/blob/a0ec3bcacacc81a47fe61a1d99b0bc512a8bb3cf/src/awkward/_v2/identifier.py#L30\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport awkward as ak\n\nnp = ak.nplike.NumpyMetadata.instance()\n\n\nclass Identifier(object):\n _numrefs = 0\n\n @staticmethod\n def newref():\n out = Identifier._numrefs\n Identifier._numrefs += 1\n return out\n\n def __init__(self, ref, fieldloc, data):\n self._ref = ref\n self._fieldloc = fieldloc\n if not isinstance(fieldloc, dict) or not all(\n isinstance(k, int) and isinstance(v, str) for k, v in fieldloc.items()\n ):\n raise TypeError(\"Identifier fieldloc must be a dict of int -> str\")\n self._nplike = ak.nplike.of(data)\n\n self._data = self._nplike.asarray(data, order=\"C\")\n if len(self._data.shape) != 2:\n raise TypeError(\"Identifer data must be 2-dimensional\")\n\n # TypeError for unsupported types?\n self._T = self._data.dtype\n if self._T not in (np.dtype(np.int32), np.dtype(np.int64)):\n raise TypeError(\"Identifier data must be int32, int64\")\n\n @classmethod\n # cpp takes width, length?\n def zeros(cls, ref, fieldloc, length, width, nplike, dtype):\n return Identifier(ref, fieldloc, nplike.zeros((length, width), dtype=dtype))\n\n @classmethod\n def empty(cls, ref, fieldloc, length, width, nplike, dtype):\n return Identifier(ref, fieldloc, nplike.empty((length, width), dtype=dtype))\n\n @property\n def ref(self):\n return self._ref\n\n @property\n def filedloc(self):\n return self._fieldloc\n\n @property\n def data(self):\n return self._data\n\n @property\n def nplike(self):\n return self._nplike\n\n def __len__(self):\n return len(self._data)\n\n def width(self):\n return self._data.shape[1]\n\n def to64(self):\n return Identifier(self._ref, self._fieldloc, self._data.astype(np.int64))\n\n def __getitem__(self, where):\n return self._data[where]\n\n def __copy__(self):\n return Identifier(self._ref, self._fieldloc, self._data.copy())\n\n def __repr__(self):\n return self._repr(\"\", \"\", \"\")\n\n def _repr(self, indent, pre, post):\n out = [indent, pre, \"<Identifier ref=\" + repr(str(self._ref)) + \" fieldloc=\"]\n out.append(repr(str(self._fieldloc)))\n out.append(\" length=\")\n out.append(repr(str(len(self._data))))\n out.append(\" width=\")\n out.append(repr(str(self._data.shape[1])))\n out.append(\" at=\")\n out.append(repr(hex(self._data.ctypes.data)))\n out.append(\">\\n\")\n out.append(indent + \" \")\n out.append(\n self._nplike.array_str(self._data, max_line_width=30).replace(\n \"\\n\", \"\\n\" + indent + \" \"\n )\n )\n out.append(\"\\n\")\n out.append(indent)\n out.append(\"</Identifier>\")\n out.append(post)\n return \"\".join(out)\n\n def convert_to(self, nplike):\n return Identifier(self._ref, self._fieldloc, nplike.asarray(self._data))\n\n def referentially_equal(self, other):\n return (\n self._ref == other._ref\n and self._fieldloc == other._fieldloc\n and 
self._data.ctypes.data == other._data.ctypes.data\n and self._data.shape == other._data.shape\n and self._data.strides == other._data.strides\n and self._data.dtype == other._data.dtype\n )\n", "path": "src/awkward/_v2/identifier.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport awkward as ak\n\nnp = ak.nplike.NumpyMetadata.instance()\n\n\nclass Identifier(object):\n _numrefs = 0\n\n @staticmethod\n def newref():\n out = Identifier._numrefs\n Identifier._numrefs += 1\n return out\n\n def __init__(self, ref, fieldloc, data):\n self._ref = ref\n self._fieldloc = fieldloc\n if not isinstance(fieldloc, dict) or not all(\n isinstance(k, int) and isinstance(v, str) for k, v in fieldloc.items()\n ):\n raise TypeError(\"Identifier fieldloc must be a dict of int -> str\")\n self._nplike = ak.nplike.of(data)\n\n self._data = self._nplike.asarray(data, order=\"C\")\n if len(self._data.shape) != 2:\n raise TypeError(\"Identifier data must be 2-dimensional\")\n\n # TypeError for unsupported types?\n self._T = self._data.dtype\n if self._T not in (np.dtype(np.int32), np.dtype(np.int64)):\n raise TypeError(\"Identifier data must be int32, int64\")\n\n @classmethod\n # cpp takes width, length?\n def zeros(cls, ref, fieldloc, length, width, nplike, dtype):\n return Identifier(ref, fieldloc, nplike.zeros((length, width), dtype=dtype))\n\n @classmethod\n def empty(cls, ref, fieldloc, length, width, nplike, dtype):\n return Identifier(ref, fieldloc, nplike.empty((length, width), dtype=dtype))\n\n @property\n def ref(self):\n return self._ref\n\n @property\n def filedloc(self):\n return self._fieldloc\n\n @property\n def data(self):\n return self._data\n\n @property\n def nplike(self):\n return self._nplike\n\n def __len__(self):\n return len(self._data)\n\n def width(self):\n return self._data.shape[1]\n\n def to64(self):\n return Identifier(self._ref, self._fieldloc, self._data.astype(np.int64))\n\n def __getitem__(self, where):\n return self._data[where]\n\n def __copy__(self):\n return Identifier(self._ref, self._fieldloc, self._data.copy())\n\n def __repr__(self):\n return self._repr(\"\", \"\", \"\")\n\n def _repr(self, indent, pre, post):\n out = [indent, pre, \"<Identifier ref=\" + repr(str(self._ref)) + \" fieldloc=\"]\n out.append(repr(str(self._fieldloc)))\n out.append(\" length=\")\n out.append(repr(str(len(self._data))))\n out.append(\" width=\")\n out.append(repr(str(self._data.shape[1])))\n out.append(\" at=\")\n out.append(repr(hex(self._data.ctypes.data)))\n out.append(\">\\n\")\n out.append(indent + \" \")\n out.append(\n self._nplike.array_str(self._data, max_line_width=30).replace(\n \"\\n\", \"\\n\" + indent + \" \"\n )\n )\n out.append(\"\\n\")\n out.append(indent)\n out.append(\"</Identifier>\")\n out.append(post)\n return \"\".join(out)\n\n def convert_to(self, nplike):\n return Identifier(self._ref, self._fieldloc, nplike.asarray(self._data))\n\n def referentially_equal(self, other):\n return (\n self._ref == other._ref\n and self._fieldloc == other._fieldloc\n and self._data.ctypes.data == other._data.ctypes.data\n and self._data.shape == other._data.shape\n and self._data.strides == other._data.strides\n and self._data.dtype == other._data.dtype\n )\n", "path": "src/awkward/_v2/identifier.py"}]} | 1,463 | 139 |
gh_patches_debug_2393 | rasdani/github-patches | git_diff | ibis-project__ibis-4271 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug(impala) A delimited table should be explicitly stored as textfile
https://github.com/ibis-project/ibis/blob/88ffe3367cb6a34936e578f6a9b68dc30d559507/ibis/backends/impala/ddl.py#L67
When the cluster's default format is set to parquet, this will cause an exception. The table should be explicitly stored as textfile, for example:
```python
if self.lineterminator is not None:
yield f"LINES TERMINATED BY '{self.lineterminator}'"
yield 'STORED AS TEXTFILE'
yield f"LOCATION '{self.path}'"
```
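For illustration only, a minimal sketch of the DDL this would produce after the proposed change (the path and delimiter below are made up, and the comment shows expected output rather than current behaviour):
```python
from ibis.backends.impala.ddl import DelimitedFormat

# Hypothetical table location and delimiter, purely for demonstration.
fmt = DelimitedFormat("/tmp/example_table", delimiter=",")
print("\n".join(fmt.to_ddl()))
# ROW FORMAT DELIMITED
# FIELDS TERMINATED BY ','
# STORED AS TEXTFILE      <- line added by the proposed change
# LOCATION '/tmp/example_table'
```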
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ibis/backends/impala/ddl.py`
Content:
```
1 # Copyright 2014 Cloudera Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import json
16
17 from ibis.backends.base.sql.ddl import (
18 AlterTable,
19 BaseDDL,
20 CreateTable,
21 CreateTableWithSchema,
22 DropFunction,
23 format_partition,
24 format_schema,
25 format_tblproperties,
26 )
27 from ibis.backends.base.sql.registry import type_to_sql_string
28
29
30 class CreateTableParquet(CreateTable):
31 def __init__(
32 self,
33 table_name,
34 path,
35 example_file=None,
36 example_table=None,
37 schema=None,
38 external=True,
39 **kwargs,
40 ):
41 super().__init__(
42 table_name,
43 external=external,
44 format='parquet',
45 path=path,
46 **kwargs,
47 )
48 self.example_file = example_file
49 self.example_table = example_table
50 self.schema = schema
51
52 @property
53 def _pieces(self):
54 if self.example_file is not None:
55 yield f"LIKE PARQUET '{self.example_file}'"
56 elif self.example_table is not None:
57 yield f"LIKE {self.example_table}"
58 elif self.schema is not None:
59 yield format_schema(self.schema)
60 else:
61 raise NotImplementedError
62
63 yield self._storage()
64 yield self._location()
65
66
67 class DelimitedFormat:
68 def __init__(
69 self,
70 path,
71 delimiter=None,
72 escapechar=None,
73 na_rep=None,
74 lineterminator=None,
75 ):
76 self.path = path
77 self.delimiter = delimiter
78 self.escapechar = escapechar
79 self.lineterminator = lineterminator
80 self.na_rep = na_rep
81
82 def to_ddl(self):
83 yield 'ROW FORMAT DELIMITED'
84
85 if self.delimiter is not None:
86 yield f"FIELDS TERMINATED BY '{self.delimiter}'"
87
88 if self.escapechar is not None:
89 yield f"ESCAPED BY '{self.escapechar}'"
90
91 if self.lineterminator is not None:
92 yield f"LINES TERMINATED BY '{self.lineterminator}'"
93
94 yield f"LOCATION '{self.path}'"
95
96 if self.na_rep is not None:
97 props = {'serialization.null.format': self.na_rep}
98 yield format_tblproperties(props)
99
100
101 class AvroFormat:
102 def __init__(self, path, avro_schema):
103 self.path = path
104 self.avro_schema = avro_schema
105
106 def to_ddl(self):
107 yield 'STORED AS AVRO'
108 yield f"LOCATION '{self.path}'"
109
110 schema = json.dumps(self.avro_schema, indent=2, sort_keys=True)
111 schema = '\n'.join(x.rstrip() for x in schema.splitlines())
112
113 props = {'avro.schema.literal': schema}
114 yield format_tblproperties(props)
115
116
117 class ParquetFormat:
118 def __init__(self, path):
119 self.path = path
120
121 def to_ddl(self):
122 yield 'STORED AS PARQUET'
123 yield f"LOCATION '{self.path}'"
124
125
126 class CreateTableDelimited(CreateTableWithSchema):
127 def __init__(
128 self,
129 table_name,
130 path,
131 schema,
132 delimiter=None,
133 escapechar=None,
134 lineterminator=None,
135 na_rep=None,
136 external=True,
137 **kwargs,
138 ):
139 table_format = DelimitedFormat(
140 path,
141 delimiter=delimiter,
142 escapechar=escapechar,
143 lineterminator=lineterminator,
144 na_rep=na_rep,
145 )
146 super().__init__(
147 table_name, schema, table_format, external=external, **kwargs
148 )
149
150
151 class CreateTableAvro(CreateTable):
152 def __init__(self, table_name, path, avro_schema, external=True, **kwargs):
153 super().__init__(table_name, external=external, **kwargs)
154 self.table_format = AvroFormat(path, avro_schema)
155
156 @property
157 def _pieces(self):
158 yield '\n'.join(self.table_format.to_ddl())
159
160
161 class LoadData(BaseDDL):
162
163 """
164 Generate DDL for LOAD DATA command. Cannot be cancelled
165 """
166
167 def __init__(
168 self,
169 table_name,
170 path,
171 database=None,
172 partition=None,
173 partition_schema=None,
174 overwrite=False,
175 ):
176 self.table_name = table_name
177 self.database = database
178 self.path = path
179
180 self.partition = partition
181 self.partition_schema = partition_schema
182
183 self.overwrite = overwrite
184
185 def compile(self):
186 overwrite = 'OVERWRITE ' if self.overwrite else ''
187
188 if self.partition is not None:
189 partition = '\n' + format_partition(
190 self.partition, self.partition_schema
191 )
192 else:
193 partition = ''
194
195 scoped_name = self._get_scoped_name(self.table_name, self.database)
196 return "LOAD DATA INPATH '{}' {}INTO TABLE {}{}".format(
197 self.path, overwrite, scoped_name, partition
198 )
199
200
201 class PartitionProperties(AlterTable):
202 def __init__(
203 self,
204 table,
205 partition,
206 partition_schema,
207 location=None,
208 format=None,
209 tbl_properties=None,
210 serde_properties=None,
211 ):
212 super().__init__(
213 table,
214 location=location,
215 format=format,
216 tbl_properties=tbl_properties,
217 serde_properties=serde_properties,
218 )
219 self.partition = partition
220 self.partition_schema = partition_schema
221
222 def _compile(self, cmd, property_prefix=''):
223 part = format_partition(self.partition, self.partition_schema)
224 if cmd:
225 part = f'{cmd} {part}'
226
227 props = self._format_properties(property_prefix)
228 action = f'{self.table} {part}{props}'
229 return self._wrap_command(action)
230
231
232 class AddPartition(PartitionProperties):
233 def __init__(self, table, partition, partition_schema, location=None):
234 super().__init__(table, partition, partition_schema, location=location)
235
236 def compile(self):
237 return self._compile('ADD')
238
239
240 class AlterPartition(PartitionProperties):
241 def compile(self):
242 return self._compile('', 'SET ')
243
244
245 class DropPartition(PartitionProperties):
246 def __init__(self, table, partition, partition_schema):
247 super().__init__(table, partition, partition_schema)
248
249 def compile(self):
250 return self._compile('DROP')
251
252
253 class CacheTable(BaseDDL):
254 def __init__(self, table_name, database=None, pool='default'):
255 self.table_name = table_name
256 self.database = database
257 self.pool = pool
258
259 def compile(self):
260 scoped_name = self._get_scoped_name(self.table_name, self.database)
261 return "ALTER TABLE {} SET CACHED IN '{}'".format(
262 scoped_name, self.pool
263 )
264
265
266 class CreateFunction(BaseDDL):
267
268 _object_type = 'FUNCTION'
269
270 def __init__(self, func, name=None, database=None):
271 self.func = func
272 self.name = name or func.name
273 self.database = database
274
275 def _impala_signature(self):
276 scoped_name = self._get_scoped_name(self.name, self.database)
277 input_sig = _impala_input_signature(self.func.inputs)
278 output_sig = type_to_sql_string(self.func.output)
279
280 return f'{scoped_name}({input_sig}) returns {output_sig}'
281
282
283 class CreateUDF(CreateFunction):
284 def compile(self):
285 create_decl = 'CREATE FUNCTION'
286 impala_sig = self._impala_signature()
287 param_line = "location '{}' symbol='{}'".format(
288 self.func.lib_path, self.func.so_symbol
289 )
290 return ' '.join([create_decl, impala_sig, param_line])
291
292
293 class CreateUDA(CreateFunction):
294 def compile(self):
295 create_decl = 'CREATE AGGREGATE FUNCTION'
296 impala_sig = self._impala_signature()
297 tokens = [f"location '{self.func.lib_path}'"]
298
299 fn_names = (
300 'init_fn',
301 'update_fn',
302 'merge_fn',
303 'serialize_fn',
304 'finalize_fn',
305 )
306
307 for fn in fn_names:
308 value = getattr(self.func, fn)
309 if value is not None:
310 tokens.append(f"{fn}='{value}'")
311
312 return ' '.join([create_decl, impala_sig]) + ' ' + '\n'.join(tokens)
313
314
315 class DropFunction(DropFunction):
316 def _impala_signature(self):
317 full_name = self._get_scoped_name(self.name, self.database)
318 input_sig = _impala_input_signature(self.inputs)
319 return f'{full_name}({input_sig})'
320
321
322 class ListFunction(BaseDDL):
323 def __init__(self, database, like=None, aggregate=False):
324 self.database = database
325 self.like = like
326 self.aggregate = aggregate
327
328 def compile(self):
329 statement = 'SHOW '
330 if self.aggregate:
331 statement += 'AGGREGATE '
332 statement += f'FUNCTIONS IN {self.database}'
333 if self.like:
334 statement += f" LIKE '{self.like}'"
335 return statement
336
337
338 def _impala_input_signature(inputs):
339 # TODO: varargs '{}...'.format(val)
340 return ', '.join(map(type_to_sql_string, inputs))
341
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ibis/backends/impala/ddl.py b/ibis/backends/impala/ddl.py
--- a/ibis/backends/impala/ddl.py
+++ b/ibis/backends/impala/ddl.py
@@ -91,6 +91,7 @@
if self.lineterminator is not None:
yield f"LINES TERMINATED BY '{self.lineterminator}'"
+ yield 'STORED AS TEXTFILE'
yield f"LOCATION '{self.path}'"
if self.na_rep is not None:
| {"golden_diff": "diff --git a/ibis/backends/impala/ddl.py b/ibis/backends/impala/ddl.py\n--- a/ibis/backends/impala/ddl.py\n+++ b/ibis/backends/impala/ddl.py\n@@ -91,6 +91,7 @@\n if self.lineterminator is not None:\n yield f\"LINES TERMINATED BY '{self.lineterminator}'\"\n \n+ yield 'STORED AS TEXTFILE'\n yield f\"LOCATION '{self.path}'\"\n \n if self.na_rep is not None:\n", "issue": "bug(impala) A delimited table should be explicitly stored as textfile\nhttps://github.com/ibis-project/ibis/blob/88ffe3367cb6a34936e578f6a9b68dc30d559507/ibis/backends/impala/ddl.py#L67\r\nwhen the cluster's default format is set as parquet, this will cause an exception. It should be explicitly stored as textfile. such as\r\n\r\n```python\r\n if self.lineterminator is not None:\r\n yield f\"LINES TERMINATED BY '{self.lineterminator}'\"\r\n\r\n yield 'STORED AS TEXTFILE'\r\n yield f\"LOCATION '{self.path}'\"\r\n```\n", "before_files": [{"content": "# Copyright 2014 Cloudera Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nfrom ibis.backends.base.sql.ddl import (\n AlterTable,\n BaseDDL,\n CreateTable,\n CreateTableWithSchema,\n DropFunction,\n format_partition,\n format_schema,\n format_tblproperties,\n)\nfrom ibis.backends.base.sql.registry import type_to_sql_string\n\n\nclass CreateTableParquet(CreateTable):\n def __init__(\n self,\n table_name,\n path,\n example_file=None,\n example_table=None,\n schema=None,\n external=True,\n **kwargs,\n ):\n super().__init__(\n table_name,\n external=external,\n format='parquet',\n path=path,\n **kwargs,\n )\n self.example_file = example_file\n self.example_table = example_table\n self.schema = schema\n\n @property\n def _pieces(self):\n if self.example_file is not None:\n yield f\"LIKE PARQUET '{self.example_file}'\"\n elif self.example_table is not None:\n yield f\"LIKE {self.example_table}\"\n elif self.schema is not None:\n yield format_schema(self.schema)\n else:\n raise NotImplementedError\n\n yield self._storage()\n yield self._location()\n\n\nclass DelimitedFormat:\n def __init__(\n self,\n path,\n delimiter=None,\n escapechar=None,\n na_rep=None,\n lineterminator=None,\n ):\n self.path = path\n self.delimiter = delimiter\n self.escapechar = escapechar\n self.lineterminator = lineterminator\n self.na_rep = na_rep\n\n def to_ddl(self):\n yield 'ROW FORMAT DELIMITED'\n\n if self.delimiter is not None:\n yield f\"FIELDS TERMINATED BY '{self.delimiter}'\"\n\n if self.escapechar is not None:\n yield f\"ESCAPED BY '{self.escapechar}'\"\n\n if self.lineterminator is not None:\n yield f\"LINES TERMINATED BY '{self.lineterminator}'\"\n\n yield f\"LOCATION '{self.path}'\"\n\n if self.na_rep is not None:\n props = {'serialization.null.format': self.na_rep}\n yield format_tblproperties(props)\n\n\nclass AvroFormat:\n def __init__(self, path, avro_schema):\n self.path = path\n self.avro_schema = avro_schema\n\n def to_ddl(self):\n yield 'STORED AS AVRO'\n yield f\"LOCATION '{self.path}'\"\n\n schema = json.dumps(self.avro_schema, indent=2, 
sort_keys=True)\n schema = '\\n'.join(x.rstrip() for x in schema.splitlines())\n\n props = {'avro.schema.literal': schema}\n yield format_tblproperties(props)\n\n\nclass ParquetFormat:\n def __init__(self, path):\n self.path = path\n\n def to_ddl(self):\n yield 'STORED AS PARQUET'\n yield f\"LOCATION '{self.path}'\"\n\n\nclass CreateTableDelimited(CreateTableWithSchema):\n def __init__(\n self,\n table_name,\n path,\n schema,\n delimiter=None,\n escapechar=None,\n lineterminator=None,\n na_rep=None,\n external=True,\n **kwargs,\n ):\n table_format = DelimitedFormat(\n path,\n delimiter=delimiter,\n escapechar=escapechar,\n lineterminator=lineterminator,\n na_rep=na_rep,\n )\n super().__init__(\n table_name, schema, table_format, external=external, **kwargs\n )\n\n\nclass CreateTableAvro(CreateTable):\n def __init__(self, table_name, path, avro_schema, external=True, **kwargs):\n super().__init__(table_name, external=external, **kwargs)\n self.table_format = AvroFormat(path, avro_schema)\n\n @property\n def _pieces(self):\n yield '\\n'.join(self.table_format.to_ddl())\n\n\nclass LoadData(BaseDDL):\n\n \"\"\"\n Generate DDL for LOAD DATA command. Cannot be cancelled\n \"\"\"\n\n def __init__(\n self,\n table_name,\n path,\n database=None,\n partition=None,\n partition_schema=None,\n overwrite=False,\n ):\n self.table_name = table_name\n self.database = database\n self.path = path\n\n self.partition = partition\n self.partition_schema = partition_schema\n\n self.overwrite = overwrite\n\n def compile(self):\n overwrite = 'OVERWRITE ' if self.overwrite else ''\n\n if self.partition is not None:\n partition = '\\n' + format_partition(\n self.partition, self.partition_schema\n )\n else:\n partition = ''\n\n scoped_name = self._get_scoped_name(self.table_name, self.database)\n return \"LOAD DATA INPATH '{}' {}INTO TABLE {}{}\".format(\n self.path, overwrite, scoped_name, partition\n )\n\n\nclass PartitionProperties(AlterTable):\n def __init__(\n self,\n table,\n partition,\n partition_schema,\n location=None,\n format=None,\n tbl_properties=None,\n serde_properties=None,\n ):\n super().__init__(\n table,\n location=location,\n format=format,\n tbl_properties=tbl_properties,\n serde_properties=serde_properties,\n )\n self.partition = partition\n self.partition_schema = partition_schema\n\n def _compile(self, cmd, property_prefix=''):\n part = format_partition(self.partition, self.partition_schema)\n if cmd:\n part = f'{cmd} {part}'\n\n props = self._format_properties(property_prefix)\n action = f'{self.table} {part}{props}'\n return self._wrap_command(action)\n\n\nclass AddPartition(PartitionProperties):\n def __init__(self, table, partition, partition_schema, location=None):\n super().__init__(table, partition, partition_schema, location=location)\n\n def compile(self):\n return self._compile('ADD')\n\n\nclass AlterPartition(PartitionProperties):\n def compile(self):\n return self._compile('', 'SET ')\n\n\nclass DropPartition(PartitionProperties):\n def __init__(self, table, partition, partition_schema):\n super().__init__(table, partition, partition_schema)\n\n def compile(self):\n return self._compile('DROP')\n\n\nclass CacheTable(BaseDDL):\n def __init__(self, table_name, database=None, pool='default'):\n self.table_name = table_name\n self.database = database\n self.pool = pool\n\n def compile(self):\n scoped_name = self._get_scoped_name(self.table_name, self.database)\n return \"ALTER TABLE {} SET CACHED IN '{}'\".format(\n scoped_name, self.pool\n )\n\n\nclass CreateFunction(BaseDDL):\n\n 
_object_type = 'FUNCTION'\n\n def __init__(self, func, name=None, database=None):\n self.func = func\n self.name = name or func.name\n self.database = database\n\n def _impala_signature(self):\n scoped_name = self._get_scoped_name(self.name, self.database)\n input_sig = _impala_input_signature(self.func.inputs)\n output_sig = type_to_sql_string(self.func.output)\n\n return f'{scoped_name}({input_sig}) returns {output_sig}'\n\n\nclass CreateUDF(CreateFunction):\n def compile(self):\n create_decl = 'CREATE FUNCTION'\n impala_sig = self._impala_signature()\n param_line = \"location '{}' symbol='{}'\".format(\n self.func.lib_path, self.func.so_symbol\n )\n return ' '.join([create_decl, impala_sig, param_line])\n\n\nclass CreateUDA(CreateFunction):\n def compile(self):\n create_decl = 'CREATE AGGREGATE FUNCTION'\n impala_sig = self._impala_signature()\n tokens = [f\"location '{self.func.lib_path}'\"]\n\n fn_names = (\n 'init_fn',\n 'update_fn',\n 'merge_fn',\n 'serialize_fn',\n 'finalize_fn',\n )\n\n for fn in fn_names:\n value = getattr(self.func, fn)\n if value is not None:\n tokens.append(f\"{fn}='{value}'\")\n\n return ' '.join([create_decl, impala_sig]) + ' ' + '\\n'.join(tokens)\n\n\nclass DropFunction(DropFunction):\n def _impala_signature(self):\n full_name = self._get_scoped_name(self.name, self.database)\n input_sig = _impala_input_signature(self.inputs)\n return f'{full_name}({input_sig})'\n\n\nclass ListFunction(BaseDDL):\n def __init__(self, database, like=None, aggregate=False):\n self.database = database\n self.like = like\n self.aggregate = aggregate\n\n def compile(self):\n statement = 'SHOW '\n if self.aggregate:\n statement += 'AGGREGATE '\n statement += f'FUNCTIONS IN {self.database}'\n if self.like:\n statement += f\" LIKE '{self.like}'\"\n return statement\n\n\ndef _impala_input_signature(inputs):\n # TODO: varargs '{}...'.format(val)\n return ', '.join(map(type_to_sql_string, inputs))\n", "path": "ibis/backends/impala/ddl.py"}], "after_files": [{"content": "# Copyright 2014 Cloudera Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nfrom ibis.backends.base.sql.ddl import (\n AlterTable,\n BaseDDL,\n CreateTable,\n CreateTableWithSchema,\n DropFunction,\n format_partition,\n format_schema,\n format_tblproperties,\n)\nfrom ibis.backends.base.sql.registry import type_to_sql_string\n\n\nclass CreateTableParquet(CreateTable):\n def __init__(\n self,\n table_name,\n path,\n example_file=None,\n example_table=None,\n schema=None,\n external=True,\n **kwargs,\n ):\n super().__init__(\n table_name,\n external=external,\n format='parquet',\n path=path,\n **kwargs,\n )\n self.example_file = example_file\n self.example_table = example_table\n self.schema = schema\n\n @property\n def _pieces(self):\n if self.example_file is not None:\n yield f\"LIKE PARQUET '{self.example_file}'\"\n elif self.example_table is not None:\n yield f\"LIKE {self.example_table}\"\n elif self.schema is not None:\n yield format_schema(self.schema)\n else:\n raise NotImplementedError\n\n 
yield self._storage()\n yield self._location()\n\n\nclass DelimitedFormat:\n def __init__(\n self,\n path,\n delimiter=None,\n escapechar=None,\n na_rep=None,\n lineterminator=None,\n ):\n self.path = path\n self.delimiter = delimiter\n self.escapechar = escapechar\n self.lineterminator = lineterminator\n self.na_rep = na_rep\n\n def to_ddl(self):\n yield 'ROW FORMAT DELIMITED'\n\n if self.delimiter is not None:\n yield f\"FIELDS TERMINATED BY '{self.delimiter}'\"\n\n if self.escapechar is not None:\n yield f\"ESCAPED BY '{self.escapechar}'\"\n\n if self.lineterminator is not None:\n yield f\"LINES TERMINATED BY '{self.lineterminator}'\"\n\n yield 'STORED AS TEXTFILE'\n yield f\"LOCATION '{self.path}'\"\n\n if self.na_rep is not None:\n props = {'serialization.null.format': self.na_rep}\n yield format_tblproperties(props)\n\n\nclass AvroFormat:\n def __init__(self, path, avro_schema):\n self.path = path\n self.avro_schema = avro_schema\n\n def to_ddl(self):\n yield 'STORED AS AVRO'\n yield f\"LOCATION '{self.path}'\"\n\n schema = json.dumps(self.avro_schema, indent=2, sort_keys=True)\n schema = '\\n'.join(x.rstrip() for x in schema.splitlines())\n\n props = {'avro.schema.literal': schema}\n yield format_tblproperties(props)\n\n\nclass ParquetFormat:\n def __init__(self, path):\n self.path = path\n\n def to_ddl(self):\n yield 'STORED AS PARQUET'\n yield f\"LOCATION '{self.path}'\"\n\n\nclass CreateTableDelimited(CreateTableWithSchema):\n def __init__(\n self,\n table_name,\n path,\n schema,\n delimiter=None,\n escapechar=None,\n lineterminator=None,\n na_rep=None,\n external=True,\n **kwargs,\n ):\n table_format = DelimitedFormat(\n path,\n delimiter=delimiter,\n escapechar=escapechar,\n lineterminator=lineterminator,\n na_rep=na_rep,\n )\n super().__init__(\n table_name, schema, table_format, external=external, **kwargs\n )\n\n\nclass CreateTableAvro(CreateTable):\n def __init__(self, table_name, path, avro_schema, external=True, **kwargs):\n super().__init__(table_name, external=external, **kwargs)\n self.table_format = AvroFormat(path, avro_schema)\n\n @property\n def _pieces(self):\n yield '\\n'.join(self.table_format.to_ddl())\n\n\nclass LoadData(BaseDDL):\n\n \"\"\"\n Generate DDL for LOAD DATA command. 
Cannot be cancelled\n \"\"\"\n\n def __init__(\n self,\n table_name,\n path,\n database=None,\n partition=None,\n partition_schema=None,\n overwrite=False,\n ):\n self.table_name = table_name\n self.database = database\n self.path = path\n\n self.partition = partition\n self.partition_schema = partition_schema\n\n self.overwrite = overwrite\n\n def compile(self):\n overwrite = 'OVERWRITE ' if self.overwrite else ''\n\n if self.partition is not None:\n partition = '\\n' + format_partition(\n self.partition, self.partition_schema\n )\n else:\n partition = ''\n\n scoped_name = self._get_scoped_name(self.table_name, self.database)\n return \"LOAD DATA INPATH '{}' {}INTO TABLE {}{}\".format(\n self.path, overwrite, scoped_name, partition\n )\n\n\nclass PartitionProperties(AlterTable):\n def __init__(\n self,\n table,\n partition,\n partition_schema,\n location=None,\n format=None,\n tbl_properties=None,\n serde_properties=None,\n ):\n super().__init__(\n table,\n location=location,\n format=format,\n tbl_properties=tbl_properties,\n serde_properties=serde_properties,\n )\n self.partition = partition\n self.partition_schema = partition_schema\n\n def _compile(self, cmd, property_prefix=''):\n part = format_partition(self.partition, self.partition_schema)\n if cmd:\n part = f'{cmd} {part}'\n\n props = self._format_properties(property_prefix)\n action = f'{self.table} {part}{props}'\n return self._wrap_command(action)\n\n\nclass AddPartition(PartitionProperties):\n def __init__(self, table, partition, partition_schema, location=None):\n super().__init__(table, partition, partition_schema, location=location)\n\n def compile(self):\n return self._compile('ADD')\n\n\nclass AlterPartition(PartitionProperties):\n def compile(self):\n return self._compile('', 'SET ')\n\n\nclass DropPartition(PartitionProperties):\n def __init__(self, table, partition, partition_schema):\n super().__init__(table, partition, partition_schema)\n\n def compile(self):\n return self._compile('DROP')\n\n\nclass CacheTable(BaseDDL):\n def __init__(self, table_name, database=None, pool='default'):\n self.table_name = table_name\n self.database = database\n self.pool = pool\n\n def compile(self):\n scoped_name = self._get_scoped_name(self.table_name, self.database)\n return \"ALTER TABLE {} SET CACHED IN '{}'\".format(\n scoped_name, self.pool\n )\n\n\nclass CreateFunction(BaseDDL):\n\n _object_type = 'FUNCTION'\n\n def __init__(self, func, name=None, database=None):\n self.func = func\n self.name = name or func.name\n self.database = database\n\n def _impala_signature(self):\n scoped_name = self._get_scoped_name(self.name, self.database)\n input_sig = _impala_input_signature(self.func.inputs)\n output_sig = type_to_sql_string(self.func.output)\n\n return f'{scoped_name}({input_sig}) returns {output_sig}'\n\n\nclass CreateUDF(CreateFunction):\n def compile(self):\n create_decl = 'CREATE FUNCTION'\n impala_sig = self._impala_signature()\n param_line = \"location '{}' symbol='{}'\".format(\n self.func.lib_path, self.func.so_symbol\n )\n return ' '.join([create_decl, impala_sig, param_line])\n\n\nclass CreateUDA(CreateFunction):\n def compile(self):\n create_decl = 'CREATE AGGREGATE FUNCTION'\n impala_sig = self._impala_signature()\n tokens = [f\"location '{self.func.lib_path}'\"]\n\n fn_names = (\n 'init_fn',\n 'update_fn',\n 'merge_fn',\n 'serialize_fn',\n 'finalize_fn',\n )\n\n for fn in fn_names:\n value = getattr(self.func, fn)\n if value is not None:\n tokens.append(f\"{fn}='{value}'\")\n\n return ' '.join([create_decl, 
impala_sig]) + ' ' + '\\n'.join(tokens)\n\n\nclass DropFunction(DropFunction):\n def _impala_signature(self):\n full_name = self._get_scoped_name(self.name, self.database)\n input_sig = _impala_input_signature(self.inputs)\n return f'{full_name}({input_sig})'\n\n\nclass ListFunction(BaseDDL):\n def __init__(self, database, like=None, aggregate=False):\n self.database = database\n self.like = like\n self.aggregate = aggregate\n\n def compile(self):\n statement = 'SHOW '\n if self.aggregate:\n statement += 'AGGREGATE '\n statement += f'FUNCTIONS IN {self.database}'\n if self.like:\n statement += f\" LIKE '{self.like}'\"\n return statement\n\n\ndef _impala_input_signature(inputs):\n # TODO: varargs '{}...'.format(val)\n return ', '.join(map(type_to_sql_string, inputs))\n", "path": "ibis/backends/impala/ddl.py"}]} | 3,430 | 125 |
gh_patches_debug_40547 | rasdani/github-patches | git_diff | nf-core__tools-1745 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Link in `nf-core modules info` broken
### Description of the bug
The link created by `nf-core modules info` has been broken since the move away from the GitHub API. The link is set in https://github.com/nf-core/tools/blob/dev/nf_core/modules/info.py#L164-L168, but `self.remote_location` is either an HTTPS or an SSH address, so the resulting link is broken.
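As a rough sketch (this is not the actual nf-core implementation; the helper name is made up), the remote URL would need to be reduced to an `owner/repo` path before being embedded in a `https://github.com/...` link:
```python
import re

def repo_path_from_remote(remote_url: str) -> str:
    """Hypothetical helper: strip the scheme/ssh prefix and a trailing .git."""
    path = re.sub(r"^(?:https?://[^/]+/|git@[^:]+:|ssh://git@[^/]+/)", "", remote_url)
    return path[:-4] if path.endswith(".git") else path

# repo_path_from_remote("git@github.com:nf-core/modules.git")     -> "nf-core/modules"
# repo_path_from_remote("https://github.com/nf-core/modules.git") -> "nf-core/modules"
```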
### Command used and terminal output
```console
$ nf-core modules info
```
### System information
nf-core/tools @2.5dev
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nf_core/modules/info.py`
Content:
```
1 import logging
2 import os
3
4 import questionary
5 import yaml
6 from rich import box
7 from rich.console import Group
8 from rich.markdown import Markdown
9 from rich.panel import Panel
10 from rich.table import Table
11 from rich.text import Text
12
13 import nf_core.utils
14 from nf_core.modules.modules_json import ModulesJson
15
16 from .module_utils import get_repo_type
17 from .modules_command import ModuleCommand
18 from .modules_repo import NF_CORE_MODULES_REMOTE
19
20 log = logging.getLogger(__name__)
21
22
23 class ModuleInfo(ModuleCommand):
24 def __init__(self, pipeline_dir, tool, remote_url, branch, no_pull, base_path):
25 super().__init__(pipeline_dir, remote_url, branch, no_pull, base_path)
26 self.meta = None
27 self.local_path = None
28 self.remote_location = None
29
30 # Quietly check if this is a pipeline or not
31 if pipeline_dir:
32 try:
33 pipeline_dir, repo_type = get_repo_type(pipeline_dir, use_prompt=False)
34 log.debug(f"Found {repo_type} repo: {pipeline_dir}")
35 except UserWarning as e:
36 log.debug(f"Only showing remote info: {e}")
37 pipeline_dir = None
38
39 if self.repo_type == "pipeline":
40 self.modules_json = ModulesJson(self.dir)
41 self.modules_json.check_up_to_date()
42 else:
43 self.modules_json = None
44 self.module = self.init_mod_name(tool)
45
46 def init_mod_name(self, module):
47 """
48 Makes sure that we have a module name before proceeding.
49
50 Args:
51 module: str: Module name to check
52 """
53 if module is None:
54 local = questionary.confirm(
55 "Is the module locally installed?", style=nf_core.utils.nfcore_question_style
56 ).unsafe_ask()
57 if local:
58 if self.repo_type == "modules":
59 modules = self.get_modules_clone_modules()
60 else:
61 modules = self.modules_json.get_all_modules().get(self.modules_repo.fullname)
62 if modules is None:
63 raise UserWarning(f"No modules installed from '{self.modules_repo.remote_url}'")
64 else:
65 modules = self.modules_repo.get_avail_modules()
66 module = questionary.autocomplete(
67 "Please select a module", choices=modules, style=nf_core.utils.nfcore_question_style
68 ).unsafe_ask()
69 while module not in modules:
70 log.info(f"'{module}' is not a valid module name")
71 module = questionary.autocomplete(
72 "Please select a new module", choices=modules, style=nf_core.utils.nfcore_question_style
73 ).unsafe_ask()
74
75 return module
76
77 def get_module_info(self):
78 """Given the name of a module, parse meta.yml and print usage help."""
79
80 # Running with a local install, try to find the local meta
81 if self.dir:
82 self.meta = self.get_local_yaml()
83
84 # Either failed locally or in remote mode
85 if not self.meta:
86 self.meta = self.get_remote_yaml()
87
88 # Could not find the meta
89 if self.meta is False:
90 raise UserWarning(f"Could not find module '{self.module}'")
91
92 return self.generate_module_info_help()
93
94 def get_local_yaml(self):
95 """Attempt to get the meta.yml file from a locally installed module.
96
97 Returns:
98 dict or bool: Parsed meta.yml found, False otherwise
99 """
100
101 if self.repo_type == "pipeline":
102 # Try to find and load the meta.yml file
103 repo_name = self.modules_repo.fullname
104 module_base_path = os.path.join(self.dir, "modules", repo_name)
105 # Check that we have any modules installed from this repo
106 modules = self.modules_json.get_all_modules().get(repo_name)
107 if modules is None:
108 raise LookupError(f"No modules installed from {self.modules_repo.remote_url}")
109
110 if self.module in modules:
111 mod_dir = os.path.join(module_base_path, self.module)
112 meta_fn = os.path.join(mod_dir, "meta.yml")
113 if os.path.exists(meta_fn):
114 log.debug(f"Found local file: {meta_fn}")
115 with open(meta_fn, "r") as fh:
116 self.local_path = mod_dir
117 return yaml.safe_load(fh)
118
119 log.debug(f"Module '{self.module}' meta.yml not found locally")
120 else:
121 module_base_path = os.path.join(self.dir, "modules")
122 if self.module in os.listdir(module_base_path):
123 mod_dir = os.path.join(module_base_path, self.module)
124 meta_fn = os.path.join(mod_dir, "meta.yml")
125 if os.path.exists(meta_fn):
126 log.debug(f"Found local file: {meta_fn}")
127 with open(meta_fn, "r") as fh:
128 self.local_path = mod_dir
129 return yaml.safe_load(fh)
130 log.debug(f"Module '{self.module}' meta.yml not found locally")
131
132 return None
133
134 def get_remote_yaml(self):
135 """Attempt to get the meta.yml file from a remote repo.
136
137 Returns:
138 dict or bool: Parsed meta.yml found, False otherwise
139 """
140 # Check if our requested module is there
141 if self.module not in self.modules_repo.get_avail_modules():
142 return False
143
144 file_contents = self.modules_repo.get_meta_yml(self.module)
145 if file_contents is None:
146 return False
147 self.remote_location = self.modules_repo.remote_url
148 return yaml.safe_load(file_contents)
149
150 def generate_module_info_help(self):
151 """Take the parsed meta.yml and generate rich help.
152
153 Returns:
154 rich renderable
155 """
156
157 renderables = []
158
159 # Intro panel
160 intro_text = Text()
161 if self.local_path:
162 intro_text.append(Text.from_markup(f"Location: [blue]{self.local_path}\n"))
163 elif self.remote_location:
164 intro_text.append(
165 Text.from_markup(
166 f":globe_with_meridians: Repository: [link=https://github.com/{self.remote_location}]{self.remote_location}[/]\n"
167 )
168 )
169
170 if self.meta.get("tools"):
171 tools_strings = []
172 for tool in self.meta["tools"]:
173 for tool_name, tool_meta in tool.items():
174 if "homepage" in tool_meta:
175 tools_strings.append(f"[link={tool_meta['homepage']}]{tool_name}[/link]")
176 else:
177 tools_strings.append(f"{tool_name}")
178 intro_text.append(Text.from_markup(f":wrench: Tools: {', '.join(tools_strings)}\n", style="dim"))
179
180 if self.meta.get("description"):
181 intro_text.append(Text.from_markup(f":book: Description: {self.meta['description']}", style="dim"))
182
183 renderables.append(
184 Panel(
185 intro_text,
186 title=f"[bold]Module: [green]{self.module}\n",
187 title_align="left",
188 )
189 )
190
191 # Inputs
192 if self.meta.get("input"):
193 inputs_table = Table(expand=True, show_lines=True, box=box.MINIMAL_HEAVY_HEAD, padding=0)
194 inputs_table.add_column(":inbox_tray: Inputs")
195 inputs_table.add_column("Description")
196 inputs_table.add_column("Pattern", justify="right", style="green")
197 for input in self.meta["input"]:
198 for key, info in input.items():
199 inputs_table.add_row(
200 f"[orange1 on black] {key} [/][dim i] ({info['type']})",
201 Markdown(info["description"] if info["description"] else ""),
202 info.get("pattern", ""),
203 )
204
205 renderables.append(inputs_table)
206
207 # Outputs
208 if self.meta.get("output"):
209 outputs_table = Table(expand=True, show_lines=True, box=box.MINIMAL_HEAVY_HEAD, padding=0)
210 outputs_table.add_column(":outbox_tray: Outputs")
211 outputs_table.add_column("Description")
212 outputs_table.add_column("Pattern", justify="right", style="green")
213 for output in self.meta["output"]:
214 for key, info in output.items():
215 outputs_table.add_row(
216 f"[orange1 on black] {key} [/][dim i] ({info['type']})",
217 Markdown(info["description"] if info["description"] else ""),
218 info.get("pattern", ""),
219 )
220
221 renderables.append(outputs_table)
222
223 # Installation command
224 if self.remote_location:
225 cmd_base = "nf-core modules"
226 if self.remote_location != NF_CORE_MODULES_REMOTE:
227 cmd_base = f"nf-core modules --git-remote {self.remote_location}"
228 renderables.append(
229 Text.from_markup(f"\n :computer: Installation command: [magenta]{cmd_base} install {self.module}\n")
230 )
231
232 return Group(*renderables)
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nf_core/modules/info.py b/nf_core/modules/info.py
--- a/nf_core/modules/info.py
+++ b/nf_core/modules/info.py
@@ -21,11 +21,46 @@
class ModuleInfo(ModuleCommand):
+ """
+ Class to print information of a module.
+
+ Attributes
+ ----------
+ meta : YAML object
+ stores the information from meta.yml file
+ local_path : str
+ path of the local modules
+ remote_location : str
+ remote repository URL
+ local : bool
+ indicates if the module is locally installed or not
+ repo_type : str
+ repository type. Can be either 'pipeline' or 'modules'
+ modules_json : ModulesJson object
+ contains 'modules.json' file information from a pipeline
+ module : str
+ name of the tool to get information from
+
+ Methods
+ -------
+ init_mod_name(module)
+ Makes sure that we have a module name
+ get_module_info()
+ Given the name of a module, parse meta.yml and print usage help
+ get_local_yaml()
+ Attempt to get the meta.yml file from a locally installed module
+ get_remote_yaml()
+ Attempt to get the meta.yml file from a remote repo
+ generate_module_info_help()
+ Take the parsed meta.yml and generate rich help
+ """
+
def __init__(self, pipeline_dir, tool, remote_url, branch, no_pull, base_path):
super().__init__(pipeline_dir, remote_url, branch, no_pull, base_path)
self.meta = None
self.local_path = None
self.remote_location = None
+ self.local = None
# Quietly check if this is a pipeline or not
if pipeline_dir:
@@ -51,10 +86,10 @@
module: str: Module name to check
"""
if module is None:
- local = questionary.confirm(
+ self.local = questionary.confirm(
"Is the module locally installed?", style=nf_core.utils.nfcore_question_style
).unsafe_ask()
- if local:
+ if self.local:
if self.repo_type == "modules":
modules = self.get_modules_clone_modules()
else:
@@ -78,7 +113,7 @@
"""Given the name of a module, parse meta.yml and print usage help."""
# Running with a local install, try to find the local meta
- if self.dir:
+ if self.local:
self.meta = self.get_local_yaml()
# Either failed locally or in remote mode
@@ -163,7 +198,11 @@
elif self.remote_location:
intro_text.append(
Text.from_markup(
- f":globe_with_meridians: Repository: [link=https://github.com/{self.remote_location}]{self.remote_location}[/]\n"
+ ":globe_with_meridians: Repository: "
+ f"{ '[link={self.remote_location}]' if self.remote_location.startswith('http') else ''}"
+ f"{self.remote_location}"
+ f"{'[/link]' if self.remote_location.startswith('http') else '' }"
+ "\n"
)
)
| {"golden_diff": "diff --git a/nf_core/modules/info.py b/nf_core/modules/info.py\n--- a/nf_core/modules/info.py\n+++ b/nf_core/modules/info.py\n@@ -21,11 +21,46 @@\n \n \n class ModuleInfo(ModuleCommand):\n+ \"\"\"\n+ Class to print information of a module.\n+\n+ Attributes\n+ ----------\n+ meta : YAML object\n+ stores the information from meta.yml file\n+ local_path : str\n+ path of the local modules\n+ remote_location : str\n+ remote repository URL\n+ local : bool\n+ indicates if the module is locally installed or not\n+ repo_type : str\n+ repository type. Can be either 'pipeline' or 'modules'\n+ modules_json : ModulesJson object\n+ contains 'modules.json' file information from a pipeline\n+ module : str\n+ name of the tool to get information from\n+\n+ Methods\n+ -------\n+ init_mod_name(module)\n+ Makes sure that we have a module name\n+ get_module_info()\n+ Given the name of a module, parse meta.yml and print usage help\n+ get_local_yaml()\n+ Attempt to get the meta.yml file from a locally installed module\n+ get_remote_yaml()\n+ Attempt to get the meta.yml file from a remote repo\n+ generate_module_info_help()\n+ Take the parsed meta.yml and generate rich help\n+ \"\"\"\n+\n def __init__(self, pipeline_dir, tool, remote_url, branch, no_pull, base_path):\n super().__init__(pipeline_dir, remote_url, branch, no_pull, base_path)\n self.meta = None\n self.local_path = None\n self.remote_location = None\n+ self.local = None\n \n # Quietly check if this is a pipeline or not\n if pipeline_dir:\n@@ -51,10 +86,10 @@\n module: str: Module name to check\n \"\"\"\n if module is None:\n- local = questionary.confirm(\n+ self.local = questionary.confirm(\n \"Is the module locally installed?\", style=nf_core.utils.nfcore_question_style\n ).unsafe_ask()\n- if local:\n+ if self.local:\n if self.repo_type == \"modules\":\n modules = self.get_modules_clone_modules()\n else:\n@@ -78,7 +113,7 @@\n \"\"\"Given the name of a module, parse meta.yml and print usage help.\"\"\"\n \n # Running with a local install, try to find the local meta\n- if self.dir:\n+ if self.local:\n self.meta = self.get_local_yaml()\n \n # Either failed locally or in remote mode\n@@ -163,7 +198,11 @@\n elif self.remote_location:\n intro_text.append(\n Text.from_markup(\n- f\":globe_with_meridians: Repository: [link=https://github.com/{self.remote_location}]{self.remote_location}[/]\\n\"\n+ \":globe_with_meridians: Repository: \"\n+ f\"{ '[link={self.remote_location}]' if self.remote_location.startswith('http') else ''}\"\n+ f\"{self.remote_location}\"\n+ f\"{'[/link]' if self.remote_location.startswith('http') else '' }\"\n+ \"\\n\"\n )\n )\n", "issue": "Link in `nf-core modules info` broken\n### Description of the bug\r\n\r\nThe link created by `nf-core modules info` is broken since the move from the github api. 
The link is set on https://github.com/nf-core/tools/blob/dev/nf_core/modules/info.py#L164-L168, but the `self.remote_location` is either a https or ssh address so the link is broken.\r\n\r\n### Command used and terminal output\r\n\r\n```console\r\n$ nf-core modules info\r\n```\r\n\r\n\r\n### System information\r\n\r\nnf-core/tools @2.5dev\n", "before_files": [{"content": "import logging\nimport os\n\nimport questionary\nimport yaml\nfrom rich import box\nfrom rich.console import Group\nfrom rich.markdown import Markdown\nfrom rich.panel import Panel\nfrom rich.table import Table\nfrom rich.text import Text\n\nimport nf_core.utils\nfrom nf_core.modules.modules_json import ModulesJson\n\nfrom .module_utils import get_repo_type\nfrom .modules_command import ModuleCommand\nfrom .modules_repo import NF_CORE_MODULES_REMOTE\n\nlog = logging.getLogger(__name__)\n\n\nclass ModuleInfo(ModuleCommand):\n def __init__(self, pipeline_dir, tool, remote_url, branch, no_pull, base_path):\n super().__init__(pipeline_dir, remote_url, branch, no_pull, base_path)\n self.meta = None\n self.local_path = None\n self.remote_location = None\n\n # Quietly check if this is a pipeline or not\n if pipeline_dir:\n try:\n pipeline_dir, repo_type = get_repo_type(pipeline_dir, use_prompt=False)\n log.debug(f\"Found {repo_type} repo: {pipeline_dir}\")\n except UserWarning as e:\n log.debug(f\"Only showing remote info: {e}\")\n pipeline_dir = None\n\n if self.repo_type == \"pipeline\":\n self.modules_json = ModulesJson(self.dir)\n self.modules_json.check_up_to_date()\n else:\n self.modules_json = None\n self.module = self.init_mod_name(tool)\n\n def init_mod_name(self, module):\n \"\"\"\n Makes sure that we have a module name before proceeding.\n\n Args:\n module: str: Module name to check\n \"\"\"\n if module is None:\n local = questionary.confirm(\n \"Is the module locally installed?\", style=nf_core.utils.nfcore_question_style\n ).unsafe_ask()\n if local:\n if self.repo_type == \"modules\":\n modules = self.get_modules_clone_modules()\n else:\n modules = self.modules_json.get_all_modules().get(self.modules_repo.fullname)\n if modules is None:\n raise UserWarning(f\"No modules installed from '{self.modules_repo.remote_url}'\")\n else:\n modules = self.modules_repo.get_avail_modules()\n module = questionary.autocomplete(\n \"Please select a module\", choices=modules, style=nf_core.utils.nfcore_question_style\n ).unsafe_ask()\n while module not in modules:\n log.info(f\"'{module}' is not a valid module name\")\n module = questionary.autocomplete(\n \"Please select a new module\", choices=modules, style=nf_core.utils.nfcore_question_style\n ).unsafe_ask()\n\n return module\n\n def get_module_info(self):\n \"\"\"Given the name of a module, parse meta.yml and print usage help.\"\"\"\n\n # Running with a local install, try to find the local meta\n if self.dir:\n self.meta = self.get_local_yaml()\n\n # Either failed locally or in remote mode\n if not self.meta:\n self.meta = self.get_remote_yaml()\n\n # Could not find the meta\n if self.meta is False:\n raise UserWarning(f\"Could not find module '{self.module}'\")\n\n return self.generate_module_info_help()\n\n def get_local_yaml(self):\n \"\"\"Attempt to get the meta.yml file from a locally installed module.\n\n Returns:\n dict or bool: Parsed meta.yml found, False otherwise\n \"\"\"\n\n if self.repo_type == \"pipeline\":\n # Try to find and load the meta.yml file\n repo_name = self.modules_repo.fullname\n module_base_path = os.path.join(self.dir, \"modules\", repo_name)\n # 
Check that we have any modules installed from this repo\n modules = self.modules_json.get_all_modules().get(repo_name)\n if modules is None:\n raise LookupError(f\"No modules installed from {self.modules_repo.remote_url}\")\n\n if self.module in modules:\n mod_dir = os.path.join(module_base_path, self.module)\n meta_fn = os.path.join(mod_dir, \"meta.yml\")\n if os.path.exists(meta_fn):\n log.debug(f\"Found local file: {meta_fn}\")\n with open(meta_fn, \"r\") as fh:\n self.local_path = mod_dir\n return yaml.safe_load(fh)\n\n log.debug(f\"Module '{self.module}' meta.yml not found locally\")\n else:\n module_base_path = os.path.join(self.dir, \"modules\")\n if self.module in os.listdir(module_base_path):\n mod_dir = os.path.join(module_base_path, self.module)\n meta_fn = os.path.join(mod_dir, \"meta.yml\")\n if os.path.exists(meta_fn):\n log.debug(f\"Found local file: {meta_fn}\")\n with open(meta_fn, \"r\") as fh:\n self.local_path = mod_dir\n return yaml.safe_load(fh)\n log.debug(f\"Module '{self.module}' meta.yml not found locally\")\n\n return None\n\n def get_remote_yaml(self):\n \"\"\"Attempt to get the meta.yml file from a remote repo.\n\n Returns:\n dict or bool: Parsed meta.yml found, False otherwise\n \"\"\"\n # Check if our requested module is there\n if self.module not in self.modules_repo.get_avail_modules():\n return False\n\n file_contents = self.modules_repo.get_meta_yml(self.module)\n if file_contents is None:\n return False\n self.remote_location = self.modules_repo.remote_url\n return yaml.safe_load(file_contents)\n\n def generate_module_info_help(self):\n \"\"\"Take the parsed meta.yml and generate rich help.\n\n Returns:\n rich renderable\n \"\"\"\n\n renderables = []\n\n # Intro panel\n intro_text = Text()\n if self.local_path:\n intro_text.append(Text.from_markup(f\"Location: [blue]{self.local_path}\\n\"))\n elif self.remote_location:\n intro_text.append(\n Text.from_markup(\n f\":globe_with_meridians: Repository: [link=https://github.com/{self.remote_location}]{self.remote_location}[/]\\n\"\n )\n )\n\n if self.meta.get(\"tools\"):\n tools_strings = []\n for tool in self.meta[\"tools\"]:\n for tool_name, tool_meta in tool.items():\n if \"homepage\" in tool_meta:\n tools_strings.append(f\"[link={tool_meta['homepage']}]{tool_name}[/link]\")\n else:\n tools_strings.append(f\"{tool_name}\")\n intro_text.append(Text.from_markup(f\":wrench: Tools: {', '.join(tools_strings)}\\n\", style=\"dim\"))\n\n if self.meta.get(\"description\"):\n intro_text.append(Text.from_markup(f\":book: Description: {self.meta['description']}\", style=\"dim\"))\n\n renderables.append(\n Panel(\n intro_text,\n title=f\"[bold]Module: [green]{self.module}\\n\",\n title_align=\"left\",\n )\n )\n\n # Inputs\n if self.meta.get(\"input\"):\n inputs_table = Table(expand=True, show_lines=True, box=box.MINIMAL_HEAVY_HEAD, padding=0)\n inputs_table.add_column(\":inbox_tray: Inputs\")\n inputs_table.add_column(\"Description\")\n inputs_table.add_column(\"Pattern\", justify=\"right\", style=\"green\")\n for input in self.meta[\"input\"]:\n for key, info in input.items():\n inputs_table.add_row(\n f\"[orange1 on black] {key} [/][dim i] ({info['type']})\",\n Markdown(info[\"description\"] if info[\"description\"] else \"\"),\n info.get(\"pattern\", \"\"),\n )\n\n renderables.append(inputs_table)\n\n # Outputs\n if self.meta.get(\"output\"):\n outputs_table = Table(expand=True, show_lines=True, box=box.MINIMAL_HEAVY_HEAD, padding=0)\n outputs_table.add_column(\":outbox_tray: Outputs\")\n 
outputs_table.add_column(\"Description\")\n outputs_table.add_column(\"Pattern\", justify=\"right\", style=\"green\")\n for output in self.meta[\"output\"]:\n for key, info in output.items():\n outputs_table.add_row(\n f\"[orange1 on black] {key} [/][dim i] ({info['type']})\",\n Markdown(info[\"description\"] if info[\"description\"] else \"\"),\n info.get(\"pattern\", \"\"),\n )\n\n renderables.append(outputs_table)\n\n # Installation command\n if self.remote_location:\n cmd_base = \"nf-core modules\"\n if self.remote_location != NF_CORE_MODULES_REMOTE:\n cmd_base = f\"nf-core modules --git-remote {self.remote_location}\"\n renderables.append(\n Text.from_markup(f\"\\n :computer: Installation command: [magenta]{cmd_base} install {self.module}\\n\")\n )\n\n return Group(*renderables)\n", "path": "nf_core/modules/info.py"}], "after_files": [{"content": "import logging\nimport os\n\nimport questionary\nimport yaml\nfrom rich import box\nfrom rich.console import Group\nfrom rich.markdown import Markdown\nfrom rich.panel import Panel\nfrom rich.table import Table\nfrom rich.text import Text\n\nimport nf_core.utils\nfrom nf_core.modules.modules_json import ModulesJson\n\nfrom .module_utils import get_repo_type\nfrom .modules_command import ModuleCommand\nfrom .modules_repo import NF_CORE_MODULES_REMOTE\n\nlog = logging.getLogger(__name__)\n\n\nclass ModuleInfo(ModuleCommand):\n \"\"\"\n Class to print information of a module.\n\n Attributes\n ----------\n meta : YAML object\n stores the information from meta.yml file\n local_path : str\n path of the local modules\n remote_location : str\n remote repository URL\n local : bool\n indicates if the module is locally installed or not\n repo_type : str\n repository type. Can be either 'pipeline' or 'modules'\n modules_json : ModulesJson object\n contains 'modules.json' file information from a pipeline\n module : str\n name of the tool to get information from\n\n Methods\n -------\n init_mod_name(module)\n Makes sure that we have a module name\n get_module_info()\n Given the name of a module, parse meta.yml and print usage help\n get_local_yaml()\n Attempt to get the meta.yml file from a locally installed module\n get_remote_yaml()\n Attempt to get the meta.yml file from a remote repo\n generate_module_info_help()\n Take the parsed meta.yml and generate rich help\n \"\"\"\n\n def __init__(self, pipeline_dir, tool, remote_url, branch, no_pull, base_path):\n super().__init__(pipeline_dir, remote_url, branch, no_pull, base_path)\n self.meta = None\n self.local_path = None\n self.remote_location = None\n self.local = None\n\n # Quietly check if this is a pipeline or not\n if pipeline_dir:\n try:\n pipeline_dir, repo_type = get_repo_type(pipeline_dir, use_prompt=False)\n log.debug(f\"Found {repo_type} repo: {pipeline_dir}\")\n except UserWarning as e:\n log.debug(f\"Only showing remote info: {e}\")\n pipeline_dir = None\n\n if self.repo_type == \"pipeline\":\n self.modules_json = ModulesJson(self.dir)\n self.modules_json.check_up_to_date()\n else:\n self.modules_json = None\n self.module = self.init_mod_name(tool)\n\n def init_mod_name(self, module):\n \"\"\"\n Makes sure that we have a module name before proceeding.\n\n Args:\n module: str: Module name to check\n \"\"\"\n if module is None:\n self.local = questionary.confirm(\n \"Is the module locally installed?\", style=nf_core.utils.nfcore_question_style\n ).unsafe_ask()\n if self.local:\n if self.repo_type == \"modules\":\n modules = self.get_modules_clone_modules()\n else:\n modules = 
self.modules_json.get_all_modules().get(self.modules_repo.fullname)\n if modules is None:\n raise UserWarning(f\"No modules installed from '{self.modules_repo.remote_url}'\")\n else:\n modules = self.modules_repo.get_avail_modules()\n module = questionary.autocomplete(\n \"Please select a module\", choices=modules, style=nf_core.utils.nfcore_question_style\n ).unsafe_ask()\n while module not in modules:\n log.info(f\"'{module}' is not a valid module name\")\n module = questionary.autocomplete(\n \"Please select a new module\", choices=modules, style=nf_core.utils.nfcore_question_style\n ).unsafe_ask()\n\n return module\n\n def get_module_info(self):\n \"\"\"Given the name of a module, parse meta.yml and print usage help.\"\"\"\n\n # Running with a local install, try to find the local meta\n if self.local:\n self.meta = self.get_local_yaml()\n\n # Either failed locally or in remote mode\n if not self.meta:\n self.meta = self.get_remote_yaml()\n\n # Could not find the meta\n if self.meta is False:\n raise UserWarning(f\"Could not find module '{self.module}'\")\n\n return self.generate_module_info_help()\n\n def get_local_yaml(self):\n \"\"\"Attempt to get the meta.yml file from a locally installed module.\n\n Returns:\n dict or bool: Parsed meta.yml found, False otherwise\n \"\"\"\n\n if self.repo_type == \"pipeline\":\n # Try to find and load the meta.yml file\n repo_name = self.modules_repo.fullname\n module_base_path = os.path.join(self.dir, \"modules\", repo_name)\n # Check that we have any modules installed from this repo\n modules = self.modules_json.get_all_modules().get(repo_name)\n if modules is None:\n raise LookupError(f\"No modules installed from {self.modules_repo.remote_url}\")\n\n if self.module in modules:\n mod_dir = os.path.join(module_base_path, self.module)\n meta_fn = os.path.join(mod_dir, \"meta.yml\")\n if os.path.exists(meta_fn):\n log.debug(f\"Found local file: {meta_fn}\")\n with open(meta_fn, \"r\") as fh:\n self.local_path = mod_dir\n return yaml.safe_load(fh)\n\n log.debug(f\"Module '{self.module}' meta.yml not found locally\")\n else:\n module_base_path = os.path.join(self.dir, \"modules\")\n if self.module in os.listdir(module_base_path):\n mod_dir = os.path.join(module_base_path, self.module)\n meta_fn = os.path.join(mod_dir, \"meta.yml\")\n if os.path.exists(meta_fn):\n log.debug(f\"Found local file: {meta_fn}\")\n with open(meta_fn, \"r\") as fh:\n self.local_path = mod_dir\n return yaml.safe_load(fh)\n log.debug(f\"Module '{self.module}' meta.yml not found locally\")\n\n return None\n\n def get_remote_yaml(self):\n \"\"\"Attempt to get the meta.yml file from a remote repo.\n\n Returns:\n dict or bool: Parsed meta.yml found, False otherwise\n \"\"\"\n # Check if our requested module is there\n if self.module not in self.modules_repo.get_avail_modules():\n return False\n\n file_contents = self.modules_repo.get_meta_yml(self.module)\n if file_contents is None:\n return False\n self.remote_location = self.modules_repo.remote_url\n return yaml.safe_load(file_contents)\n\n def generate_module_info_help(self):\n \"\"\"Take the parsed meta.yml and generate rich help.\n\n Returns:\n rich renderable\n \"\"\"\n\n renderables = []\n\n # Intro panel\n intro_text = Text()\n if self.local_path:\n intro_text.append(Text.from_markup(f\"Location: [blue]{self.local_path}\\n\"))\n elif self.remote_location:\n intro_text.append(\n Text.from_markup(\n \":globe_with_meridians: Repository: \"\n f\"{ '[link={self.remote_location}]' if self.remote_location.startswith('http') else 
''}\"\n f\"{self.remote_location}\"\n f\"{'[/link]' if self.remote_location.startswith('http') else '' }\"\n \"\\n\"\n )\n )\n\n if self.meta.get(\"tools\"):\n tools_strings = []\n for tool in self.meta[\"tools\"]:\n for tool_name, tool_meta in tool.items():\n if \"homepage\" in tool_meta:\n tools_strings.append(f\"[link={tool_meta['homepage']}]{tool_name}[/link]\")\n else:\n tools_strings.append(f\"{tool_name}\")\n intro_text.append(Text.from_markup(f\":wrench: Tools: {', '.join(tools_strings)}\\n\", style=\"dim\"))\n\n if self.meta.get(\"description\"):\n intro_text.append(Text.from_markup(f\":book: Description: {self.meta['description']}\", style=\"dim\"))\n\n renderables.append(\n Panel(\n intro_text,\n title=f\"[bold]Module: [green]{self.module}\\n\",\n title_align=\"left\",\n )\n )\n\n # Inputs\n if self.meta.get(\"input\"):\n inputs_table = Table(expand=True, show_lines=True, box=box.MINIMAL_HEAVY_HEAD, padding=0)\n inputs_table.add_column(\":inbox_tray: Inputs\")\n inputs_table.add_column(\"Description\")\n inputs_table.add_column(\"Pattern\", justify=\"right\", style=\"green\")\n for input in self.meta[\"input\"]:\n for key, info in input.items():\n inputs_table.add_row(\n f\"[orange1 on black] {key} [/][dim i] ({info['type']})\",\n Markdown(info[\"description\"] if info[\"description\"] else \"\"),\n info.get(\"pattern\", \"\"),\n )\n\n renderables.append(inputs_table)\n\n # Outputs\n if self.meta.get(\"output\"):\n outputs_table = Table(expand=True, show_lines=True, box=box.MINIMAL_HEAVY_HEAD, padding=0)\n outputs_table.add_column(\":outbox_tray: Outputs\")\n outputs_table.add_column(\"Description\")\n outputs_table.add_column(\"Pattern\", justify=\"right\", style=\"green\")\n for output in self.meta[\"output\"]:\n for key, info in output.items():\n outputs_table.add_row(\n f\"[orange1 on black] {key} [/][dim i] ({info['type']})\",\n Markdown(info[\"description\"] if info[\"description\"] else \"\"),\n info.get(\"pattern\", \"\"),\n )\n\n renderables.append(outputs_table)\n\n # Installation command\n if self.remote_location:\n cmd_base = \"nf-core modules\"\n if self.remote_location != NF_CORE_MODULES_REMOTE:\n cmd_base = f\"nf-core modules --git-remote {self.remote_location}\"\n renderables.append(\n Text.from_markup(f\"\\n :computer: Installation command: [magenta]{cmd_base} install {self.module}\\n\")\n )\n\n return Group(*renderables)\n", "path": "nf_core/modules/info.py"}]} | 2,831 | 724 |
gh_patches_debug_25479 | rasdani/github-patches | git_diff | Pyomo__pyomo-1763 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update manylinux builder once Python 3.9 is added
We use a Marketplace GitHub Action to build our manylinux wheels for release. The main branch of this does not yet have Python 3.9 support, but there is a PR waiting for approval that would add it:
https://github.com/RalfG/python-wheels-manylinux-build/pull/33
Once this PR is merged, we will need to update the release workflow.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 """
12 Script to generate the installer for pyomo.
13 """
14
15 import sys
16 import os
17
18 def read(*rnames):
19 with open(os.path.join(os.path.dirname(__file__), *rnames)) as README:
20 # Strip all leading badges up to, but not including the COIN-OR
21 # badge so that they do not appear in the PyPI description
22 while True:
23 line = README.readline()
24 if 'COIN-OR' in line:
25 break
26 if line.strip() and '[![' not in line:
27 break
28 return line + README.read()
29
30 def get_version():
31 # Source pyomo/version/info.py to get the version number
32 _verInfo = dict(globals())
33 _verFile = os.path.join(os.path.dirname(__file__),
34 'pyomo','version','info.py')
35 with open(_verFile) as _FILE:
36 exec(_FILE.read(), _verInfo)
37 return _verInfo['__version__']
38
39 from setuptools import setup, find_packages
40
41 CYTHON_REQUIRED = "required"
42 if 'develop' in sys.argv:
43 using_cython = False
44 else:
45 using_cython = "automatic"
46 if '--with-cython' in sys.argv:
47 using_cython = CYTHON_REQUIRED
48 sys.argv.remove('--with-cython')
49 if '--without-cython' in sys.argv:
50 using_cython = False
51 sys.argv.remove('--without-cython')
52
53 ext_modules = []
54 if using_cython:
55 try:
56 import platform
57 if platform.python_implementation() != "CPython":
58 # break out of this try-except (disable Cython)
59 raise RuntimeError("Cython is only supported under CPython")
60 from Cython.Build import cythonize
61 #
62 # Note: The Cython developers recommend that you destribute C source
63 # files to users. But this is fine for evaluating the utility of Cython
64 #
65 import shutil
66 files = [
67 "pyomo/core/expr/numvalue.pyx",
68 "pyomo/core/expr/numeric_expr.pyx",
69 "pyomo/core/expr/logical_expr.pyx",
70 #"pyomo/core/expr/visitor.pyx",
71 "pyomo/core/util.pyx",
72 "pyomo/repn/standard_repn.pyx",
73 "pyomo/repn/plugins/cpxlp.pyx",
74 "pyomo/repn/plugins/gams_writer.pyx",
75 "pyomo/repn/plugins/baron_writer.pyx",
76 "pyomo/repn/plugins/ampl/ampl_.pyx",
77 ]
78 for f in files:
79 shutil.copyfile(f[:-1], f)
80 ext_modules = cythonize(files, compiler_directives={
81 "language_level": 3 if sys.version_info >= (3, ) else 2})
82 except:
83 if using_cython == CYTHON_REQUIRED:
84 print("""
85 ERROR: Cython was explicitly requested with --with-cython, but cythonization
86 of core Pyomo modules failed.
87 """)
88 raise
89 using_cython = False
90
91 def run_setup():
92 setup(name='Pyomo',
93 #
94 # Note: the release number is set in pyomo/version/info.py
95 #
96 version=get_version(),
97 maintainer='Pyomo Developer Team',
98 maintainer_email='[email protected]',
99 url='http://pyomo.org',
100 license='BSD',
101 platforms=["any"],
102 description='Pyomo: Python Optimization Modeling Objects',
103 long_description=read('README.md'),
104 long_description_content_type='text/markdown',
105 keywords=['optimization'],
106 classifiers=[
107 'Development Status :: 5 - Production/Stable',
108 'Intended Audience :: End Users/Desktop',
109 'Intended Audience :: Science/Research',
110 'License :: OSI Approved :: BSD License',
111 'Natural Language :: English',
112 'Operating System :: MacOS',
113 'Operating System :: Microsoft :: Windows',
114 'Operating System :: Unix',
115 'Programming Language :: Python',
116 'Programming Language :: Python :: 2',
117 'Programming Language :: Python :: 2.7',
118 'Programming Language :: Python :: 3',
119 'Programming Language :: Python :: 3.4',
120 'Programming Language :: Python :: 3.5',
121 'Programming Language :: Python :: 3.6',
122 'Programming Language :: Python :: 3.7',
123 'Programming Language :: Python :: 3.8',
124 'Programming Language :: Python :: 3.9',
125 'Programming Language :: Python :: Implementation :: CPython',
126 'Programming Language :: Python :: Implementation :: Jython',
127 'Programming Language :: Python :: Implementation :: PyPy',
128 'Topic :: Scientific/Engineering :: Mathematics',
129 'Topic :: Software Development :: Libraries :: Python Modules' ],
130 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
131 install_requires=[
132 'PyUtilib>=6.0.1.dev0',
133 'enum34;python_version<"3.4"',
134 'ply',
135 'six>=1.4',
136 ],
137 packages=find_packages(exclude=("scripts",)),
138 package_data={"pyomo.contrib.viewer":["*.ui"]},
139 ext_modules = ext_modules,
140 entry_points="""
141 [console_scripts]
142 runbenders=pyomo.pysp.benders:Benders_main
143 evaluate_xhat=pyomo.pysp.evaluate_xhat:EvaluateXhat_main
144 runph=pyomo.pysp.phinit:PH_main
145 runef=pyomo.pysp.ef_writer_script:main
146 phsolverserver=pyomo.pysp.phsolverserver:main
147 scenariotreeserver=pyomo.pysp.scenariotree.server_pyro:main
148 computeconf=pyomo.pysp.computeconf:main
149
150 results_schema=pyomo.scripting.commands:results_schema
151 pyro_mip_server = pyomo.scripting.pyro_mip_server:main
152 test.pyomo = pyomo.scripting.runtests:runPyomoTests
153 pyomo = pyomo.scripting.pyomo_main:main_console_script
154 pyomo_ns = pyomo.scripting.commands:pyomo_ns
155 pyomo_nsc = pyomo.scripting.commands:pyomo_nsc
156 kill_pyro_mip_servers = pyomo.scripting.commands:kill_pyro_mip_servers
157 launch_pyro_mip_servers = pyomo.scripting.commands:launch_pyro_mip_servers
158 readsol = pyomo.scripting.commands:readsol
159 OSSolverService = pyomo.scripting.commands:OSSolverService
160 pyomo_python = pyomo.scripting.commands:pyomo_python
161 pyomo_old=pyomo.scripting.pyomo_command:main
162
163 [pyomo.command]
164 pyomo.runbenders=pyomo.pysp.benders
165 pyomo.evaluate_xhat=pyomo.pysp.evaluate_xhat
166 pyomo.runph=pyomo.pysp.phinit
167 pyomo.runef=pyomo.pysp.ef_writer_script
168 pyomo.phsolverserver=pyomo.pysp.phsolverserver
169 pyomo.scenariotreeserver=pyomo.pysp.scenariotree.server_pyro
170 pyomo.computeconf=pyomo.pysp.computeconf
171
172 pyomo.help = pyomo.scripting.driver_help
173 pyomo.test.pyomo = pyomo.scripting.runtests
174 pyomo.pyro_mip_server = pyomo.scripting.pyro_mip_server
175 pyomo.results_schema=pyomo.scripting.commands
176 pyomo.viewer=pyomo.contrib.viewer.pyomo_viewer
177 """
178 )
179
180 try:
181 run_setup()
182 except SystemExit as e_info:
183 # Cython can generate a SystemExit exception on Windows if the
184 # environment is missing / has an incorrect Microsoft compiler.
185 # Since Cython is not strictly required, we will disable Cython and
186 # try re-running setup(), but only for this very specific situation.
187 if 'Microsoft Visual C++' not in str(e_info):
188 raise
189 elif using_cython == CYTHON_REQUIRED:
190 print("""
191 ERROR: Cython was explicitly requested with --with-cython, but cythonization
192 of core Pyomo modules failed.
193 """)
194 raise
195 else:
196 print("""
197 ERROR: setup() failed:
198 %s
199 Re-running setup() without the Cython modules
200 """ % (str(e_info),))
201 ext_modules = []
202 run_setup()
203 print("""
204 WARNING: Installation completed successfully, but the attempt to cythonize
205 core Pyomo modules failed. Cython provides performance
206 optimizations and is not required for any Pyomo functionality.
207 Cython returned the following error:
208 "%s"
209 """ % (str(e_info),))
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -113,11 +113,7 @@
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.4',
- 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
@@ -127,10 +123,9 @@
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules' ],
- python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
+ python_requires='>=3.6',
install_requires=[
'PyUtilib>=6.0.1.dev0',
- 'enum34;python_version<"3.4"',
'ply',
'six>=1.4',
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -113,11 +113,7 @@\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n- 'Programming Language :: Python :: 2',\n- 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.4',\n- 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n@@ -127,10 +123,9 @@\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules' ],\n- python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n+ python_requires='>=3.6',\n install_requires=[\n 'PyUtilib>=6.0.1.dev0',\n- 'enum34;python_version<\"3.4\"',\n 'ply',\n 'six>=1.4',\n ],\n", "issue": "Update manylinux builder once Python 3.9 is added\nWe use a Marketplace GitHub Action to build our manylinux wheels for release. The main branch of this does not yet have Python 3.9 support, but there is a PR waiting for approval that would add it:\r\n\r\nhttps://github.com/RalfG/python-wheels-manylinux-build/pull/33\r\n\r\nOnce this PR is merged, we will need to update the release workflow.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n\"\"\"\nScript to generate the installer for pyomo.\n\"\"\"\n\nimport sys\nimport os\n\ndef read(*rnames):\n with open(os.path.join(os.path.dirname(__file__), *rnames)) as README:\n # Strip all leading badges up to, but not including the COIN-OR\n # badge so that they do not appear in the PyPI description\n while True:\n line = README.readline()\n if 'COIN-OR' in line:\n break\n if line.strip() and '[![' not in line:\n break\n return line + README.read()\n\ndef get_version():\n # Source pyomo/version/info.py to get the version number\n _verInfo = dict(globals())\n _verFile = os.path.join(os.path.dirname(__file__),\n 'pyomo','version','info.py')\n with open(_verFile) as _FILE:\n exec(_FILE.read(), _verInfo)\n return _verInfo['__version__']\n\nfrom setuptools import setup, find_packages\n\nCYTHON_REQUIRED = \"required\"\nif 'develop' in sys.argv:\n using_cython = False\nelse:\n using_cython = \"automatic\"\nif '--with-cython' in sys.argv:\n using_cython = CYTHON_REQUIRED\n sys.argv.remove('--with-cython')\nif '--without-cython' in sys.argv:\n using_cython = False\n sys.argv.remove('--without-cython')\n\next_modules = []\nif using_cython:\n try:\n import platform\n if platform.python_implementation() != \"CPython\":\n # break out of this try-except (disable Cython)\n raise RuntimeError(\"Cython is only supported under CPython\")\n from Cython.Build import cythonize\n #\n # Note: The Cython developers recommend that you destribute C source\n # files to users. 
But this is fine for evaluating the utility of Cython\n #\n import shutil\n files = [\n \"pyomo/core/expr/numvalue.pyx\",\n \"pyomo/core/expr/numeric_expr.pyx\",\n \"pyomo/core/expr/logical_expr.pyx\",\n #\"pyomo/core/expr/visitor.pyx\",\n \"pyomo/core/util.pyx\",\n \"pyomo/repn/standard_repn.pyx\",\n \"pyomo/repn/plugins/cpxlp.pyx\",\n \"pyomo/repn/plugins/gams_writer.pyx\",\n \"pyomo/repn/plugins/baron_writer.pyx\",\n \"pyomo/repn/plugins/ampl/ampl_.pyx\",\n ]\n for f in files:\n shutil.copyfile(f[:-1], f)\n ext_modules = cythonize(files, compiler_directives={\n \"language_level\": 3 if sys.version_info >= (3, ) else 2})\n except:\n if using_cython == CYTHON_REQUIRED:\n print(\"\"\"\nERROR: Cython was explicitly requested with --with-cython, but cythonization\n of core Pyomo modules failed.\n\"\"\")\n raise\n using_cython = False\n\ndef run_setup():\n setup(name='Pyomo',\n #\n # Note: the release number is set in pyomo/version/info.py\n #\n version=get_version(),\n maintainer='Pyomo Developer Team',\n maintainer_email='[email protected]',\n url='http://pyomo.org',\n license='BSD',\n platforms=[\"any\"],\n description='Pyomo: Python Optimization Modeling Objects',\n long_description=read('README.md'),\n long_description_content_type='text/markdown',\n keywords=['optimization'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: Jython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules' ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=[\n 'PyUtilib>=6.0.1.dev0',\n 'enum34;python_version<\"3.4\"',\n 'ply',\n 'six>=1.4',\n ],\n packages=find_packages(exclude=(\"scripts\",)),\n package_data={\"pyomo.contrib.viewer\":[\"*.ui\"]},\n ext_modules = ext_modules,\n entry_points=\"\"\"\n [console_scripts]\n runbenders=pyomo.pysp.benders:Benders_main\n evaluate_xhat=pyomo.pysp.evaluate_xhat:EvaluateXhat_main\n runph=pyomo.pysp.phinit:PH_main\n runef=pyomo.pysp.ef_writer_script:main\n phsolverserver=pyomo.pysp.phsolverserver:main\n scenariotreeserver=pyomo.pysp.scenariotree.server_pyro:main\n computeconf=pyomo.pysp.computeconf:main\n\n results_schema=pyomo.scripting.commands:results_schema\n pyro_mip_server = pyomo.scripting.pyro_mip_server:main\n test.pyomo = pyomo.scripting.runtests:runPyomoTests\n pyomo = pyomo.scripting.pyomo_main:main_console_script\n pyomo_ns = pyomo.scripting.commands:pyomo_ns\n pyomo_nsc = pyomo.scripting.commands:pyomo_nsc\n kill_pyro_mip_servers = pyomo.scripting.commands:kill_pyro_mip_servers\n launch_pyro_mip_servers = pyomo.scripting.commands:launch_pyro_mip_servers\n readsol = 
pyomo.scripting.commands:readsol\n OSSolverService = pyomo.scripting.commands:OSSolverService\n pyomo_python = pyomo.scripting.commands:pyomo_python\n pyomo_old=pyomo.scripting.pyomo_command:main\n\n [pyomo.command]\n pyomo.runbenders=pyomo.pysp.benders\n pyomo.evaluate_xhat=pyomo.pysp.evaluate_xhat\n pyomo.runph=pyomo.pysp.phinit\n pyomo.runef=pyomo.pysp.ef_writer_script\n pyomo.phsolverserver=pyomo.pysp.phsolverserver\n pyomo.scenariotreeserver=pyomo.pysp.scenariotree.server_pyro\n pyomo.computeconf=pyomo.pysp.computeconf\n\n pyomo.help = pyomo.scripting.driver_help\n pyomo.test.pyomo = pyomo.scripting.runtests\n pyomo.pyro_mip_server = pyomo.scripting.pyro_mip_server\n pyomo.results_schema=pyomo.scripting.commands\n pyomo.viewer=pyomo.contrib.viewer.pyomo_viewer\n \"\"\"\n )\n\ntry:\n run_setup()\nexcept SystemExit as e_info:\n # Cython can generate a SystemExit exception on Windows if the\n # environment is missing / has an incorrect Microsoft compiler.\n # Since Cython is not strictly required, we will disable Cython and\n # try re-running setup(), but only for this very specific situation.\n if 'Microsoft Visual C++' not in str(e_info):\n raise\n elif using_cython == CYTHON_REQUIRED:\n print(\"\"\"\nERROR: Cython was explicitly requested with --with-cython, but cythonization\n of core Pyomo modules failed.\n\"\"\")\n raise\n else:\n print(\"\"\"\nERROR: setup() failed:\n %s\nRe-running setup() without the Cython modules\n\"\"\" % (str(e_info),))\n ext_modules = []\n run_setup()\n print(\"\"\"\nWARNING: Installation completed successfully, but the attempt to cythonize\n core Pyomo modules failed. Cython provides performance\n optimizations and is not required for any Pyomo functionality.\n Cython returned the following error:\n \"%s\"\n\"\"\" % (str(e_info),))\n", "path": "setup.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n\"\"\"\nScript to generate the installer for pyomo.\n\"\"\"\n\nimport sys\nimport os\n\ndef read(*rnames):\n with open(os.path.join(os.path.dirname(__file__), *rnames)) as README:\n # Strip all leading badges up to, but not including the COIN-OR\n # badge so that they do not appear in the PyPI description\n while True:\n line = README.readline()\n if 'COIN-OR' in line:\n break\n if line.strip() and '[![' not in line:\n break\n return line + README.read()\n\ndef get_version():\n # Source pyomo/version/info.py to get the version number\n _verInfo = dict(globals())\n _verFile = os.path.join(os.path.dirname(__file__),\n 'pyomo','version','info.py')\n with open(_verFile) as _FILE:\n exec(_FILE.read(), _verInfo)\n return _verInfo['__version__']\n\nfrom setuptools import setup, find_packages\n\nCYTHON_REQUIRED = \"required\"\nif 'develop' in sys.argv:\n using_cython = False\nelse:\n using_cython = \"automatic\"\nif '--with-cython' in sys.argv:\n using_cython = CYTHON_REQUIRED\n sys.argv.remove('--with-cython')\nif '--without-cython' in sys.argv:\n using_cython = False\n sys.argv.remove('--without-cython')\n\next_modules = []\nif using_cython:\n try:\n import platform\n if platform.python_implementation() != \"CPython\":\n # break out of this try-except (disable Cython)\n raise RuntimeError(\"Cython is only supported under CPython\")\n from Cython.Build import cythonize\n #\n # Note: The Cython developers recommend that you destribute C source\n # files to users. But this is fine for evaluating the utility of Cython\n #\n import shutil\n files = [\n \"pyomo/core/expr/numvalue.pyx\",\n \"pyomo/core/expr/numeric_expr.pyx\",\n \"pyomo/core/expr/logical_expr.pyx\",\n #\"pyomo/core/expr/visitor.pyx\",\n \"pyomo/core/util.pyx\",\n \"pyomo/repn/standard_repn.pyx\",\n \"pyomo/repn/plugins/cpxlp.pyx\",\n \"pyomo/repn/plugins/gams_writer.pyx\",\n \"pyomo/repn/plugins/baron_writer.pyx\",\n \"pyomo/repn/plugins/ampl/ampl_.pyx\",\n ]\n for f in files:\n shutil.copyfile(f[:-1], f)\n ext_modules = cythonize(files, compiler_directives={\n \"language_level\": 3 if sys.version_info >= (3, ) else 2})\n except:\n if using_cython == CYTHON_REQUIRED:\n print(\"\"\"\nERROR: Cython was explicitly requested with --with-cython, but cythonization\n of core Pyomo modules failed.\n\"\"\")\n raise\n using_cython = False\n\ndef run_setup():\n setup(name='Pyomo',\n #\n # Note: the release number is set in pyomo/version/info.py\n #\n version=get_version(),\n maintainer='Pyomo Developer Team',\n maintainer_email='[email protected]',\n url='http://pyomo.org',\n license='BSD',\n platforms=[\"any\"],\n description='Pyomo: Python Optimization Modeling Objects',\n long_description=read('README.md'),\n long_description_content_type='text/markdown',\n keywords=['optimization'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: 
Python :: 3.9',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: Jython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules' ],\n python_requires='>=3.6',\n install_requires=[\n 'PyUtilib>=6.0.1.dev0',\n 'ply',\n 'six>=1.4',\n ],\n packages=find_packages(exclude=(\"scripts\",)),\n package_data={\"pyomo.contrib.viewer\":[\"*.ui\"]},\n ext_modules = ext_modules,\n entry_points=\"\"\"\n [console_scripts]\n runbenders=pyomo.pysp.benders:Benders_main\n evaluate_xhat=pyomo.pysp.evaluate_xhat:EvaluateXhat_main\n runph=pyomo.pysp.phinit:PH_main\n runef=pyomo.pysp.ef_writer_script:main\n phsolverserver=pyomo.pysp.phsolverserver:main\n scenariotreeserver=pyomo.pysp.scenariotree.server_pyro:main\n computeconf=pyomo.pysp.computeconf:main\n\n results_schema=pyomo.scripting.commands:results_schema\n pyro_mip_server = pyomo.scripting.pyro_mip_server:main\n test.pyomo = pyomo.scripting.runtests:runPyomoTests\n pyomo = pyomo.scripting.pyomo_main:main_console_script\n pyomo_ns = pyomo.scripting.commands:pyomo_ns\n pyomo_nsc = pyomo.scripting.commands:pyomo_nsc\n kill_pyro_mip_servers = pyomo.scripting.commands:kill_pyro_mip_servers\n launch_pyro_mip_servers = pyomo.scripting.commands:launch_pyro_mip_servers\n readsol = pyomo.scripting.commands:readsol\n OSSolverService = pyomo.scripting.commands:OSSolverService\n pyomo_python = pyomo.scripting.commands:pyomo_python\n pyomo_old=pyomo.scripting.pyomo_command:main\n\n [pyomo.command]\n pyomo.runbenders=pyomo.pysp.benders\n pyomo.evaluate_xhat=pyomo.pysp.evaluate_xhat\n pyomo.runph=pyomo.pysp.phinit\n pyomo.runef=pyomo.pysp.ef_writer_script\n pyomo.phsolverserver=pyomo.pysp.phsolverserver\n pyomo.scenariotreeserver=pyomo.pysp.scenariotree.server_pyro\n pyomo.computeconf=pyomo.pysp.computeconf\n\n pyomo.help = pyomo.scripting.driver_help\n pyomo.test.pyomo = pyomo.scripting.runtests\n pyomo.pyro_mip_server = pyomo.scripting.pyro_mip_server\n pyomo.results_schema=pyomo.scripting.commands\n pyomo.viewer=pyomo.contrib.viewer.pyomo_viewer\n \"\"\"\n )\n\ntry:\n run_setup()\nexcept SystemExit as e_info:\n # Cython can generate a SystemExit exception on Windows if the\n # environment is missing / has an incorrect Microsoft compiler.\n # Since Cython is not strictly required, we will disable Cython and\n # try re-running setup(), but only for this very specific situation.\n if 'Microsoft Visual C++' not in str(e_info):\n raise\n elif using_cython == CYTHON_REQUIRED:\n print(\"\"\"\nERROR: Cython was explicitly requested with --with-cython, but cythonization\n of core Pyomo modules failed.\n\"\"\")\n raise\n else:\n print(\"\"\"\nERROR: setup() failed:\n %s\nRe-running setup() without the Cython modules\n\"\"\" % (str(e_info),))\n ext_modules = []\n run_setup()\n print(\"\"\"\nWARNING: Installation completed successfully, but the attempt to cythonize\n core Pyomo modules failed. Cython provides performance\n optimizations and is not required for any Pyomo functionality.\n Cython returned the following error:\n \"%s\"\n\"\"\" % (str(e_info),))\n", "path": "setup.py"}]} | 2,858 | 291 |
gh_patches_debug_35675 | rasdani/github-patches | git_diff | ibis-project__ibis-635 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve docstrings for hdfs_connect and impala_connect
@laserson can you take a crack at these?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ibis/impala/api.py`
Content:
```
1 # Copyright 2015 Cloudera Inc
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from ibis.impala.client import (ImpalaConnection, ImpalaClient, # noqa
15 Database, ImpalaTable)
16 from ibis.impala.udf import * # noqa
17 from ibis.impala.madlib import MADLibAPI # noqa
18 from ibis.config import options
19
20
21 def connect(host='localhost', port=21050, database='default', timeout=45,
22 use_ssl=False, ca_cert=None, user=None, password=None,
23 auth_mechanism='NOSASL', kerberos_service_name='impala',
24 pool_size=8, hdfs_client=None):
25 """
26 Create an Impala Client for use with Ibis
27
28 Parameters
29 ----------
30 host : host name
31 port : int, default 21050 (HiveServer 2)
32 database :
33 timeout :
34 use_ssl : boolean
35 ca_cert :
36 user :
37 password :
38 auth_mechanism : {'NOSASL' <- default, 'PLAIN', 'GSSAPI', 'LDAP'}
39 kerberos_service_name : string, default 'impala'
40
41 Examples
42 --------
43 >>> hdfs = ibis.hdfs_connect(**hdfs_params)
44 >>> client = ibis.impala.connect(hdfs_client=hdfs, **impala_params)
45
46 Returns
47 -------
48 con : ImpalaClient
49 """
50 params = {
51 'host': host,
52 'port': port,
53 'database': database,
54 'timeout': timeout,
55 'use_ssl': use_ssl,
56 'ca_cert': ca_cert,
57 'user': user,
58 'password': password,
59 'auth_mechanism': auth_mechanism,
60 'kerberos_service_name': kerberos_service_name
61 }
62
63 con = ImpalaConnection(pool_size=pool_size, **params)
64 client = ImpalaClient(con, hdfs_client=hdfs_client)
65
66 if options.default_backend is None:
67 options.default_backend = client
68
69 return client
70
```
Path: `ibis/__init__.py`
Content:
```
1 # Copyright 2014 Cloudera Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 # flake8: noqa
17
18 __version__ = '0.5.0'
19
20 from ibis.filesystems import HDFS, WebHDFS
21 from ibis.common import IbisError
22
23 import ibis.expr.api as api
24 import ibis.expr.types as ir
25
26 # __all__ is defined
27 from ibis.expr.api import *
28
29 import ibis.impala.api as impala
30 import ibis.sql.sqlite.api as sqlite
31
32 import ibis.config_init
33 from ibis.config import options
34 import ibis.util as util
35
36
37 # Deprecated
38 impala_connect = util.deprecate(impala.connect,
39 'impala_connect is deprecated, use'
40 ' ibis.impala.connect instead')
41
42
43 def make_client(db, hdfs_client=None):
44 """
45 Create an Ibis client from a database connection and optional additional
46 connections (like HDFS)
47
48 Parameters
49 ----------
50 db : Connection
51 e.g. produced by ibis.impala.connect
52 hdfs_client : ibis HDFS client
53
54 Examples
55 --------
56 >>> con = ibis.impala.connect(**impala_params)
57 >>> hdfs = ibis.hdfs_connect(**hdfs_params)
58 >>> client = ibis.make_client(con, hdfs_client=hdfs)
59
60 Returns
61 -------
62 client : IbisClient
63 """
64 db._hdfs = hdfs_client
65 return db
66
67 make_client = util.deprecate(
68 make_client, ('make_client is deprecated. '
69 'Use ibis.impala.connect '
70 ' with hdfs_client=hdfs_client'))
71
72
73 def hdfs_connect(host='localhost', port=50070, protocol='webhdfs',
74 auth_mechanism='NOSASL', verify=True, **kwds):
75 """
76 Connect to HDFS
77
78 Parameters
79 ----------
80 host : string
81 port : int, default 50070 (webhdfs default)
82 protocol : {'webhdfs'}
83 auth_mechanism : {'NOSASL' <- default, 'GSSAPI', 'LDAP', 'PLAIN'}
84 verify : boolean, default False
85 Set to False to turn off verifying SSL certificates
86
87 Other keywords are forwarded to hdfs library classes
88
89 Returns
90 -------
91 client : ibis HDFS client
92 """
93 import requests
94 session = kwds.setdefault('session', requests.Session())
95 session.verify = verify
96 if auth_mechanism in ['GSSAPI', 'LDAP']:
97 try:
98 import requests_kerberos
99 except ImportError:
100 raise IbisError(
101 "Unable to import requests-kerberos, which is required for "
102 "Kerberos HDFS support. Install it by executing `pip install "
103 "requests-kerberos` or `pip install hdfs[kerberos]`.")
104 from hdfs.ext.kerberos import KerberosClient
105 url = 'https://{0}:{1}'.format(host, port) # note SSL
106 kwds.setdefault('mutual_auth', 'OPTIONAL')
107 hdfs_client = KerberosClient(url, **kwds)
108 else:
109 from hdfs.client import InsecureClient
110 url = 'http://{0}:{1}'.format(host, port)
111 hdfs_client = InsecureClient(url, **kwds)
112 return WebHDFS(hdfs_client)
113
114 def test(impala=False):
115 import pytest
116 import ibis
117 import os
118
119 ibis_dir, _ = os.path.split(ibis.__file__)
120
121 args = ['--pyargs', ibis_dir]
122 if impala:
123 args.append('--impala')
124 pytest.main(args)
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ibis/__init__.py b/ibis/__init__.py
--- a/ibis/__init__.py
+++ b/ibis/__init__.py
@@ -77,18 +77,19 @@
Parameters
----------
- host : string
- port : int, default 50070 (webhdfs default)
+ host : string, Host name of the HDFS NameNode
+ port : int, NameNode's WebHDFS port (default 50070)
protocol : {'webhdfs'}
- auth_mechanism : {'NOSASL' <- default, 'GSSAPI', 'LDAP', 'PLAIN'}
- verify : boolean, default False
- Set to False to turn off verifying SSL certificates
+ auth_mechanism : string, Set to NOSASL or PLAIN for non-secure clusters.
+ Set to GSSAPI or LDAP for Kerberos-secured clusters.
+ verify : boolean, Set to False to turn off verifying SSL certificates.
+ (default True)
Other keywords are forwarded to hdfs library classes
Returns
-------
- client : ibis HDFS client
+ client : WebHDFS
"""
import requests
session = kwds.setdefault('session', requests.Session())
diff --git a/ibis/impala/api.py b/ibis/impala/api.py
--- a/ibis/impala/api.py
+++ b/ibis/impala/api.py
@@ -23,20 +23,27 @@
auth_mechanism='NOSASL', kerberos_service_name='impala',
pool_size=8, hdfs_client=None):
"""
- Create an Impala Client for use with Ibis
+ Create an ImpalaClient for use with Ibis.
Parameters
----------
- host : host name
- port : int, default 21050 (HiveServer 2)
- database :
- timeout :
- use_ssl : boolean
- ca_cert :
- user :
- password :
- auth_mechanism : {'NOSASL' <- default, 'PLAIN', 'GSSAPI', 'LDAP'}
- kerberos_service_name : string, default 'impala'
+ host : string, Host name of the impalad or HiveServer2 in Hive
+ port : int, Defaults to 21050 (Impala's HiveServer2)
+ database : string, Default database when obtaining new cursors
+ timeout : int, Connection timeout (seconds) when communicating with
+ HiveServer2
+ use_ssl : boolean, Use SSL when connecting to HiveServer2
+ ca_cert : string, Local path to 3rd party CA certificate or copy of server
+ certificate for self-signed certificates. If SSL is enabled, but this
+ argument is None, then certificate validation is skipped.
+ user : string, LDAP user to authenticate
+ password : string, LDAP password to authenticate
+ auth_mechanism : string, {'NOSASL' <- default, 'PLAIN', 'GSSAPI', 'LDAP'}.
+ Use NOSASL for non-secured Impala connections. Use PLAIN for
+ non-secured Hive clusters. Use LDAP for LDAP authenticated
+ connections. Use GSSAPI for Kerberos-secured clusters.
+ kerberos_service_name : string, Specify particular impalad service
+ principal.
Examples
--------
| {"golden_diff": "diff --git a/ibis/__init__.py b/ibis/__init__.py\n--- a/ibis/__init__.py\n+++ b/ibis/__init__.py\n@@ -77,18 +77,19 @@\n \n Parameters\n ----------\n- host : string\n- port : int, default 50070 (webhdfs default)\n+ host : string, Host name of the HDFS NameNode\n+ port : int, NameNode's WebHDFS port (default 50070)\n protocol : {'webhdfs'}\n- auth_mechanism : {'NOSASL' <- default, 'GSSAPI', 'LDAP', 'PLAIN'}\n- verify : boolean, default False\n- Set to False to turn off verifying SSL certificates\n+ auth_mechanism : string, Set to NOSASL or PLAIN for non-secure clusters.\n+ Set to GSSAPI or LDAP for Kerberos-secured clusters.\n+ verify : boolean, Set to False to turn off verifying SSL certificates.\n+ (default True)\n \n Other keywords are forwarded to hdfs library classes\n \n Returns\n -------\n- client : ibis HDFS client\n+ client : WebHDFS\n \"\"\"\n import requests\n session = kwds.setdefault('session', requests.Session())\ndiff --git a/ibis/impala/api.py b/ibis/impala/api.py\n--- a/ibis/impala/api.py\n+++ b/ibis/impala/api.py\n@@ -23,20 +23,27 @@\n auth_mechanism='NOSASL', kerberos_service_name='impala',\n pool_size=8, hdfs_client=None):\n \"\"\"\n- Create an Impala Client for use with Ibis\n+ Create an ImpalaClient for use with Ibis.\n \n Parameters\n ----------\n- host : host name\n- port : int, default 21050 (HiveServer 2)\n- database :\n- timeout :\n- use_ssl : boolean\n- ca_cert :\n- user :\n- password :\n- auth_mechanism : {'NOSASL' <- default, 'PLAIN', 'GSSAPI', 'LDAP'}\n- kerberos_service_name : string, default 'impala'\n+ host : string, Host name of the impalad or HiveServer2 in Hive\n+ port : int, Defaults to 21050 (Impala's HiveServer2)\n+ database : string, Default database when obtaining new cursors\n+ timeout : int, Connection timeout (seconds) when communicating with\n+ HiveServer2\n+ use_ssl : boolean, Use SSL when connecting to HiveServer2\n+ ca_cert : string, Local path to 3rd party CA certificate or copy of server\n+ certificate for self-signed certificates. If SSL is enabled, but this\n+ argument is None, then certificate validation is skipped.\n+ user : string, LDAP user to authenticate\n+ password : string, LDAP password to authenticate\n+ auth_mechanism : string, {'NOSASL' <- default, 'PLAIN', 'GSSAPI', 'LDAP'}.\n+ Use NOSASL for non-secured Impala connections. Use PLAIN for\n+ non-secured Hive clusters. Use LDAP for LDAP authenticated\n+ connections. 
Use GSSAPI for Kerberos-secured clusters.\n+ kerberos_service_name : string, Specify particular impalad service\n+ principal.\n \n Examples\n --------\n", "issue": "Improve docstrings for hdfs_connect and impala_connect\n@laserson can you take a crack at these?\n\n", "before_files": [{"content": "# Copyright 2015 Cloudera Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ibis.impala.client import (ImpalaConnection, ImpalaClient, # noqa\n Database, ImpalaTable)\nfrom ibis.impala.udf import * # noqa\nfrom ibis.impala.madlib import MADLibAPI # noqa\nfrom ibis.config import options\n\n\ndef connect(host='localhost', port=21050, database='default', timeout=45,\n use_ssl=False, ca_cert=None, user=None, password=None,\n auth_mechanism='NOSASL', kerberos_service_name='impala',\n pool_size=8, hdfs_client=None):\n \"\"\"\n Create an Impala Client for use with Ibis\n\n Parameters\n ----------\n host : host name\n port : int, default 21050 (HiveServer 2)\n database :\n timeout :\n use_ssl : boolean\n ca_cert :\n user :\n password :\n auth_mechanism : {'NOSASL' <- default, 'PLAIN', 'GSSAPI', 'LDAP'}\n kerberos_service_name : string, default 'impala'\n\n Examples\n --------\n >>> hdfs = ibis.hdfs_connect(**hdfs_params)\n >>> client = ibis.impala.connect(hdfs_client=hdfs, **impala_params)\n\n Returns\n -------\n con : ImpalaClient\n \"\"\"\n params = {\n 'host': host,\n 'port': port,\n 'database': database,\n 'timeout': timeout,\n 'use_ssl': use_ssl,\n 'ca_cert': ca_cert,\n 'user': user,\n 'password': password,\n 'auth_mechanism': auth_mechanism,\n 'kerberos_service_name': kerberos_service_name\n }\n\n con = ImpalaConnection(pool_size=pool_size, **params)\n client = ImpalaClient(con, hdfs_client=hdfs_client)\n\n if options.default_backend is None:\n options.default_backend = client\n\n return client\n", "path": "ibis/impala/api.py"}, {"content": "# Copyright 2014 Cloudera Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# flake8: noqa\n\n__version__ = '0.5.0'\n\nfrom ibis.filesystems import HDFS, WebHDFS\nfrom ibis.common import IbisError\n\nimport ibis.expr.api as api\nimport ibis.expr.types as ir\n\n# __all__ is defined\nfrom ibis.expr.api import *\n\nimport ibis.impala.api as impala\nimport ibis.sql.sqlite.api as sqlite\n\nimport ibis.config_init\nfrom ibis.config import options\nimport ibis.util as util\n\n\n# Deprecated\nimpala_connect = util.deprecate(impala.connect,\n 'impala_connect is deprecated, use'\n ' ibis.impala.connect instead')\n\n\ndef make_client(db, hdfs_client=None):\n 
\"\"\"\n Create an Ibis client from a database connection and optional additional\n connections (like HDFS)\n\n Parameters\n ----------\n db : Connection\n e.g. produced by ibis.impala.connect\n hdfs_client : ibis HDFS client\n\n Examples\n --------\n >>> con = ibis.impala.connect(**impala_params)\n >>> hdfs = ibis.hdfs_connect(**hdfs_params)\n >>> client = ibis.make_client(con, hdfs_client=hdfs)\n\n Returns\n -------\n client : IbisClient\n \"\"\"\n db._hdfs = hdfs_client\n return db\n\nmake_client = util.deprecate(\n make_client, ('make_client is deprecated. '\n 'Use ibis.impala.connect '\n ' with hdfs_client=hdfs_client'))\n\n\ndef hdfs_connect(host='localhost', port=50070, protocol='webhdfs',\n auth_mechanism='NOSASL', verify=True, **kwds):\n \"\"\"\n Connect to HDFS\n\n Parameters\n ----------\n host : string\n port : int, default 50070 (webhdfs default)\n protocol : {'webhdfs'}\n auth_mechanism : {'NOSASL' <- default, 'GSSAPI', 'LDAP', 'PLAIN'}\n verify : boolean, default False\n Set to False to turn off verifying SSL certificates\n\n Other keywords are forwarded to hdfs library classes\n\n Returns\n -------\n client : ibis HDFS client\n \"\"\"\n import requests\n session = kwds.setdefault('session', requests.Session())\n session.verify = verify\n if auth_mechanism in ['GSSAPI', 'LDAP']:\n try:\n import requests_kerberos\n except ImportError:\n raise IbisError(\n \"Unable to import requests-kerberos, which is required for \"\n \"Kerberos HDFS support. Install it by executing `pip install \"\n \"requests-kerberos` or `pip install hdfs[kerberos]`.\")\n from hdfs.ext.kerberos import KerberosClient\n url = 'https://{0}:{1}'.format(host, port) # note SSL\n kwds.setdefault('mutual_auth', 'OPTIONAL')\n hdfs_client = KerberosClient(url, **kwds)\n else:\n from hdfs.client import InsecureClient\n url = 'http://{0}:{1}'.format(host, port)\n hdfs_client = InsecureClient(url, **kwds)\n return WebHDFS(hdfs_client)\n\ndef test(impala=False):\n import pytest\n import ibis\n import os\n\n ibis_dir, _ = os.path.split(ibis.__file__)\n\n args = ['--pyargs', ibis_dir]\n if impala:\n args.append('--impala')\n pytest.main(args)\n", "path": "ibis/__init__.py"}], "after_files": [{"content": "# Copyright 2015 Cloudera Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ibis.impala.client import (ImpalaConnection, ImpalaClient, # noqa\n Database, ImpalaTable)\nfrom ibis.impala.udf import * # noqa\nfrom ibis.impala.madlib import MADLibAPI # noqa\nfrom ibis.config import options\n\n\ndef connect(host='localhost', port=21050, database='default', timeout=45,\n use_ssl=False, ca_cert=None, user=None, password=None,\n auth_mechanism='NOSASL', kerberos_service_name='impala',\n pool_size=8, hdfs_client=None):\n \"\"\"\n Create an ImpalaClient for use with Ibis.\n\n Parameters\n ----------\n host : string, Host name of the impalad or HiveServer2 in Hive\n port : int, Defaults to 21050 (Impala's HiveServer2)\n database : string, Default database when obtaining new cursors\n timeout : int, Connection timeout 
(seconds) when communicating with\n HiveServer2\n use_ssl : boolean, Use SSL when connecting to HiveServer2\n ca_cert : string, Local path to 3rd party CA certificate or copy of server\n certificate for self-signed certificates. If SSL is enabled, but this\n argument is None, then certificate validation is skipped.\n user : string, LDAP user to authenticate\n password : string, LDAP password to authenticate\n auth_mechanism : string, {'NOSASL' <- default, 'PLAIN', 'GSSAPI', 'LDAP'}.\n Use NOSASL for non-secured Impala connections. Use PLAIN for\n non-secured Hive clusters. Use LDAP for LDAP authenticated\n connections. Use GSSAPI for Kerberos-secured clusters.\n kerberos_service_name : string, Specify particular impalad service\n principal.\n\n Examples\n --------\n >>> hdfs = ibis.hdfs_connect(**hdfs_params)\n >>> client = ibis.impala.connect(hdfs_client=hdfs, **impala_params)\n\n Returns\n -------\n con : ImpalaClient\n \"\"\"\n params = {\n 'host': host,\n 'port': port,\n 'database': database,\n 'timeout': timeout,\n 'use_ssl': use_ssl,\n 'ca_cert': ca_cert,\n 'user': user,\n 'password': password,\n 'auth_mechanism': auth_mechanism,\n 'kerberos_service_name': kerberos_service_name\n }\n\n con = ImpalaConnection(pool_size=pool_size, **params)\n client = ImpalaClient(con, hdfs_client=hdfs_client)\n\n if options.default_backend is None:\n options.default_backend = client\n\n return client\n", "path": "ibis/impala/api.py"}, {"content": "# Copyright 2014 Cloudera Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# flake8: noqa\n\n__version__ = '0.5.0'\n\nfrom ibis.filesystems import HDFS, WebHDFS\nfrom ibis.common import IbisError\n\nimport ibis.expr.api as api\nimport ibis.expr.types as ir\n\n# __all__ is defined\nfrom ibis.expr.api import *\n\nimport ibis.impala.api as impala\nimport ibis.sql.sqlite.api as sqlite\n\nimport ibis.config_init\nfrom ibis.config import options\nimport ibis.util as util\n\n\n# Deprecated\nimpala_connect = util.deprecate(impala.connect,\n 'impala_connect is deprecated, use'\n ' ibis.impala.connect instead')\n\n\ndef make_client(db, hdfs_client=None):\n \"\"\"\n Create an Ibis client from a database connection and optional additional\n connections (like HDFS)\n\n Parameters\n ----------\n db : Connection\n e.g. produced by ibis.impala.connect\n hdfs_client : ibis HDFS client\n\n Examples\n --------\n >>> con = ibis.impala.connect(**impala_params)\n >>> hdfs = ibis.hdfs_connect(**hdfs_params)\n >>> client = ibis.make_client(con, hdfs_client=hdfs)\n\n Returns\n -------\n client : IbisClient\n \"\"\"\n db._hdfs = hdfs_client\n return db\n\nmake_client = util.deprecate(\n make_client, ('make_client is deprecated. 
'\n 'Use ibis.impala.connect '\n ' with hdfs_client=hdfs_client'))\n\n\ndef hdfs_connect(host='localhost', port=50070, protocol='webhdfs',\n auth_mechanism='NOSASL', verify=True, **kwds):\n \"\"\"\n Connect to HDFS\n\n Parameters\n ----------\n host : string, Host name of the HDFS NameNode\n port : int, NameNode's WebHDFS port (default 50070)\n protocol : {'webhdfs'}\n auth_mechanism : string, Set to NOSASL or PLAIN for non-secure clusters.\n Set to GSSAPI or LDAP for Kerberos-secured clusters.\n verify : boolean, Set to False to turn off verifying SSL certificates.\n (default True)\n\n Other keywords are forwarded to hdfs library classes\n\n Returns\n -------\n client : WebHDFS\n \"\"\"\n import requests\n session = kwds.setdefault('session', requests.Session())\n session.verify = verify\n if auth_mechanism in ['GSSAPI', 'LDAP']:\n try:\n import requests_kerberos\n except ImportError:\n raise IbisError(\n \"Unable to import requests-kerberos, which is required for \"\n \"Kerberos HDFS support. Install it by executing `pip install \"\n \"requests-kerberos` or `pip install hdfs[kerberos]`.\")\n from hdfs.ext.kerberos import KerberosClient\n url = 'https://{0}:{1}'.format(host, port) # note SSL\n kwds.setdefault('mutual_auth', 'OPTIONAL')\n hdfs_client = KerberosClient(url, **kwds)\n else:\n from hdfs.client import InsecureClient\n url = 'http://{0}:{1}'.format(host, port)\n hdfs_client = InsecureClient(url, **kwds)\n return WebHDFS(hdfs_client)\n\ndef test(impala=False):\n import pytest\n import ibis\n import os\n\n ibis_dir, _ = os.path.split(ibis.__file__)\n\n args = ['--pyargs', ibis_dir]\n if impala:\n args.append('--impala')\n pytest.main(args)\n", "path": "ibis/__init__.py"}]} | 2,179 | 783 |
gh_patches_debug_37345 | rasdani/github-patches | git_diff | kymatio__kymatio-1001 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
JTFS needs a JAX frontend
v0.4.0
Already addressed by #1001 (@cyrusvahidi)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kymatio/scattering1d/frontend/jax_frontend.py`
Content:
```
1 from ...frontend.jax_frontend import ScatteringJax
2 from .numpy_frontend import ScatteringNumPy1D
3 from .base_frontend import ScatteringBase1D
4
5 class ScatteringJax1D(ScatteringJax, ScatteringNumPy1D):
6 # This class inherits the attribute "frontend" from ScatteringJax
7 # It overrides the __init__ function present in ScatteringNumPy1D
8 # in order to add the default argument for backend and call the
9 # ScatteringJax.__init__
10 # Through ScatteringBase1D._instantiate_backend the jax backend will
11 # be loaded
12
13
14 def __init__(self, J, shape, Q=1, T=None, stride=None, max_order=2,
15 oversampling=0, out_type='array', backend='jax'):
16
17 ScatteringJax.__init__(self)
18 ScatteringBase1D.__init__(self, J, shape, Q, T, stride, max_order,
19 oversampling, out_type, backend)
20 ScatteringBase1D._instantiate_backend(self, 'kymatio.scattering1d.backend.')
21 ScatteringBase1D.build(self)
22 ScatteringBase1D.create_filters(self)
23
24 ScatteringJax1D._document()
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kymatio/scattering1d/frontend/jax_frontend.py b/kymatio/scattering1d/frontend/jax_frontend.py
--- a/kymatio/scattering1d/frontend/jax_frontend.py
+++ b/kymatio/scattering1d/frontend/jax_frontend.py
@@ -1,6 +1,7 @@
from ...frontend.jax_frontend import ScatteringJax
-from .numpy_frontend import ScatteringNumPy1D
-from .base_frontend import ScatteringBase1D
+from .numpy_frontend import ScatteringNumPy1D, TimeFrequencyScatteringNumPy
+from .base_frontend import ScatteringBase1D, TimeFrequencyScatteringBase
+
class ScatteringJax1D(ScatteringJax, ScatteringNumPy1D):
# This class inherits the attribute "frontend" from ScatteringJax
@@ -10,15 +11,77 @@
# Through ScatteringBase1D._instantiate_backend the jax backend will
# be loaded
-
- def __init__(self, J, shape, Q=1, T=None, stride=None, max_order=2,
- oversampling=0, out_type='array', backend='jax'):
+ def __init__(
+ self,
+ J,
+ shape,
+ Q=1,
+ T=None,
+ stride=None,
+ max_order=2,
+ oversampling=0,
+ out_type="array",
+ backend="jax",
+ ):
ScatteringJax.__init__(self)
- ScatteringBase1D.__init__(self, J, shape, Q, T, stride, max_order,
- oversampling, out_type, backend)
- ScatteringBase1D._instantiate_backend(self, 'kymatio.scattering1d.backend.')
+ ScatteringBase1D.__init__(
+ self, J, shape, Q, T, stride, max_order, oversampling, out_type, backend
+ )
+ ScatteringBase1D._instantiate_backend(self, "kymatio.scattering1d.backend.")
ScatteringBase1D.build(self)
ScatteringBase1D.create_filters(self)
+
ScatteringJax1D._document()
+
+
+class TimeFrequencyScatteringJax(ScatteringJax, TimeFrequencyScatteringNumPy):
+ # This class inherits the attribute "frontend" from ScatteringJax
+ # It overrides the __init__ function present in TimeFrequencyScatteringNumPy
+ # in order to add the default argument for backend and call the
+ # ScatteringJax.__init__
+ # Through TimeFrequencyScatteringBase._instantiate_backend the jax backend will
+ # be loaded
+
+ def __init__(
+ self,
+ *,
+ J,
+ J_fr,
+ shape,
+ Q,
+ T=None,
+ stride=None,
+ Q_fr=1,
+ F=None,
+ stride_fr=None,
+ out_type="array",
+ format="joint",
+ backend="jax"
+ ):
+
+ ScatteringJax.__init__(self)
+ TimeFrequencyScatteringBase.__init__(
+ self,
+ J=J,
+ J_fr=J_fr,
+ shape=shape,
+ Q=Q,
+ T=T,
+ stride=stride,
+ Q_fr=Q_fr,
+ F=F,
+ stride_fr=stride_fr,
+ out_type=out_type,
+ format=format,
+ backend=backend,
+ )
+ ScatteringBase1D._instantiate_backend(self, "kymatio.scattering1d.backend.")
+ TimeFrequencyScatteringBase.build(self)
+ TimeFrequencyScatteringBase.create_filters(self)
+
+
+TimeFrequencyScatteringJax._document()
+
+__all__ = ["ScatteringJax1D", "TimeFrequencyScatteringJax"]
| {"golden_diff": "diff --git a/kymatio/scattering1d/frontend/jax_frontend.py b/kymatio/scattering1d/frontend/jax_frontend.py\n--- a/kymatio/scattering1d/frontend/jax_frontend.py\n+++ b/kymatio/scattering1d/frontend/jax_frontend.py\n@@ -1,6 +1,7 @@\n from ...frontend.jax_frontend import ScatteringJax\n-from .numpy_frontend import ScatteringNumPy1D\n-from .base_frontend import ScatteringBase1D\n+from .numpy_frontend import ScatteringNumPy1D, TimeFrequencyScatteringNumPy\n+from .base_frontend import ScatteringBase1D, TimeFrequencyScatteringBase\n+\n \n class ScatteringJax1D(ScatteringJax, ScatteringNumPy1D):\n # This class inherits the attribute \"frontend\" from ScatteringJax\n@@ -10,15 +11,77 @@\n # Through ScatteringBase1D._instantiate_backend the jax backend will\n # be loaded\n \n-\n- def __init__(self, J, shape, Q=1, T=None, stride=None, max_order=2,\n- oversampling=0, out_type='array', backend='jax'):\n+ def __init__(\n+ self,\n+ J,\n+ shape,\n+ Q=1,\n+ T=None,\n+ stride=None,\n+ max_order=2,\n+ oversampling=0,\n+ out_type=\"array\",\n+ backend=\"jax\",\n+ ):\n \n ScatteringJax.__init__(self)\n- ScatteringBase1D.__init__(self, J, shape, Q, T, stride, max_order,\n- oversampling, out_type, backend)\n- ScatteringBase1D._instantiate_backend(self, 'kymatio.scattering1d.backend.')\n+ ScatteringBase1D.__init__(\n+ self, J, shape, Q, T, stride, max_order, oversampling, out_type, backend\n+ )\n+ ScatteringBase1D._instantiate_backend(self, \"kymatio.scattering1d.backend.\")\n ScatteringBase1D.build(self)\n ScatteringBase1D.create_filters(self)\n \n+\n ScatteringJax1D._document()\n+\n+\n+class TimeFrequencyScatteringJax(ScatteringJax, TimeFrequencyScatteringNumPy):\n+ # This class inherits the attribute \"frontend\" from ScatteringJax\n+ # It overrides the __init__ function present in TimeFrequencyScatteringNumPy\n+ # in order to add the default argument for backend and call the\n+ # ScatteringJax.__init__\n+ # Through TimeFrequencyScatteringBase._instantiate_backend the jax backend will\n+ # be loaded\n+\n+ def __init__(\n+ self,\n+ *,\n+ J,\n+ J_fr,\n+ shape,\n+ Q,\n+ T=None,\n+ stride=None,\n+ Q_fr=1,\n+ F=None,\n+ stride_fr=None,\n+ out_type=\"array\",\n+ format=\"joint\",\n+ backend=\"jax\"\n+ ):\n+\n+ ScatteringJax.__init__(self)\n+ TimeFrequencyScatteringBase.__init__(\n+ self,\n+ J=J,\n+ J_fr=J_fr,\n+ shape=shape,\n+ Q=Q,\n+ T=T,\n+ stride=stride,\n+ Q_fr=Q_fr,\n+ F=F,\n+ stride_fr=stride_fr,\n+ out_type=out_type,\n+ format=format,\n+ backend=backend,\n+ )\n+ ScatteringBase1D._instantiate_backend(self, \"kymatio.scattering1d.backend.\")\n+ TimeFrequencyScatteringBase.build(self)\n+ TimeFrequencyScatteringBase.create_filters(self)\n+\n+\n+TimeFrequencyScatteringJax._document()\n+\n+__all__ = [\"ScatteringJax1D\", \"TimeFrequencyScatteringJax\"]\n", "issue": "JTFS needs a JAX frontend\nv0.4.0\r\n\r\nAlready addressed by #1001 (@cyrusvahidi)\n", "before_files": [{"content": "from ...frontend.jax_frontend import ScatteringJax\nfrom .numpy_frontend import ScatteringNumPy1D\nfrom .base_frontend import ScatteringBase1D\n\nclass ScatteringJax1D(ScatteringJax, ScatteringNumPy1D):\n # This class inherits the attribute \"frontend\" from ScatteringJax\n # It overrides the __init__ function present in ScatteringNumPy1D\n # in order to add the default argument for backend and call the\n # ScatteringJax.__init__\n # Through ScatteringBase1D._instantiate_backend the jax backend will\n # be loaded\n\n\n def __init__(self, J, shape, Q=1, T=None, stride=None, max_order=2,\n oversampling=0, 
out_type='array', backend='jax'):\n\n ScatteringJax.__init__(self)\n ScatteringBase1D.__init__(self, J, shape, Q, T, stride, max_order,\n oversampling, out_type, backend)\n ScatteringBase1D._instantiate_backend(self, 'kymatio.scattering1d.backend.')\n ScatteringBase1D.build(self)\n ScatteringBase1D.create_filters(self)\n\nScatteringJax1D._document()\n", "path": "kymatio/scattering1d/frontend/jax_frontend.py"}], "after_files": [{"content": "from ...frontend.jax_frontend import ScatteringJax\nfrom .numpy_frontend import ScatteringNumPy1D, TimeFrequencyScatteringNumPy\nfrom .base_frontend import ScatteringBase1D, TimeFrequencyScatteringBase\n\n\nclass ScatteringJax1D(ScatteringJax, ScatteringNumPy1D):\n # This class inherits the attribute \"frontend\" from ScatteringJax\n # It overrides the __init__ function present in ScatteringNumPy1D\n # in order to add the default argument for backend and call the\n # ScatteringJax.__init__\n # Through ScatteringBase1D._instantiate_backend the jax backend will\n # be loaded\n\n def __init__(\n self,\n J,\n shape,\n Q=1,\n T=None,\n stride=None,\n max_order=2,\n oversampling=0,\n out_type=\"array\",\n backend=\"jax\",\n ):\n\n ScatteringJax.__init__(self)\n ScatteringBase1D.__init__(\n self, J, shape, Q, T, stride, max_order, oversampling, out_type, backend\n )\n ScatteringBase1D._instantiate_backend(self, \"kymatio.scattering1d.backend.\")\n ScatteringBase1D.build(self)\n ScatteringBase1D.create_filters(self)\n\n\nScatteringJax1D._document()\n\n\nclass TimeFrequencyScatteringJax(ScatteringJax, TimeFrequencyScatteringNumPy):\n # This class inherits the attribute \"frontend\" from ScatteringJax\n # It overrides the __init__ function present in TimeFrequencyScatteringNumPy\n # in order to add the default argument for backend and call the\n # ScatteringJax.__init__\n # Through TimeFrequencyScatteringBase._instantiate_backend the jax backend will\n # be loaded\n\n def __init__(\n self,\n *,\n J,\n J_fr,\n shape,\n Q,\n T=None,\n stride=None,\n Q_fr=1,\n F=None,\n stride_fr=None,\n out_type=\"array\",\n format=\"joint\",\n backend=\"jax\"\n ):\n\n ScatteringJax.__init__(self)\n TimeFrequencyScatteringBase.__init__(\n self,\n J=J,\n J_fr=J_fr,\n shape=shape,\n Q=Q,\n T=T,\n stride=stride,\n Q_fr=Q_fr,\n F=F,\n stride_fr=stride_fr,\n out_type=out_type,\n format=format,\n backend=backend,\n )\n ScatteringBase1D._instantiate_backend(self, \"kymatio.scattering1d.backend.\")\n TimeFrequencyScatteringBase.build(self)\n TimeFrequencyScatteringBase.create_filters(self)\n\n\nTimeFrequencyScatteringJax._document()\n\n__all__ = [\"ScatteringJax1D\", \"TimeFrequencyScatteringJax\"]\n", "path": "kymatio/scattering1d/frontend/jax_frontend.py"}]} | 613 | 860 |
gh_patches_debug_35729 | rasdani/github-patches | git_diff | beetbox__beets-1267 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve extractart to extract cover art of all albums for a query
The current `extractart` command of the `embedart` plugin is only able to extract the cover art of a single file. If the query the user specifies results in more than one item, only the art of the first one is extracted.
I would like to improve the command so that the user queries for albums (like the `ls -a` command) and one cover art is extracted per album and placed inside the album folder. The command should try all files of an album until art is found. The command could be executed like `beet extractart -a <query>`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/embedart.py`
Content:
```
1 # This file is part of beets.
2 # Copyright 2015, Adrian Sampson.
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """Allows beets to embed album art into file metadata."""
16 from __future__ import (division, absolute_import, print_function,
17 unicode_literals)
18
19 import os.path
20 import imghdr
21 import subprocess
22 import platform
23 from tempfile import NamedTemporaryFile
24
25 from beets.plugins import BeetsPlugin
26 from beets import mediafile
27 from beets import ui
28 from beets.ui import decargs
29 from beets.util import syspath, normpath, displayable_path
30 from beets.util.artresizer import ArtResizer
31 from beets import config
32
33
34 class EmbedCoverArtPlugin(BeetsPlugin):
35 """Allows albumart to be embedded into the actual files.
36 """
37 def __init__(self):
38 super(EmbedCoverArtPlugin, self).__init__()
39 self.config.add({
40 'maxwidth': 0,
41 'auto': True,
42 'compare_threshold': 0,
43 'ifempty': False,
44 })
45
46 if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:
47 self.config['maxwidth'] = 0
48 self._log.warning(u"ImageMagick or PIL not found; "
49 u"'maxwidth' option ignored")
50 if self.config['compare_threshold'].get(int) and not \
51 ArtResizer.shared.can_compare:
52 self.config['compare_threshold'] = 0
53 self._log.warning(u"ImageMagick 6.8.7 or higher not installed; "
54 u"'compare_threshold' option ignored")
55
56 self.register_listener('album_imported', self.album_imported)
57
58 def commands(self):
59 # Embed command.
60 embed_cmd = ui.Subcommand(
61 'embedart', help='embed image files into file metadata'
62 )
63 embed_cmd.parser.add_option(
64 '-f', '--file', metavar='PATH', help='the image file to embed'
65 )
66 maxwidth = self.config['maxwidth'].get(int)
67 compare_threshold = self.config['compare_threshold'].get(int)
68 ifempty = self.config['ifempty'].get(bool)
69
70 def embed_func(lib, opts, args):
71 if opts.file:
72 imagepath = normpath(opts.file)
73 if not os.path.isfile(syspath(imagepath)):
74 raise ui.UserError(u'image file {0} not found'.format(
75 displayable_path(imagepath)
76 ))
77 for item in lib.items(decargs(args)):
78 self.embed_item(item, imagepath, maxwidth, None,
79 compare_threshold, ifempty)
80 else:
81 for album in lib.albums(decargs(args)):
82 self.embed_album(album, maxwidth)
83
84 embed_cmd.func = embed_func
85
86 # Extract command.
87 extract_cmd = ui.Subcommand('extractart',
88 help='extract an image from file metadata')
89 extract_cmd.parser.add_option('-o', dest='outpath',
90 help='image output file')
91
92 def extract_func(lib, opts, args):
93 outpath = normpath(opts.outpath or config['art_filename'].get())
94 for item in lib.items(decargs(args)):
95 if self.extract(outpath, item):
96 return
97 extract_cmd.func = extract_func
98
99 # Clear command.
100 clear_cmd = ui.Subcommand('clearart',
101 help='remove images from file metadata')
102
103 def clear_func(lib, opts, args):
104 self.clear(lib, decargs(args))
105 clear_cmd.func = clear_func
106
107 return [embed_cmd, extract_cmd, clear_cmd]
108
109 def album_imported(self, lib, album):
110 """Automatically embed art into imported albums.
111 """
112 if album.artpath and self.config['auto']:
113 max_width = self.config['maxwidth'].get(int)
114 self.embed_album(album, max_width, True)
115
116 def embed_item(self, item, imagepath, maxwidth=None, itempath=None,
117 compare_threshold=0, ifempty=False, as_album=False):
118 """Embed an image into the item's media file.
119 """
120 if compare_threshold:
121 if not self.check_art_similarity(item, imagepath,
122 compare_threshold):
123 self._log.info(u'Image not similar; skipping.')
124 return
125 if ifempty and self.get_art(item):
126 self._log.info(u'media file already contained art')
127 return
128 if maxwidth and not as_album:
129 imagepath = self.resize_image(imagepath, maxwidth)
130
131 try:
132 self._log.debug(u'embedding {0}', displayable_path(imagepath))
133 item['images'] = [self._mediafile_image(imagepath, maxwidth)]
134 except IOError as exc:
135 self._log.warning(u'could not read image file: {0}', exc)
136 else:
137 # We don't want to store the image in the database.
138 item.try_write(itempath)
139 del item['images']
140
141 def embed_album(self, album, maxwidth=None, quiet=False):
142 """Embed album art into all of the album's items.
143 """
144 imagepath = album.artpath
145 if not imagepath:
146 self._log.info(u'No album art present for {0}', album)
147 return
148 if not os.path.isfile(syspath(imagepath)):
149 self._log.info(u'Album art not found at {0} for {1}',
150 displayable_path(imagepath), album)
151 return
152 if maxwidth:
153 imagepath = self.resize_image(imagepath, maxwidth)
154
155 self._log.info(u'Embedding album art into {0}', album)
156
157 for item in album.items():
158 thresh = self.config['compare_threshold'].get(int)
159 ifempty = self.config['ifempty'].get(bool)
160 self.embed_item(item, imagepath, maxwidth, None,
161 thresh, ifempty, as_album=True)
162
163 def resize_image(self, imagepath, maxwidth):
164 """Returns path to an image resized to maxwidth.
165 """
166 self._log.debug(u'Resizing album art to {0} pixels wide', maxwidth)
167 imagepath = ArtResizer.shared.resize(maxwidth, syspath(imagepath))
168 return imagepath
169
170 def check_art_similarity(self, item, imagepath, compare_threshold):
171 """A boolean indicating if an image is similar to embedded item art.
172 """
173 with NamedTemporaryFile(delete=True) as f:
174 art = self.extract(f.name, item)
175
176 if art:
177 is_windows = platform.system() == "Windows"
178
179 # Converting images to grayscale tends to minimize the weight
180 # of colors in the diff score.
181 convert_proc = subprocess.Popen(
182 ['convert', syspath(imagepath), syspath(art),
183 '-colorspace', 'gray', 'MIFF:-'],
184 stdout=subprocess.PIPE,
185 close_fds=not is_windows,
186 )
187 compare_proc = subprocess.Popen(
188 ['compare', '-metric', 'PHASH', '-', 'null:'],
189 stdin=convert_proc.stdout,
190 stdout=subprocess.PIPE,
191 stderr=subprocess.PIPE,
192 close_fds=not is_windows,
193 )
194 convert_proc.stdout.close()
195
196 stdout, stderr = compare_proc.communicate()
197 if compare_proc.returncode:
198 if compare_proc.returncode != 1:
199 self._log.debug(u'IM phashes compare failed for {0}, '
200 u'{1}', displayable_path(imagepath),
201 displayable_path(art))
202 return
203 out_str = stderr
204 else:
205 out_str = stdout
206
207 try:
208 phash_diff = float(out_str)
209 except ValueError:
210 self._log.debug(u'IM output is not a number: {0!r}',
211 out_str)
212 return
213
214 self._log.debug(u'compare PHASH score is {0}', phash_diff)
215 return phash_diff <= compare_threshold
216
217 return True
218
219 def _mediafile_image(self, image_path, maxwidth=None):
220 """Return a `mediafile.Image` object for the path.
221 """
222
223 with open(syspath(image_path), 'rb') as f:
224 data = f.read()
225 return mediafile.Image(data, type=mediafile.ImageType.front)
226
227 def get_art(self, item):
228 # Extract the art.
229 try:
230 mf = mediafile.MediaFile(syspath(item.path))
231 except mediafile.UnreadableFileError as exc:
232 self._log.warning(u'Could not extract art from {0}: {1}',
233 displayable_path(item.path), exc)
234 return
235
236 return mf.art
237
238 # 'extractart' command.
239
240 def extract(self, outpath, item):
241 art = self.get_art(item)
242
243 if not art:
244 self._log.info(u'No album art present in {0}, skipping.', item)
245 return
246
247 # Add an extension to the filename.
248 ext = imghdr.what(None, h=art)
249 if not ext:
250 self._log.warning(u'Unknown image type in {0}.',
251 displayable_path(item.path))
252 return
253 outpath += '.' + ext
254
255 self._log.info(u'Extracting album art from: {0} to: {1}',
256 item, displayable_path(outpath))
257 with open(syspath(outpath), 'wb') as f:
258 f.write(art)
259 return outpath
260
261 # 'clearart' command.
262 def clear(self, lib, query):
263 id3v23 = config['id3v23'].get(bool)
264
265 items = lib.items(query)
266 self._log.info(u'Clearing album art from {0} items', len(items))
267 for item in items:
268 self._log.debug(u'Clearing art for {0}', item)
269 try:
270 mf = mediafile.MediaFile(syspath(item.path), id3v23)
271 except mediafile.UnreadableFileError as exc:
272 self._log.warning(u'Could not read file {0}: {1}',
273 displayable_path(item.path), exc)
274 else:
275 del mf.art
276 mf.save()
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/embedart.py b/beetsplug/embedart.py
--- a/beetsplug/embedart.py
+++ b/beetsplug/embedart.py
@@ -88,12 +88,30 @@
help='extract an image from file metadata')
extract_cmd.parser.add_option('-o', dest='outpath',
help='image output file')
+ extract_cmd.parser.add_option('-n', dest='filename',
+ help='image filename to create for all '
+ 'matched albums')
+ extract_cmd.parser.add_option('-a', dest='associate',
+ action='store_true',
+ help='associate the extracted images '
+ 'with the album')
def extract_func(lib, opts, args):
- outpath = normpath(opts.outpath or config['art_filename'].get())
- for item in lib.items(decargs(args)):
- if self.extract(outpath, item):
+ if opts.outpath:
+ self.extract_first(normpath(opts.outpath),
+ lib.items(decargs(args)))
+ else:
+ filename = opts.filename or config['art_filename'].get()
+ if os.path.dirname(filename) != '':
+ self._log.error(u"Only specify a name rather than a path "
+ u"for -n")
return
+ for album in lib.albums(decargs(args)):
+ artpath = normpath(os.path.join(album.path, filename))
+ artpath = self.extract_first(artpath, album.items())
+ if artpath and opts.associate:
+ album.set_art(artpath)
+ album.store()
extract_cmd.func = extract_func
# Clear command.
@@ -236,7 +254,6 @@
return mf.art
# 'extractart' command.
-
def extract(self, outpath, item):
art = self.get_art(item)
@@ -258,6 +275,12 @@
f.write(art)
return outpath
+ def extract_first(self, outpath, items):
+ for item in items:
+ real_path = self.extract(outpath, item)
+ if real_path:
+ return real_path
+
# 'clearart' command.
def clear(self, lib, query):
id3v23 = config['id3v23'].get(bool)
| {"golden_diff": "diff --git a/beetsplug/embedart.py b/beetsplug/embedart.py\n--- a/beetsplug/embedart.py\n+++ b/beetsplug/embedart.py\n@@ -88,12 +88,30 @@\n help='extract an image from file metadata')\n extract_cmd.parser.add_option('-o', dest='outpath',\n help='image output file')\n+ extract_cmd.parser.add_option('-n', dest='filename',\n+ help='image filename to create for all '\n+ 'matched albums')\n+ extract_cmd.parser.add_option('-a', dest='associate',\n+ action='store_true',\n+ help='associate the extracted images '\n+ 'with the album')\n \n def extract_func(lib, opts, args):\n- outpath = normpath(opts.outpath or config['art_filename'].get())\n- for item in lib.items(decargs(args)):\n- if self.extract(outpath, item):\n+ if opts.outpath:\n+ self.extract_first(normpath(opts.outpath),\n+ lib.items(decargs(args)))\n+ else:\n+ filename = opts.filename or config['art_filename'].get()\n+ if os.path.dirname(filename) != '':\n+ self._log.error(u\"Only specify a name rather than a path \"\n+ u\"for -n\")\n return\n+ for album in lib.albums(decargs(args)):\n+ artpath = normpath(os.path.join(album.path, filename))\n+ artpath = self.extract_first(artpath, album.items())\n+ if artpath and opts.associate:\n+ album.set_art(artpath)\n+ album.store()\n extract_cmd.func = extract_func\n \n # Clear command.\n@@ -236,7 +254,6 @@\n return mf.art\n \n # 'extractart' command.\n-\n def extract(self, outpath, item):\n art = self.get_art(item)\n \n@@ -258,6 +275,12 @@\n f.write(art)\n return outpath\n \n+ def extract_first(self, outpath, items):\n+ for item in items:\n+ real_path = self.extract(outpath, item)\n+ if real_path:\n+ return real_path\n+\n # 'clearart' command.\n def clear(self, lib, query):\n id3v23 = config['id3v23'].get(bool)\n", "issue": "Improve extractart to extract cover art of all albums for a query\nThe current `extractart` command of the `embedart` plugin is only able to extract the cover art of a single file. If the query the user specifies results in more than one item, only the art of the first one is extracted.\n\nI would like to improve the command such the user queries for albums (like `ls -a` command) and one cover art is extracted per album and placed inside the album folder. The command should try all files of an album until an art is found. 
The command could be executed like `beet extractart -a <query>`.\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2015, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Allows beets to embed album art into file metadata.\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport os.path\nimport imghdr\nimport subprocess\nimport platform\nfrom tempfile import NamedTemporaryFile\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import mediafile\nfrom beets import ui\nfrom beets.ui import decargs\nfrom beets.util import syspath, normpath, displayable_path\nfrom beets.util.artresizer import ArtResizer\nfrom beets import config\n\n\nclass EmbedCoverArtPlugin(BeetsPlugin):\n \"\"\"Allows albumart to be embedded into the actual files.\n \"\"\"\n def __init__(self):\n super(EmbedCoverArtPlugin, self).__init__()\n self.config.add({\n 'maxwidth': 0,\n 'auto': True,\n 'compare_threshold': 0,\n 'ifempty': False,\n })\n\n if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:\n self.config['maxwidth'] = 0\n self._log.warning(u\"ImageMagick or PIL not found; \"\n u\"'maxwidth' option ignored\")\n if self.config['compare_threshold'].get(int) and not \\\n ArtResizer.shared.can_compare:\n self.config['compare_threshold'] = 0\n self._log.warning(u\"ImageMagick 6.8.7 or higher not installed; \"\n u\"'compare_threshold' option ignored\")\n\n self.register_listener('album_imported', self.album_imported)\n\n def commands(self):\n # Embed command.\n embed_cmd = ui.Subcommand(\n 'embedart', help='embed image files into file metadata'\n )\n embed_cmd.parser.add_option(\n '-f', '--file', metavar='PATH', help='the image file to embed'\n )\n maxwidth = self.config['maxwidth'].get(int)\n compare_threshold = self.config['compare_threshold'].get(int)\n ifempty = self.config['ifempty'].get(bool)\n\n def embed_func(lib, opts, args):\n if opts.file:\n imagepath = normpath(opts.file)\n if not os.path.isfile(syspath(imagepath)):\n raise ui.UserError(u'image file {0} not found'.format(\n displayable_path(imagepath)\n ))\n for item in lib.items(decargs(args)):\n self.embed_item(item, imagepath, maxwidth, None,\n compare_threshold, ifempty)\n else:\n for album in lib.albums(decargs(args)):\n self.embed_album(album, maxwidth)\n\n embed_cmd.func = embed_func\n\n # Extract command.\n extract_cmd = ui.Subcommand('extractart',\n help='extract an image from file metadata')\n extract_cmd.parser.add_option('-o', dest='outpath',\n help='image output file')\n\n def extract_func(lib, opts, args):\n outpath = normpath(opts.outpath or config['art_filename'].get())\n for item in lib.items(decargs(args)):\n if self.extract(outpath, item):\n return\n extract_cmd.func = extract_func\n\n # Clear command.\n clear_cmd = ui.Subcommand('clearart',\n help='remove images from file metadata')\n\n def clear_func(lib, opts, args):\n self.clear(lib, decargs(args))\n clear_cmd.func = clear_func\n\n return 
[embed_cmd, extract_cmd, clear_cmd]\n\n def album_imported(self, lib, album):\n \"\"\"Automatically embed art into imported albums.\n \"\"\"\n if album.artpath and self.config['auto']:\n max_width = self.config['maxwidth'].get(int)\n self.embed_album(album, max_width, True)\n\n def embed_item(self, item, imagepath, maxwidth=None, itempath=None,\n compare_threshold=0, ifempty=False, as_album=False):\n \"\"\"Embed an image into the item's media file.\n \"\"\"\n if compare_threshold:\n if not self.check_art_similarity(item, imagepath,\n compare_threshold):\n self._log.info(u'Image not similar; skipping.')\n return\n if ifempty and self.get_art(item):\n self._log.info(u'media file already contained art')\n return\n if maxwidth and not as_album:\n imagepath = self.resize_image(imagepath, maxwidth)\n\n try:\n self._log.debug(u'embedding {0}', displayable_path(imagepath))\n item['images'] = [self._mediafile_image(imagepath, maxwidth)]\n except IOError as exc:\n self._log.warning(u'could not read image file: {0}', exc)\n else:\n # We don't want to store the image in the database.\n item.try_write(itempath)\n del item['images']\n\n def embed_album(self, album, maxwidth=None, quiet=False):\n \"\"\"Embed album art into all of the album's items.\n \"\"\"\n imagepath = album.artpath\n if not imagepath:\n self._log.info(u'No album art present for {0}', album)\n return\n if not os.path.isfile(syspath(imagepath)):\n self._log.info(u'Album art not found at {0} for {1}',\n displayable_path(imagepath), album)\n return\n if maxwidth:\n imagepath = self.resize_image(imagepath, maxwidth)\n\n self._log.info(u'Embedding album art into {0}', album)\n\n for item in album.items():\n thresh = self.config['compare_threshold'].get(int)\n ifempty = self.config['ifempty'].get(bool)\n self.embed_item(item, imagepath, maxwidth, None,\n thresh, ifempty, as_album=True)\n\n def resize_image(self, imagepath, maxwidth):\n \"\"\"Returns path to an image resized to maxwidth.\n \"\"\"\n self._log.debug(u'Resizing album art to {0} pixels wide', maxwidth)\n imagepath = ArtResizer.shared.resize(maxwidth, syspath(imagepath))\n return imagepath\n\n def check_art_similarity(self, item, imagepath, compare_threshold):\n \"\"\"A boolean indicating if an image is similar to embedded item art.\n \"\"\"\n with NamedTemporaryFile(delete=True) as f:\n art = self.extract(f.name, item)\n\n if art:\n is_windows = platform.system() == \"Windows\"\n\n # Converting images to grayscale tends to minimize the weight\n # of colors in the diff score.\n convert_proc = subprocess.Popen(\n ['convert', syspath(imagepath), syspath(art),\n '-colorspace', 'gray', 'MIFF:-'],\n stdout=subprocess.PIPE,\n close_fds=not is_windows,\n )\n compare_proc = subprocess.Popen(\n ['compare', '-metric', 'PHASH', '-', 'null:'],\n stdin=convert_proc.stdout,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n close_fds=not is_windows,\n )\n convert_proc.stdout.close()\n\n stdout, stderr = compare_proc.communicate()\n if compare_proc.returncode:\n if compare_proc.returncode != 1:\n self._log.debug(u'IM phashes compare failed for {0}, '\n u'{1}', displayable_path(imagepath),\n displayable_path(art))\n return\n out_str = stderr\n else:\n out_str = stdout\n\n try:\n phash_diff = float(out_str)\n except ValueError:\n self._log.debug(u'IM output is not a number: {0!r}',\n out_str)\n return\n\n self._log.debug(u'compare PHASH score is {0}', phash_diff)\n return phash_diff <= compare_threshold\n\n return True\n\n def _mediafile_image(self, image_path, maxwidth=None):\n \"\"\"Return a 
`mediafile.Image` object for the path.\n \"\"\"\n\n with open(syspath(image_path), 'rb') as f:\n data = f.read()\n return mediafile.Image(data, type=mediafile.ImageType.front)\n\n def get_art(self, item):\n # Extract the art.\n try:\n mf = mediafile.MediaFile(syspath(item.path))\n except mediafile.UnreadableFileError as exc:\n self._log.warning(u'Could not extract art from {0}: {1}',\n displayable_path(item.path), exc)\n return\n\n return mf.art\n\n # 'extractart' command.\n\n def extract(self, outpath, item):\n art = self.get_art(item)\n\n if not art:\n self._log.info(u'No album art present in {0}, skipping.', item)\n return\n\n # Add an extension to the filename.\n ext = imghdr.what(None, h=art)\n if not ext:\n self._log.warning(u'Unknown image type in {0}.',\n displayable_path(item.path))\n return\n outpath += '.' + ext\n\n self._log.info(u'Extracting album art from: {0} to: {1}',\n item, displayable_path(outpath))\n with open(syspath(outpath), 'wb') as f:\n f.write(art)\n return outpath\n\n # 'clearart' command.\n def clear(self, lib, query):\n id3v23 = config['id3v23'].get(bool)\n\n items = lib.items(query)\n self._log.info(u'Clearing album art from {0} items', len(items))\n for item in items:\n self._log.debug(u'Clearing art for {0}', item)\n try:\n mf = mediafile.MediaFile(syspath(item.path), id3v23)\n except mediafile.UnreadableFileError as exc:\n self._log.warning(u'Could not read file {0}: {1}',\n displayable_path(item.path), exc)\n else:\n del mf.art\n mf.save()\n", "path": "beetsplug/embedart.py"}], "after_files": [{"content": "# This file is part of beets.\n# Copyright 2015, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Allows beets to embed album art into file metadata.\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport os.path\nimport imghdr\nimport subprocess\nimport platform\nfrom tempfile import NamedTemporaryFile\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import mediafile\nfrom beets import ui\nfrom beets.ui import decargs\nfrom beets.util import syspath, normpath, displayable_path\nfrom beets.util.artresizer import ArtResizer\nfrom beets import config\n\n\nclass EmbedCoverArtPlugin(BeetsPlugin):\n \"\"\"Allows albumart to be embedded into the actual files.\n \"\"\"\n def __init__(self):\n super(EmbedCoverArtPlugin, self).__init__()\n self.config.add({\n 'maxwidth': 0,\n 'auto': True,\n 'compare_threshold': 0,\n 'ifempty': False,\n })\n\n if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:\n self.config['maxwidth'] = 0\n self._log.warning(u\"ImageMagick or PIL not found; \"\n u\"'maxwidth' option ignored\")\n if self.config['compare_threshold'].get(int) and not \\\n ArtResizer.shared.can_compare:\n self.config['compare_threshold'] = 0\n self._log.warning(u\"ImageMagick 6.8.7 or higher not installed; \"\n u\"'compare_threshold' option ignored\")\n\n self.register_listener('album_imported', self.album_imported)\n\n def 
commands(self):\n # Embed command.\n embed_cmd = ui.Subcommand(\n 'embedart', help='embed image files into file metadata'\n )\n embed_cmd.parser.add_option(\n '-f', '--file', metavar='PATH', help='the image file to embed'\n )\n maxwidth = self.config['maxwidth'].get(int)\n compare_threshold = self.config['compare_threshold'].get(int)\n ifempty = self.config['ifempty'].get(bool)\n\n def embed_func(lib, opts, args):\n if opts.file:\n imagepath = normpath(opts.file)\n if not os.path.isfile(syspath(imagepath)):\n raise ui.UserError(u'image file {0} not found'.format(\n displayable_path(imagepath)\n ))\n for item in lib.items(decargs(args)):\n self.embed_item(item, imagepath, maxwidth, None,\n compare_threshold, ifempty)\n else:\n for album in lib.albums(decargs(args)):\n self.embed_album(album, maxwidth)\n\n embed_cmd.func = embed_func\n\n # Extract command.\n extract_cmd = ui.Subcommand('extractart',\n help='extract an image from file metadata')\n extract_cmd.parser.add_option('-o', dest='outpath',\n help='image output file')\n extract_cmd.parser.add_option('-n', dest='filename',\n help='image filename to create for all '\n 'matched albums')\n extract_cmd.parser.add_option('-a', dest='associate',\n action='store_true',\n help='associate the extracted images '\n 'with the album')\n\n def extract_func(lib, opts, args):\n if opts.outpath:\n self.extract_first(normpath(opts.outpath),\n lib.items(decargs(args)))\n else:\n filename = opts.filename or config['art_filename'].get()\n if os.path.dirname(filename) != '':\n self._log.error(u\"Only specify a name rather than a path \"\n u\"for -n\")\n return\n for album in lib.albums(decargs(args)):\n artpath = normpath(os.path.join(album.path, filename))\n artpath = self.extract_first(artpath, album.items())\n if artpath and opts.associate:\n album.set_art(artpath)\n album.store()\n extract_cmd.func = extract_func\n\n # Clear command.\n clear_cmd = ui.Subcommand('clearart',\n help='remove images from file metadata')\n\n def clear_func(lib, opts, args):\n self.clear(lib, decargs(args))\n clear_cmd.func = clear_func\n\n return [embed_cmd, extract_cmd, clear_cmd]\n\n def album_imported(self, lib, album):\n \"\"\"Automatically embed art into imported albums.\n \"\"\"\n if album.artpath and self.config['auto']:\n max_width = self.config['maxwidth'].get(int)\n self.embed_album(album, max_width, True)\n\n def embed_item(self, item, imagepath, maxwidth=None, itempath=None,\n compare_threshold=0, ifempty=False, as_album=False):\n \"\"\"Embed an image into the item's media file.\n \"\"\"\n if compare_threshold:\n if not self.check_art_similarity(item, imagepath,\n compare_threshold):\n self._log.info(u'Image not similar; skipping.')\n return\n if ifempty and self.get_art(item):\n self._log.info(u'media file already contained art')\n return\n if maxwidth and not as_album:\n imagepath = self.resize_image(imagepath, maxwidth)\n\n try:\n self._log.debug(u'embedding {0}', displayable_path(imagepath))\n item['images'] = [self._mediafile_image(imagepath, maxwidth)]\n except IOError as exc:\n self._log.warning(u'could not read image file: {0}', exc)\n else:\n # We don't want to store the image in the database.\n item.try_write(itempath)\n del item['images']\n\n def embed_album(self, album, maxwidth=None, quiet=False):\n \"\"\"Embed album art into all of the album's items.\n \"\"\"\n imagepath = album.artpath\n if not imagepath:\n self._log.info(u'No album art present for {0}', album)\n return\n if not os.path.isfile(syspath(imagepath)):\n self._log.info(u'Album art not 
found at {0} for {1}',\n displayable_path(imagepath), album)\n return\n if maxwidth:\n imagepath = self.resize_image(imagepath, maxwidth)\n\n self._log.info(u'Embedding album art into {0}', album)\n\n for item in album.items():\n thresh = self.config['compare_threshold'].get(int)\n ifempty = self.config['ifempty'].get(bool)\n self.embed_item(item, imagepath, maxwidth, None,\n thresh, ifempty, as_album=True)\n\n def resize_image(self, imagepath, maxwidth):\n \"\"\"Returns path to an image resized to maxwidth.\n \"\"\"\n self._log.debug(u'Resizing album art to {0} pixels wide', maxwidth)\n imagepath = ArtResizer.shared.resize(maxwidth, syspath(imagepath))\n return imagepath\n\n def check_art_similarity(self, item, imagepath, compare_threshold):\n \"\"\"A boolean indicating if an image is similar to embedded item art.\n \"\"\"\n with NamedTemporaryFile(delete=True) as f:\n art = self.extract(f.name, item)\n\n if art:\n is_windows = platform.system() == \"Windows\"\n\n # Converting images to grayscale tends to minimize the weight\n # of colors in the diff score.\n convert_proc = subprocess.Popen(\n ['convert', syspath(imagepath), syspath(art),\n '-colorspace', 'gray', 'MIFF:-'],\n stdout=subprocess.PIPE,\n close_fds=not is_windows,\n )\n compare_proc = subprocess.Popen(\n ['compare', '-metric', 'PHASH', '-', 'null:'],\n stdin=convert_proc.stdout,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n close_fds=not is_windows,\n )\n convert_proc.stdout.close()\n\n stdout, stderr = compare_proc.communicate()\n if compare_proc.returncode:\n if compare_proc.returncode != 1:\n self._log.debug(u'IM phashes compare failed for {0}, '\n u'{1}', displayable_path(imagepath),\n displayable_path(art))\n return\n out_str = stderr\n else:\n out_str = stdout\n\n try:\n phash_diff = float(out_str)\n except ValueError:\n self._log.debug(u'IM output is not a number: {0!r}',\n out_str)\n return\n\n self._log.debug(u'compare PHASH score is {0}', phash_diff)\n return phash_diff <= compare_threshold\n\n return True\n\n def _mediafile_image(self, image_path, maxwidth=None):\n \"\"\"Return a `mediafile.Image` object for the path.\n \"\"\"\n\n with open(syspath(image_path), 'rb') as f:\n data = f.read()\n return mediafile.Image(data, type=mediafile.ImageType.front)\n\n def get_art(self, item):\n # Extract the art.\n try:\n mf = mediafile.MediaFile(syspath(item.path))\n except mediafile.UnreadableFileError as exc:\n self._log.warning(u'Could not extract art from {0}: {1}',\n displayable_path(item.path), exc)\n return\n\n return mf.art\n\n # 'extractart' command.\n def extract(self, outpath, item):\n art = self.get_art(item)\n\n if not art:\n self._log.info(u'No album art present in {0}, skipping.', item)\n return\n\n # Add an extension to the filename.\n ext = imghdr.what(None, h=art)\n if not ext:\n self._log.warning(u'Unknown image type in {0}.',\n displayable_path(item.path))\n return\n outpath += '.' 
+ ext\n\n self._log.info(u'Extracting album art from: {0} to: {1}',\n item, displayable_path(outpath))\n with open(syspath(outpath), 'wb') as f:\n f.write(art)\n return outpath\n\n def extract_first(self, outpath, items):\n for item in items:\n real_path = self.extract(outpath, item)\n if real_path:\n return real_path\n\n # 'clearart' command.\n def clear(self, lib, query):\n id3v23 = config['id3v23'].get(bool)\n\n items = lib.items(query)\n self._log.info(u'Clearing album art from {0} items', len(items))\n for item in items:\n self._log.debug(u'Clearing art for {0}', item)\n try:\n mf = mediafile.MediaFile(syspath(item.path), id3v23)\n except mediafile.UnreadableFileError as exc:\n self._log.warning(u'Could not read file {0}: {1}',\n displayable_path(item.path), exc)\n else:\n del mf.art\n mf.save()\n", "path": "beetsplug/embedart.py"}]} | 3,379 | 515 |
gh_patches_debug_30558 | rasdani/github-patches | git_diff | ManimCommunity__manim-1847 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow `Cross` to be initialized without being based on an existing Mobject.
## Enhancement proposal
<!-- Add a clear and concise description of your enhancement proposal. In particular,
if your enhancement introduces changes to the API, illustrate them with
(fictional) code examples. -->
Currently `Cross` needs a mobject passed to it; I think it should be reworked so this is not strictly required. Maybe the length of the cross could also be specified easily in the `__init__`?
Current `Cross` code:
```py
class Cross(VGroup):
def __init__(self, mobject, stroke_color=RED, stroke_width=6, **kwargs):
VGroup.__init__(
self,
Line(UP + LEFT, DOWN + RIGHT),
Line(UP + RIGHT, DOWN + LEFT),
)
self.replace(mobject, stretch=True)
self.set_stroke(color=stroke_color, width=stroke_width)
```
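
For illustration, a minimal sketch of what an optional-`mobject` constructor could look like — the `scale_factor` argument is only one hypothetical way to size a standalone cross, not something the issue prescribes:

```python
from manim import VGroup, Line, UP, DOWN, LEFT, RIGHT, RED

class Cross(VGroup):
    def __init__(self, mobject=None, stroke_color=RED, stroke_width=6,
                 scale_factor=1, **kwargs):
        # Build the two diagonal lines first; they define a unit-sized cross.
        super().__init__(
            Line(UP + LEFT, DOWN + RIGHT),
            Line(UP + RIGHT, DOWN + LEFT),
            **kwargs,
        )
        if mobject is not None:
            # Keep the old behaviour: stretch the cross over the given mobject.
            self.replace(mobject, stretch=True)
        self.scale(scale_factor)
        self.set_stroke(color=stroke_color, width=stroke_width)
```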
## Additional comments
<!-- Add further context that you think might be relevant. -->
Also, `VGroup.__init__` could be replaced with `super().__init__`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/mobject/shape_matchers.py`
Content:
```
1 """Mobjects used to mark and annotate other mobjects."""
2
3 __all__ = ["SurroundingRectangle", "BackgroundRectangle", "Cross", "Underline"]
4
5
6 from ..constants import *
7 from ..mobject.geometry import Line, Rectangle
8 from ..mobject.types.vectorized_mobject import VGroup, VMobject
9 from ..utils.color import BLACK, RED, YELLOW, Color
10
11
12 class SurroundingRectangle(Rectangle):
13 r"""A rectangle surrounding a :class:`~.Mobject`
14
15 Examples
16 --------
17
18 .. manim:: SurroundingRectExample
19 :save_last_frame:
20
21 class SurroundingRectExample(Scene):
22 def construct(self):
23 title = Title("A Quote from Newton")
24 quote = Text(
25 "If I have seen further than others, \n"
26 "it is by standing upon the shoulders of giants.",
27 color=BLUE
28 ).scale(0.75)
29 box = SurroundingRectangle(quote, color=YELLOW, buff=MED_LARGE_BUFF)
30 self.add(title, box, quote)
31
32 """
33
34 def __init__(self, mobject, color=YELLOW, buff=SMALL_BUFF, **kwargs):
35 self.color = color
36 self.buff = buff
37 Rectangle.__init__(
38 self,
39 color=color,
40 width=mobject.width + 2 * self.buff,
41 height=mobject.height + 2 * self.buff,
42 **kwargs
43 )
44 self.move_to(mobject)
45
46
47 class BackgroundRectangle(SurroundingRectangle):
48 """A background rectangle
49
50 Examples
51 --------
52
53 .. manim:: ExampleBackgroundRectangle
54 :save_last_frame:
55
56 class ExampleBackgroundRectangle(Scene):
57 def construct(self):
58 circle = Circle().shift(LEFT)
59 circle.set_stroke(color=GREEN, width=20)
60 triangle = Triangle().shift(2 * RIGHT)
61 triangle.set_fill(PINK, opacity=0.5)
62 backgroundRectangle1 = BackgroundRectangle(circle, color=WHITE, fill_opacity=0.15)
63 backgroundRectangle2 = BackgroundRectangle(triangle, color=WHITE, fill_opacity=0.15)
64 self.add(backgroundRectangle1)
65 self.add(backgroundRectangle2)
66 self.add(circle)
67 self.add(triangle)
68 self.play(Rotate(backgroundRectangle1, PI / 4))
69 self.play(Rotate(backgroundRectangle2, PI / 2))
70 """
71
72 def __init__(
73 self,
74 mobject,
75 color=BLACK,
76 stroke_width=0,
77 stroke_opacity=0,
78 fill_opacity=0.75,
79 buff=0,
80 **kwargs
81 ):
82 SurroundingRectangle.__init__(
83 self,
84 mobject,
85 color=color,
86 stroke_width=stroke_width,
87 stroke_opacity=stroke_opacity,
88 fill_opacity=fill_opacity,
89 buff=buff,
90 **kwargs
91 )
92 self.original_fill_opacity = self.fill_opacity
93
94 def pointwise_become_partial(self, mobject, a, b):
95 self.set_fill(opacity=b * self.original_fill_opacity)
96 return self
97
98 def set_style(
99 self,
100 stroke_color=None,
101 stroke_width=None,
102 fill_color=None,
103 fill_opacity=None,
104 family=True,
105 ):
106 # Unchangeable style, except for fill_opacity
107 super().set_style(
108 stroke_color=BLACK,
109 stroke_width=0,
110 fill_color=BLACK,
111 fill_opacity=fill_opacity,
112 )
113 return self
114
115 def get_fill_color(self):
116 return Color(self.color)
117
118
119 class Cross(VGroup):
120 def __init__(self, mobject, stroke_color=RED, stroke_width=6, **kwargs):
121 VGroup.__init__(
122 self,
123 Line(UP + LEFT, DOWN + RIGHT),
124 Line(UP + RIGHT, DOWN + LEFT),
125 )
126 self.replace(mobject, stretch=True)
127 self.set_stroke(color=stroke_color, width=stroke_width)
128
129
130 class Underline(Line):
131 """Creates an underline.
132
133 Parameters
134 ----------
135 Line
136 The underline.
137
138 Examples
139 --------
140 .. manim:: UnderLine
141 :save_last_frame:
142
143 class UnderLine(Scene):
144 def construct(self):
145 man = Tex("Manim") # Full Word
146 ul = Underline(man) # Underlining the word
147 self.add(man, ul)
148 """
149
150 def __init__(self, mobject, buff=SMALL_BUFF, **kwargs):
151 super().__init__(LEFT, RIGHT, buff=buff, **kwargs)
152 self.match_width(mobject)
153 self.next_to(mobject, DOWN, buff=self.buff)
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/mobject/shape_matchers.py b/manim/mobject/shape_matchers.py
--- a/manim/mobject/shape_matchers.py
+++ b/manim/mobject/shape_matchers.py
@@ -2,10 +2,12 @@
__all__ = ["SurroundingRectangle", "BackgroundRectangle", "Cross", "Underline"]
+from typing import Optional
from ..constants import *
from ..mobject.geometry import Line, Rectangle
-from ..mobject.types.vectorized_mobject import VGroup, VMobject
+from ..mobject.mobject import Mobject
+from ..mobject.types.vectorized_mobject import VGroup
from ..utils.color import BLACK, RED, YELLOW, Color
@@ -117,13 +119,44 @@
class Cross(VGroup):
- def __init__(self, mobject, stroke_color=RED, stroke_width=6, **kwargs):
- VGroup.__init__(
- self,
- Line(UP + LEFT, DOWN + RIGHT),
- Line(UP + RIGHT, DOWN + LEFT),
+ """Creates a cross.
+
+ Parameters
+ ----------
+ mobject
+ The mobject linked to this instance. It fits the mobject when specified. Defaults to None.
+ stroke_color
+ Specifies the color of the cross lines. Defaults to RED.
+ stroke_width
+ Specifies the width of the cross lines. Defaults to 6.
+ scale_factor
+ Scales the cross to the provided units. Defaults to 1.
+
+ Examples
+ --------
+ .. manim:: ExampleCross
+ :save_last_frame:
+
+ class ExampleCross(Scene):
+ def construct(self):
+ cross = Cross()
+ self.add(cross)
+ """
+
+ def __init__(
+ self,
+ mobject: Optional["Mobject"] = None,
+ stroke_color: Color = RED,
+ stroke_width: float = 6,
+ scale_factor: float = 1,
+ **kwargs
+ ):
+ super().__init__(
+ Line(UP + LEFT, DOWN + RIGHT), Line(UP + RIGHT, DOWN + LEFT), **kwargs
)
- self.replace(mobject, stretch=True)
+ if mobject is not None:
+ self.replace(mobject, stretch=True)
+ self.scale(scale_factor)
self.set_stroke(color=stroke_color, width=stroke_width)
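
Assuming the patched constructor above, a cross no longer needs an anchor mobject; a quick usage sketch:

```python
from manim import Scene, Circle, Cross

class CrossDemo(Scene):
    def construct(self):
        standalone = Cross(scale_factor=2)   # works without a mobject now
        fitted = Cross(Circle())             # old fitting behaviour is kept
        self.add(standalone, fitted)
```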
| {"golden_diff": "diff --git a/manim/mobject/shape_matchers.py b/manim/mobject/shape_matchers.py\n--- a/manim/mobject/shape_matchers.py\n+++ b/manim/mobject/shape_matchers.py\n@@ -2,10 +2,12 @@\n \n __all__ = [\"SurroundingRectangle\", \"BackgroundRectangle\", \"Cross\", \"Underline\"]\n \n+from typing import Optional\n \n from ..constants import *\n from ..mobject.geometry import Line, Rectangle\n-from ..mobject.types.vectorized_mobject import VGroup, VMobject\n+from ..mobject.mobject import Mobject\n+from ..mobject.types.vectorized_mobject import VGroup\n from ..utils.color import BLACK, RED, YELLOW, Color\n \n \n@@ -117,13 +119,44 @@\n \n \n class Cross(VGroup):\n- def __init__(self, mobject, stroke_color=RED, stroke_width=6, **kwargs):\n- VGroup.__init__(\n- self,\n- Line(UP + LEFT, DOWN + RIGHT),\n- Line(UP + RIGHT, DOWN + LEFT),\n+ \"\"\"Creates a cross.\n+\n+ Parameters\n+ ----------\n+ mobject\n+ The mobject linked to this instance. It fits the mobject when specified. Defaults to None.\n+ stroke_color\n+ Specifies the color of the cross lines. Defaults to RED.\n+ stroke_width\n+ Specifies the width of the cross lines. Defaults to 6.\n+ scale_factor\n+ Scales the cross to the provided units. Defaults to 1.\n+\n+ Examples\n+ --------\n+ .. manim:: ExampleCross\n+ :save_last_frame:\n+\n+ class ExampleCross(Scene):\n+ def construct(self):\n+ cross = Cross()\n+ self.add(cross)\n+ \"\"\"\n+\n+ def __init__(\n+ self,\n+ mobject: Optional[\"Mobject\"] = None,\n+ stroke_color: Color = RED,\n+ stroke_width: float = 6,\n+ scale_factor: float = 1,\n+ **kwargs\n+ ):\n+ super().__init__(\n+ Line(UP + LEFT, DOWN + RIGHT), Line(UP + RIGHT, DOWN + LEFT), **kwargs\n )\n- self.replace(mobject, stretch=True)\n+ if mobject is not None:\n+ self.replace(mobject, stretch=True)\n+ self.scale(scale_factor)\n self.set_stroke(color=stroke_color, width=stroke_width)\n", "issue": "Allow `Cross` to be initialized without being based on an existing Mobject.\n## Enhancement proposal\r\n<!-- Add a clear and concise description of your enhancement proposal. In particular,\r\n if your enhancement introduces changes to the API, illustrate them with\r\n (fictional) code examples. -->\r\n\r\nCurrently Cross needs a mobject passed to it, I think it should be reworked to not necessarily require this. Also maybe easily specifying the length of the cross in the `__init__` too?\r\n\r\n\r\nCurrent `Cross` code:\r\n```py\r\nclass Cross(VGroup):\r\n def __init__(self, mobject, stroke_color=RED, stroke_width=6, **kwargs):\r\n VGroup.__init__(\r\n self,\r\n Line(UP + LEFT, DOWN + RIGHT),\r\n Line(UP + RIGHT, DOWN + LEFT),\r\n )\r\n self.replace(mobject, stretch=True)\r\n self.set_stroke(color=stroke_color, width=stroke_width)\r\n```\r\n\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant. -->\r\nreplacing `VGroup.__init__` with `super().__init__` too \r\n\n", "before_files": [{"content": "\"\"\"Mobjects used to mark and annotate other mobjects.\"\"\"\n\n__all__ = [\"SurroundingRectangle\", \"BackgroundRectangle\", \"Cross\", \"Underline\"]\n\n\nfrom ..constants import *\nfrom ..mobject.geometry import Line, Rectangle\nfrom ..mobject.types.vectorized_mobject import VGroup, VMobject\nfrom ..utils.color import BLACK, RED, YELLOW, Color\n\n\nclass SurroundingRectangle(Rectangle):\n r\"\"\"A rectangle surrounding a :class:`~.Mobject`\n\n Examples\n --------\n\n .. 
manim:: SurroundingRectExample\n :save_last_frame:\n\n class SurroundingRectExample(Scene):\n def construct(self):\n title = Title(\"A Quote from Newton\")\n quote = Text(\n \"If I have seen further than others, \\n\"\n \"it is by standing upon the shoulders of giants.\",\n color=BLUE\n ).scale(0.75)\n box = SurroundingRectangle(quote, color=YELLOW, buff=MED_LARGE_BUFF)\n self.add(title, box, quote)\n\n \"\"\"\n\n def __init__(self, mobject, color=YELLOW, buff=SMALL_BUFF, **kwargs):\n self.color = color\n self.buff = buff\n Rectangle.__init__(\n self,\n color=color,\n width=mobject.width + 2 * self.buff,\n height=mobject.height + 2 * self.buff,\n **kwargs\n )\n self.move_to(mobject)\n\n\nclass BackgroundRectangle(SurroundingRectangle):\n \"\"\"A background rectangle\n\n Examples\n --------\n\n .. manim:: ExampleBackgroundRectangle\n :save_last_frame:\n\n class ExampleBackgroundRectangle(Scene):\n def construct(self):\n circle = Circle().shift(LEFT)\n circle.set_stroke(color=GREEN, width=20)\n triangle = Triangle().shift(2 * RIGHT)\n triangle.set_fill(PINK, opacity=0.5)\n backgroundRectangle1 = BackgroundRectangle(circle, color=WHITE, fill_opacity=0.15)\n backgroundRectangle2 = BackgroundRectangle(triangle, color=WHITE, fill_opacity=0.15)\n self.add(backgroundRectangle1)\n self.add(backgroundRectangle2)\n self.add(circle)\n self.add(triangle)\n self.play(Rotate(backgroundRectangle1, PI / 4))\n self.play(Rotate(backgroundRectangle2, PI / 2))\n \"\"\"\n\n def __init__(\n self,\n mobject,\n color=BLACK,\n stroke_width=0,\n stroke_opacity=0,\n fill_opacity=0.75,\n buff=0,\n **kwargs\n ):\n SurroundingRectangle.__init__(\n self,\n mobject,\n color=color,\n stroke_width=stroke_width,\n stroke_opacity=stroke_opacity,\n fill_opacity=fill_opacity,\n buff=buff,\n **kwargs\n )\n self.original_fill_opacity = self.fill_opacity\n\n def pointwise_become_partial(self, mobject, a, b):\n self.set_fill(opacity=b * self.original_fill_opacity)\n return self\n\n def set_style(\n self,\n stroke_color=None,\n stroke_width=None,\n fill_color=None,\n fill_opacity=None,\n family=True,\n ):\n # Unchangeable style, except for fill_opacity\n super().set_style(\n stroke_color=BLACK,\n stroke_width=0,\n fill_color=BLACK,\n fill_opacity=fill_opacity,\n )\n return self\n\n def get_fill_color(self):\n return Color(self.color)\n\n\nclass Cross(VGroup):\n def __init__(self, mobject, stroke_color=RED, stroke_width=6, **kwargs):\n VGroup.__init__(\n self,\n Line(UP + LEFT, DOWN + RIGHT),\n Line(UP + RIGHT, DOWN + LEFT),\n )\n self.replace(mobject, stretch=True)\n self.set_stroke(color=stroke_color, width=stroke_width)\n\n\nclass Underline(Line):\n \"\"\"Creates an underline.\n\n Parameters\n ----------\n Line\n The underline.\n\n Examples\n --------\n .. 
manim:: UnderLine\n :save_last_frame:\n\n class UnderLine(Scene):\n def construct(self):\n man = Tex(\"Manim\") # Full Word\n ul = Underline(man) # Underlining the word\n self.add(man, ul)\n \"\"\"\n\n def __init__(self, mobject, buff=SMALL_BUFF, **kwargs):\n super().__init__(LEFT, RIGHT, buff=buff, **kwargs)\n self.match_width(mobject)\n self.next_to(mobject, DOWN, buff=self.buff)\n", "path": "manim/mobject/shape_matchers.py"}], "after_files": [{"content": "\"\"\"Mobjects used to mark and annotate other mobjects.\"\"\"\n\n__all__ = [\"SurroundingRectangle\", \"BackgroundRectangle\", \"Cross\", \"Underline\"]\n\nfrom typing import Optional\n\nfrom ..constants import *\nfrom ..mobject.geometry import Line, Rectangle\nfrom ..mobject.mobject import Mobject\nfrom ..mobject.types.vectorized_mobject import VGroup\nfrom ..utils.color import BLACK, RED, YELLOW, Color\n\n\nclass SurroundingRectangle(Rectangle):\n r\"\"\"A rectangle surrounding a :class:`~.Mobject`\n\n Examples\n --------\n\n .. manim:: SurroundingRectExample\n :save_last_frame:\n\n class SurroundingRectExample(Scene):\n def construct(self):\n title = Title(\"A Quote from Newton\")\n quote = Text(\n \"If I have seen further than others, \\n\"\n \"it is by standing upon the shoulders of giants.\",\n color=BLUE\n ).scale(0.75)\n box = SurroundingRectangle(quote, color=YELLOW, buff=MED_LARGE_BUFF)\n self.add(title, box, quote)\n\n \"\"\"\n\n def __init__(self, mobject, color=YELLOW, buff=SMALL_BUFF, **kwargs):\n self.color = color\n self.buff = buff\n Rectangle.__init__(\n self,\n color=color,\n width=mobject.width + 2 * self.buff,\n height=mobject.height + 2 * self.buff,\n **kwargs\n )\n self.move_to(mobject)\n\n\nclass BackgroundRectangle(SurroundingRectangle):\n \"\"\"A background rectangle\n\n Examples\n --------\n\n .. manim:: ExampleBackgroundRectangle\n :save_last_frame:\n\n class ExampleBackgroundRectangle(Scene):\n def construct(self):\n circle = Circle().shift(LEFT)\n circle.set_stroke(color=GREEN, width=20)\n triangle = Triangle().shift(2 * RIGHT)\n triangle.set_fill(PINK, opacity=0.5)\n backgroundRectangle1 = BackgroundRectangle(circle, color=WHITE, fill_opacity=0.15)\n backgroundRectangle2 = BackgroundRectangle(triangle, color=WHITE, fill_opacity=0.15)\n self.add(backgroundRectangle1)\n self.add(backgroundRectangle2)\n self.add(circle)\n self.add(triangle)\n self.play(Rotate(backgroundRectangle1, PI / 4))\n self.play(Rotate(backgroundRectangle2, PI / 2))\n \"\"\"\n\n def __init__(\n self,\n mobject,\n color=BLACK,\n stroke_width=0,\n stroke_opacity=0,\n fill_opacity=0.75,\n buff=0,\n **kwargs\n ):\n SurroundingRectangle.__init__(\n self,\n mobject,\n color=color,\n stroke_width=stroke_width,\n stroke_opacity=stroke_opacity,\n fill_opacity=fill_opacity,\n buff=buff,\n **kwargs\n )\n self.original_fill_opacity = self.fill_opacity\n\n def pointwise_become_partial(self, mobject, a, b):\n self.set_fill(opacity=b * self.original_fill_opacity)\n return self\n\n def set_style(\n self,\n stroke_color=None,\n stroke_width=None,\n fill_color=None,\n fill_opacity=None,\n family=True,\n ):\n # Unchangeable style, except for fill_opacity\n super().set_style(\n stroke_color=BLACK,\n stroke_width=0,\n fill_color=BLACK,\n fill_opacity=fill_opacity,\n )\n return self\n\n def get_fill_color(self):\n return Color(self.color)\n\n\nclass Cross(VGroup):\n \"\"\"Creates a cross.\n\n Parameters\n ----------\n mobject\n The mobject linked to this instance. It fits the mobject when specified. 
Defaults to None.\n stroke_color\n Specifies the color of the cross lines. Defaults to RED.\n stroke_width\n Specifies the width of the cross lines. Defaults to 6.\n scale_factor\n Scales the cross to the provided units. Defaults to 1.\n\n Examples\n --------\n .. manim:: ExampleCross\n :save_last_frame:\n\n class ExampleCross(Scene):\n def construct(self):\n cross = Cross()\n self.add(cross)\n \"\"\"\n\n def __init__(\n self,\n mobject: Optional[\"Mobject\"] = None,\n stroke_color: Color = RED,\n stroke_width: float = 6,\n scale_factor: float = 1,\n **kwargs\n ):\n super().__init__(\n Line(UP + LEFT, DOWN + RIGHT), Line(UP + RIGHT, DOWN + LEFT), **kwargs\n )\n if mobject is not None:\n self.replace(mobject, stretch=True)\n self.scale(scale_factor)\n self.set_stroke(color=stroke_color, width=stroke_width)\n\n\nclass Underline(Line):\n \"\"\"Creates an underline.\n\n Parameters\n ----------\n Line\n The underline.\n\n Examples\n --------\n .. manim:: UnderLine\n :save_last_frame:\n\n class UnderLine(Scene):\n def construct(self):\n man = Tex(\"Manim\") # Full Word\n ul = Underline(man) # Underlining the word\n self.add(man, ul)\n \"\"\"\n\n def __init__(self, mobject, buff=SMALL_BUFF, **kwargs):\n super().__init__(LEFT, RIGHT, buff=buff, **kwargs)\n self.match_width(mobject)\n self.next_to(mobject, DOWN, buff=self.buff)\n", "path": "manim/mobject/shape_matchers.py"}]} | 1,840 | 541 |
gh_patches_debug_15397 | rasdani/github-patches | git_diff | crytic__slither-1945 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
similar variables detector is extremely slow
This detector makes up the majority of runtime on large codebases
```
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 41.286 41.286 /Users/alpharush/tob/slither/slither/__main__.py:81(process_all)
1 0.000 0.000 39.059 39.059 /Users/alpharush/tob/slither/slither/__main__.py:58(process_single)
1 0.000 0.000 33.319 33.319 /Users/alpharush/tob/slither/slither/__main__.py:111(_process)
1 0.000 0.000 33.317 33.317 /Users/alpharush/tob/slither/slither/slither.py:198(run_detectors)
1 0.000 0.000 33.317 33.317 /Users/alpharush/tob/slither/slither/slither.py:204(<listcomp>)
84 0.001 0.000 33.317 0.397 /Users/alpharush/tob/slither/slither/detectors/abstract_detector.py:176(detect)
1 0.000 0.000 31.215 31.215 /Users/alpharush/tob/slither/slither/detectors/variables/similar_variables.py:72(_detect)
```
https://github.com/crytic/slither/blob/master/slither/detectors/variables/similar_variables.py#L63-L66
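
The linked loop visits every ordered pair of names, so each unordered pair is checked twice, and the `(v2, v1) not in ret` guard is itself a linear scan over the growing result list. A toy reproduction of the pattern (the variable names are made up; `similar` mirrors the detector's difflib test):

```python
import difflib

def similar(a, b):
    # Same test the detector uses: equal length and ratio > 0.90.
    return len(a) == len(b) and difflib.SequenceMatcher(a=a.lower(), b=b.lower()).ratio() > 0.90

all_var = ["tokenOwnerA", "tokenOwnerB", "totalSupply"]

ret = []
for v1 in all_var:
    for v2 in all_var:                     # n * n ordered pairs
        if v1.lower() != v2.lower():
            if similar(v1, v2):
                if (v2, v1) not in ret:    # linear scan over the result list
                    ret.append((v1, v2))
print(ret)                                 # [('tokenOwnerA', 'tokenOwnerB')]
```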
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/detectors/variables/similar_variables.py`
Content:
```
1 """
2 Check for state variables too similar
3 Do not check contract inheritance
4 """
5 import difflib
6 from typing import List, Set, Tuple
7
8 from slither.core.declarations.contract import Contract
9 from slither.core.variables.local_variable import LocalVariable
10 from slither.detectors.abstract_detector import (
11 AbstractDetector,
12 DetectorClassification,
13 DETECTOR_INFO,
14 )
15 from slither.utils.output import Output
16
17
18 class SimilarVarsDetection(AbstractDetector):
19 """
20 Variable similar detector
21 """
22
23 ARGUMENT = "similar-names"
24 HELP = "Variable names are too similar"
25 IMPACT = DetectorClassification.INFORMATIONAL
26 CONFIDENCE = DetectorClassification.MEDIUM
27
28 WIKI = (
29 "https://github.com/crytic/slither/wiki/Detector-Documentation#variable-names-too-similar"
30 )
31
32 WIKI_TITLE = "Variable names too similar"
33 WIKI_DESCRIPTION = "Detect variables with names that are too similar."
34 WIKI_EXPLOIT_SCENARIO = "Bob uses several variables with similar names. As a result, his code is difficult to review."
35 WIKI_RECOMMENDATION = "Prevent variables from having similar names."
36
37 @staticmethod
38 def similar(seq1: str, seq2: str) -> bool:
39 """Test the name similarity
40
41 Two name are similar if difflib.SequenceMatcher on the lowercase
42 version of the name is greater than 0.90
43 See: https://docs.python.org/2/library/difflib.html
44 Args:
45 seq1 (str): first name
46 seq2 (str): second name
47 Returns:
48 bool: true if names are similar
49 """
50 if len(seq1) != len(seq2):
51 return False
52 val = difflib.SequenceMatcher(a=seq1.lower(), b=seq2.lower()).ratio()
53 ret = val > 0.90
54 return ret
55
56 @staticmethod
57 def detect_sim(contract: Contract) -> Set[Tuple[LocalVariable, LocalVariable]]:
58 """Detect variables with similar name
59
60 Returns:
61 bool: true if variables have similar name
62 """
63 all_var = [x.variables for x in contract.functions]
64 all_var = [x for l in all_var for x in l]
65
66 contract_var = contract.variables
67
68 all_var = set(all_var + contract_var)
69
70 ret = []
71 for v1 in all_var:
72 for v2 in all_var:
73 if v1.name.lower() != v2.name.lower():
74 if SimilarVarsDetection.similar(v1.name, v2.name):
75 if (v2, v1) not in ret:
76 ret.append((v1, v2))
77
78 return set(ret)
79
80 def _detect(self) -> List[Output]:
81 """Detect similar variables name
82
83 Returns:
84 list: {'vuln', 'filename,'contract','vars'}
85 """
86 results = []
87 for c in self.contracts:
88 allVars = self.detect_sim(c)
89 if allVars:
90 for (v1, v2) in sorted(allVars, key=lambda x: (x[0].name, x[1].name)):
91 v_left = v1 if v1.name < v2.name else v2
92 v_right = v2 if v_left == v1 else v1
93 info: DETECTOR_INFO = [
94 "Variable ",
95 v_left,
96 " is too similar to ",
97 v_right,
98 "\n",
99 ]
100 json = self.generate_result(info)
101 results.append(json)
102 return results
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/slither/detectors/variables/similar_variables.py b/slither/detectors/variables/similar_variables.py
--- a/slither/detectors/variables/similar_variables.py
+++ b/slither/detectors/variables/similar_variables.py
@@ -65,12 +65,16 @@
contract_var = contract.variables
- all_var = set(all_var + contract_var)
+ all_var = list(set(all_var + contract_var))
ret = []
- for v1 in all_var:
- for v2 in all_var:
- if v1.name.lower() != v2.name.lower():
+ # pylint: disable=consider-using-enumerate
+ for i in range(len(all_var)):
+ v1 = all_var[i]
+ _v1_name_lower = v1.name.lower()
+ for j in range(i, len(all_var)):
+ v2 = all_var[j]
+ if _v1_name_lower != v2.name.lower():
if SimilarVarsDetection.similar(v1.name, v2.name):
if (v2, v1) not in ret:
ret.append((v1, v2))
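
The patched loop indexes `j` from `i`, so each unordered pair is visited only once (the `j == i` self-pair is filtered out by the name comparison). The same traversal can be written with `itertools.combinations`, shown here only as a point of comparison:

```python
from itertools import combinations

names = ["tokenOwnerA", "tokenOwnerB", "totalSupply"]

# combinations() yields each unordered pair of distinct elements exactly once,
# which is the effect the i/j indexing in the patch achieves.
for v1, v2 in combinations(names, 2):
    print(v1, v2)
```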
| {"golden_diff": "diff --git a/slither/detectors/variables/similar_variables.py b/slither/detectors/variables/similar_variables.py\n--- a/slither/detectors/variables/similar_variables.py\n+++ b/slither/detectors/variables/similar_variables.py\n@@ -65,12 +65,16 @@\n \n contract_var = contract.variables\n \n- all_var = set(all_var + contract_var)\n+ all_var = list(set(all_var + contract_var))\n \n ret = []\n- for v1 in all_var:\n- for v2 in all_var:\n- if v1.name.lower() != v2.name.lower():\n+ # pylint: disable=consider-using-enumerate\n+ for i in range(len(all_var)):\n+ v1 = all_var[i]\n+ _v1_name_lower = v1.name.lower()\n+ for j in range(i, len(all_var)):\n+ v2 = all_var[j]\n+ if _v1_name_lower != v2.name.lower():\n if SimilarVarsDetection.similar(v1.name, v2.name):\n if (v2, v1) not in ret:\n ret.append((v1, v2))\n", "issue": " similar variables detector is extremely slow\n This detector makes up the majority of runtime on large codebases\r\n```\r\nncalls tottime percall cumtime percall filename:lineno(function)\r\n 1 0.000 0.000 41.286 41.286 /Users/alpharush/tob/slither/slither/__main__.py:81(process_all)\r\n 1 0.000 0.000 39.059 39.059 /Users/alpharush/tob/slither/slither/__main__.py:58(process_single)\r\n 1 0.000 0.000 33.319 33.319 /Users/alpharush/tob/slither/slither/__main__.py:111(_process)\r\n 1 0.000 0.000 33.317 33.317 /Users/alpharush/tob/slither/slither/slither.py:198(run_detectors)\r\n 1 0.000 0.000 33.317 33.317 /Users/alpharush/tob/slither/slither/slither.py:204(<listcomp>)\r\n 84 0.001 0.000 33.317 0.397 /Users/alpharush/tob/slither/slither/detectors/abstract_detector.py:176(detect)\r\n 1 0.000 0.000 31.215 31.215 /Users/alpharush/tob/slither/slither/detectors/variables/similar_variables.py:72(_detect)\r\n\r\n```\r\nhttps://github.com/crytic/slither/blob/master/slither/detectors/variables/similar_variables.py#L63-L66\n", "before_files": [{"content": "\"\"\"\nCheck for state variables too similar\nDo not check contract inheritance\n\"\"\"\nimport difflib\nfrom typing import List, Set, Tuple\n\nfrom slither.core.declarations.contract import Contract\nfrom slither.core.variables.local_variable import LocalVariable\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.utils.output import Output\n\n\nclass SimilarVarsDetection(AbstractDetector):\n \"\"\"\n Variable similar detector\n \"\"\"\n\n ARGUMENT = \"similar-names\"\n HELP = \"Variable names are too similar\"\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = (\n \"https://github.com/crytic/slither/wiki/Detector-Documentation#variable-names-too-similar\"\n )\n\n WIKI_TITLE = \"Variable names too similar\"\n WIKI_DESCRIPTION = \"Detect variables with names that are too similar.\"\n WIKI_EXPLOIT_SCENARIO = \"Bob uses several variables with similar names. 
As a result, his code is difficult to review.\"\n WIKI_RECOMMENDATION = \"Prevent variables from having similar names.\"\n\n @staticmethod\n def similar(seq1: str, seq2: str) -> bool:\n \"\"\"Test the name similarity\n\n Two name are similar if difflib.SequenceMatcher on the lowercase\n version of the name is greater than 0.90\n See: https://docs.python.org/2/library/difflib.html\n Args:\n seq1 (str): first name\n seq2 (str): second name\n Returns:\n bool: true if names are similar\n \"\"\"\n if len(seq1) != len(seq2):\n return False\n val = difflib.SequenceMatcher(a=seq1.lower(), b=seq2.lower()).ratio()\n ret = val > 0.90\n return ret\n\n @staticmethod\n def detect_sim(contract: Contract) -> Set[Tuple[LocalVariable, LocalVariable]]:\n \"\"\"Detect variables with similar name\n\n Returns:\n bool: true if variables have similar name\n \"\"\"\n all_var = [x.variables for x in contract.functions]\n all_var = [x for l in all_var for x in l]\n\n contract_var = contract.variables\n\n all_var = set(all_var + contract_var)\n\n ret = []\n for v1 in all_var:\n for v2 in all_var:\n if v1.name.lower() != v2.name.lower():\n if SimilarVarsDetection.similar(v1.name, v2.name):\n if (v2, v1) not in ret:\n ret.append((v1, v2))\n\n return set(ret)\n\n def _detect(self) -> List[Output]:\n \"\"\"Detect similar variables name\n\n Returns:\n list: {'vuln', 'filename,'contract','vars'}\n \"\"\"\n results = []\n for c in self.contracts:\n allVars = self.detect_sim(c)\n if allVars:\n for (v1, v2) in sorted(allVars, key=lambda x: (x[0].name, x[1].name)):\n v_left = v1 if v1.name < v2.name else v2\n v_right = v2 if v_left == v1 else v1\n info: DETECTOR_INFO = [\n \"Variable \",\n v_left,\n \" is too similar to \",\n v_right,\n \"\\n\",\n ]\n json = self.generate_result(info)\n results.append(json)\n return results\n", "path": "slither/detectors/variables/similar_variables.py"}], "after_files": [{"content": "\"\"\"\nCheck for state variables too similar\nDo not check contract inheritance\n\"\"\"\nimport difflib\nfrom typing import List, Set, Tuple\n\nfrom slither.core.declarations.contract import Contract\nfrom slither.core.variables.local_variable import LocalVariable\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.utils.output import Output\n\n\nclass SimilarVarsDetection(AbstractDetector):\n \"\"\"\n Variable similar detector\n \"\"\"\n\n ARGUMENT = \"similar-names\"\n HELP = \"Variable names are too similar\"\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = (\n \"https://github.com/crytic/slither/wiki/Detector-Documentation#variable-names-too-similar\"\n )\n\n WIKI_TITLE = \"Variable names too similar\"\n WIKI_DESCRIPTION = \"Detect variables with names that are too similar.\"\n WIKI_EXPLOIT_SCENARIO = \"Bob uses several variables with similar names. 
As a result, his code is difficult to review.\"\n WIKI_RECOMMENDATION = \"Prevent variables from having similar names.\"\n\n @staticmethod\n def similar(seq1: str, seq2: str) -> bool:\n \"\"\"Test the name similarity\n\n Two name are similar if difflib.SequenceMatcher on the lowercase\n version of the name is greater than 0.90\n See: https://docs.python.org/2/library/difflib.html\n Args:\n seq1 (str): first name\n seq2 (str): second name\n Returns:\n bool: true if names are similar\n \"\"\"\n if len(seq1) != len(seq2):\n return False\n val = difflib.SequenceMatcher(a=seq1.lower(), b=seq2.lower()).ratio()\n ret = val > 0.90\n return ret\n\n @staticmethod\n def detect_sim(contract: Contract) -> Set[Tuple[LocalVariable, LocalVariable]]:\n \"\"\"Detect variables with similar name\n\n Returns:\n bool: true if variables have similar name\n \"\"\"\n all_var = [x.variables for x in contract.functions]\n all_var = [x for l in all_var for x in l]\n\n contract_var = contract.variables\n\n all_var = list(set(all_var + contract_var))\n\n ret = []\n # pylint: disable=consider-using-enumerate\n for i in range(len(all_var)):\n v1 = all_var[i]\n _v1_name_lower = v1.name.lower()\n for j in range(i, len(all_var)):\n v2 = all_var[j]\n if _v1_name_lower != v2.name.lower():\n if SimilarVarsDetection.similar(v1.name, v2.name):\n if (v2, v1) not in ret:\n ret.append((v1, v2))\n\n return set(ret)\n\n def _detect(self) -> List[Output]:\n \"\"\"Detect similar variables name\n\n Returns:\n list: {'vuln', 'filename,'contract','vars'}\n \"\"\"\n results = []\n for c in self.contracts:\n allVars = self.detect_sim(c)\n if allVars:\n for (v1, v2) in sorted(allVars, key=lambda x: (x[0].name, x[1].name)):\n v_left = v1 if v1.name < v2.name else v2\n v_right = v2 if v_left == v1 else v1\n info: DETECTOR_INFO = [\n \"Variable \",\n v_left,\n \" is too similar to \",\n v_right,\n \"\\n\",\n ]\n json = self.generate_result(info)\n results.append(json)\n return results\n", "path": "slither/detectors/variables/similar_variables.py"}]} | 1,704 | 258 |
gh_patches_debug_28132 | rasdani/github-patches | git_diff | bokeh__bokeh-5457 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
If main.py is run by bokeh serve, warn about running with directory name instead
Lots of reports of people running, e.g.
```
bokeh serve --show crossfilter/main.py
```
This prevents all the features of "directory style" apps from being enabled.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/command/util.py`
Content:
```
1 ''' Provide utility functions for implementing the Bokeh command.
2
3 '''
4 from __future__ import print_function
5
6 import os
7 import sys
8
9 from bokeh.application import Application
10 from bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler
11
12 def die(message):
13 ''' Print an error message and exit.
14
15 Args:
16 message (str) : error message to print
17
18 '''
19 print(message, file=sys.stderr)
20 sys.exit(1)
21
22 def build_single_handler_application(path, argv=None):
23 ''' Return a Bokeh application built using a single handler for a file
24 or directory.
25
26 Args:
27 path (str) : path to a file or directory for creating a Bokeh
28 application.
29 argv (seq[str], optional) : command line arguments to pass to the
30 application handler
31
32 Returns:
33 Application
34
35 Raises:
36 RuntimeError
37
38 '''
39 argv = argv or []
40 path = os.path.abspath(path)
41 if os.path.isdir(path):
42 handler = DirectoryHandler(filename=path, argv=argv)
43 else:
44 if path.endswith(".ipynb"):
45 handler = NotebookHandler(filename=path, argv=argv)
46 elif path.endswith(".py"):
47 handler = ScriptHandler(filename=path, argv=argv)
48 else:
49 raise ValueError("Expected a '.py' script or '.ipynb' notebook, got: '%s'" % path)
50
51 if handler.failed:
52 raise RuntimeError("Error loading %s:\n\n%s\n%s " % (path, handler.error, handler.error_detail))
53
54 application = Application(handler)
55
56 return application
57
58 def build_single_handler_applications(paths, argvs=None):
59 ''' Return a dictionary mapping routes to Bokeh applications built using
60 single handlers, for specified files or directories.
61
62 Args:
63 path (seq[str]) : paths to files or directories for creating Bokeh
64 applications.
65 argvs (dict[str, list[str]], optional) : mapping of paths to command
66 line arguments to pass to the handler for each path
67
68 Returns:
69 dict[str, Application]
70
71 Raises:
72 RuntimeError
73
74 '''
75 applications = {}
76 argvs = {} or argvs
77
78 for path in paths:
79 application = build_single_handler_application(path, argvs.get(path, []))
80
81 route = application.handlers[0].url_path()
82
83 if not route:
84 if '/' in applications:
85 raise RuntimeError("Don't know the URL path to use for %s" % (path))
86 route = '/'
87 applications[route] = application
88
89 return applications
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bokeh/command/util.py b/bokeh/command/util.py
--- a/bokeh/command/util.py
+++ b/bokeh/command/util.py
@@ -5,6 +5,7 @@
import os
import sys
+import warnings
from bokeh.application import Application
from bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler
@@ -19,6 +20,16 @@
print(message, file=sys.stderr)
sys.exit(1)
+DIRSTYLE_MAIN_WARNING = """
+It looks like you might be running the main.py of a directory app directly.
+If this is the case, to enable the features of directory style apps, you must
+call "bokeh serve" on the directory instead. For example:
+
+ bokeh serve my_app_dir/
+
+If this is not the case, renaming main.py will supress this warning.
+"""
+
def build_single_handler_application(path, argv=None):
''' Return a Bokeh application built using a single handler for a file
or directory.
@@ -44,6 +55,8 @@
if path.endswith(".ipynb"):
handler = NotebookHandler(filename=path, argv=argv)
elif path.endswith(".py"):
+ if path.endswith("main.py"):
+ warnings.warn(DIRSTYLE_MAIN_WARNING)
handler = ScriptHandler(filename=path, argv=argv)
else:
raise ValueError("Expected a '.py' script or '.ipynb' notebook, got: '%s'" % path)
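
With that change, pointing the helper at a `main.py` should emit the new hint. A small sketch of how it could be exercised — it assumes a directory-style app really exists at the made-up path:

```python
import warnings
from bokeh.command.util import build_single_handler_application

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    build_single_handler_application("crossfilter/main.py")  # hypothetical path

# The warning text suggests serving the directory instead.
assert any("bokeh serve" in str(w.message) for w in caught)
```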
| {"golden_diff": "diff --git a/bokeh/command/util.py b/bokeh/command/util.py\n--- a/bokeh/command/util.py\n+++ b/bokeh/command/util.py\n@@ -5,6 +5,7 @@\n \n import os\n import sys\n+import warnings\n \n from bokeh.application import Application\n from bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler\n@@ -19,6 +20,16 @@\n print(message, file=sys.stderr)\n sys.exit(1)\n \n+DIRSTYLE_MAIN_WARNING = \"\"\"\n+It looks like you might be running the main.py of a directory app directly.\n+If this is the case, to enable the features of directory style apps, you must\n+call \"bokeh serve\" on the directory instead. For example:\n+\n+ bokeh serve my_app_dir/\n+\n+If this is not the case, renaming main.py will supress this warning.\n+\"\"\"\n+\n def build_single_handler_application(path, argv=None):\n ''' Return a Bokeh application built using a single handler for a file\n or directory.\n@@ -44,6 +55,8 @@\n if path.endswith(\".ipynb\"):\n handler = NotebookHandler(filename=path, argv=argv)\n elif path.endswith(\".py\"):\n+ if path.endswith(\"main.py\"):\n+ warnings.warn(DIRSTYLE_MAIN_WARNING)\n handler = ScriptHandler(filename=path, argv=argv)\n else:\n raise ValueError(\"Expected a '.py' script or '.ipynb' notebook, got: '%s'\" % path)\n", "issue": "If main.py is run by bokeh serve, warn about running with directory name instead\nLots of reports of people running, e.g.\r\n```\r\nbokeh serve --show crossfilter/main.py\r\n```\r\nWhich prevents all the features of \"directory style\" apps from being enabled. \n", "before_files": [{"content": "''' Provide utility functions for implementing the Bokeh command.\n\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler\n\ndef die(message):\n ''' Print an error message and exit.\n\n Args:\n message (str) : error message to print\n\n '''\n print(message, file=sys.stderr)\n sys.exit(1)\n\ndef build_single_handler_application(path, argv=None):\n ''' Return a Bokeh application built using a single handler for a file\n or directory.\n\n Args:\n path (str) : path to a file or directory for creating a Bokeh\n application.\n argv (seq[str], optional) : command line arguments to pass to the\n application handler\n\n Returns:\n Application\n\n Raises:\n RuntimeError\n\n '''\n argv = argv or []\n path = os.path.abspath(path)\n if os.path.isdir(path):\n handler = DirectoryHandler(filename=path, argv=argv)\n else:\n if path.endswith(\".ipynb\"):\n handler = NotebookHandler(filename=path, argv=argv)\n elif path.endswith(\".py\"):\n handler = ScriptHandler(filename=path, argv=argv)\n else:\n raise ValueError(\"Expected a '.py' script or '.ipynb' notebook, got: '%s'\" % path)\n\n if handler.failed:\n raise RuntimeError(\"Error loading %s:\\n\\n%s\\n%s \" % (path, handler.error, handler.error_detail))\n\n application = Application(handler)\n\n return application\n\ndef build_single_handler_applications(paths, argvs=None):\n ''' Return a dictionary mapping routes to Bokeh applications built using\n single handlers, for specified files or directories.\n\n Args:\n path (seq[str]) : paths to files or directories for creating Bokeh\n applications.\n argvs (dict[str, list[str]], optional) : mapping of paths to command\n line arguments to pass to the handler for each path\n\n Returns:\n dict[str, Application]\n\n Raises:\n RuntimeError\n\n '''\n applications = {}\n argvs = {} or argvs\n\n for path in 
paths:\n application = build_single_handler_application(path, argvs.get(path, []))\n\n route = application.handlers[0].url_path()\n\n if not route:\n if '/' in applications:\n raise RuntimeError(\"Don't know the URL path to use for %s\" % (path))\n route = '/'\n applications[route] = application\n\n return applications\n", "path": "bokeh/command/util.py"}], "after_files": [{"content": "''' Provide utility functions for implementing the Bokeh command.\n\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport warnings\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler\n\ndef die(message):\n ''' Print an error message and exit.\n\n Args:\n message (str) : error message to print\n\n '''\n print(message, file=sys.stderr)\n sys.exit(1)\n\nDIRSTYLE_MAIN_WARNING = \"\"\"\nIt looks like you might be running the main.py of a directory app directly.\nIf this is the case, to enable the features of directory style apps, you must\ncall \"bokeh serve\" on the directory instead. For example:\n\n bokeh serve my_app_dir/\n\nIf this is not the case, renaming main.py will supress this warning.\n\"\"\"\n\ndef build_single_handler_application(path, argv=None):\n ''' Return a Bokeh application built using a single handler for a file\n or directory.\n\n Args:\n path (str) : path to a file or directory for creating a Bokeh\n application.\n argv (seq[str], optional) : command line arguments to pass to the\n application handler\n\n Returns:\n Application\n\n Raises:\n RuntimeError\n\n '''\n argv = argv or []\n path = os.path.abspath(path)\n if os.path.isdir(path):\n handler = DirectoryHandler(filename=path, argv=argv)\n else:\n if path.endswith(\".ipynb\"):\n handler = NotebookHandler(filename=path, argv=argv)\n elif path.endswith(\".py\"):\n if path.endswith(\"main.py\"):\n warnings.warn(DIRSTYLE_MAIN_WARNING)\n handler = ScriptHandler(filename=path, argv=argv)\n else:\n raise ValueError(\"Expected a '.py' script or '.ipynb' notebook, got: '%s'\" % path)\n\n if handler.failed:\n raise RuntimeError(\"Error loading %s:\\n\\n%s\\n%s \" % (path, handler.error, handler.error_detail))\n\n application = Application(handler)\n\n return application\n\ndef build_single_handler_applications(paths, argvs=None):\n ''' Return a dictionary mapping routes to Bokeh applications built using\n single handlers, for specified files or directories.\n\n Args:\n path (seq[str]) : paths to files or directories for creating Bokeh\n applications.\n argvs (dict[str, list[str]], optional) : mapping of paths to command\n line arguments to pass to the handler for each path\n\n Returns:\n dict[str, Application]\n\n Raises:\n RuntimeError\n\n '''\n applications = {}\n argvs = {} or argvs\n\n for path in paths:\n application = build_single_handler_application(path, argvs.get(path, []))\n\n route = application.handlers[0].url_path()\n\n if not route:\n if '/' in applications:\n raise RuntimeError(\"Don't know the URL path to use for %s\" % (path))\n route = '/'\n applications[route] = application\n\n return applications\n", "path": "bokeh/command/util.py"}]} | 1,037 | 326 |
gh_patches_debug_12174 | rasdani/github-patches | git_diff | privacyidea__privacyidea-1517 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove submodules
We often have problems when building plugins. Plugins have a different release pace than the server.
We should remove the submodules
* pam_python
* freeradius
* simplesamlphp
* adminclient
and give them their own versioning.
The Debian build environment has to be added/moved to those repositories.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from setuptools import setup, find_packages
3 import os
4 import stat
5 import sys
6
7 #VERSION="2.1dev4"
8 VERSION="3.0dev1"
9
10 # Taken from kennethreitz/requests/setup.py
11 package_directory = os.path.realpath(os.path.dirname(__file__))
12
13
14 def get_file_contents(file_path):
15 """Get the context of the file using full path name."""
16 content = ""
17 try:
18 full_path = os.path.join(package_directory, file_path)
19 content = open(full_path, 'r').read()
20 except:
21 print >> sys.stderr, "### could not open file {0!r}".format(file_path)
22 return content
23
24 def get_file_list(file_path):
25 full_path = os.path.join(package_directory, file_path)
26 file_list = os.listdir(full_path)
27 # now we need to add the path to the files
28 return [ file_path + f for f in file_list ]
29
30
31 install_requires = ["Flask>=0.10.1",
32 "Flask-Migrate>=1.2.0",
33 "Flask-SQLAlchemy>=2.0",
34 "Flask-Script>=2.0.5",
35 "Jinja2>=2.7.3",
36 "Mako>=0.9.1",
37 "MarkupSafe>=0.23",
38 "PyMySQL>=0.6.6",
39 "Pillow>=2.6.1",
40 "PyJWT>=1.3.0",
41 "PyYAML>=3.11",
42 "Pygments>=2.0.2",
43 "SQLAlchemy>=1.0.5",
44 "Werkzeug>=0.10.4",
45 "alembic>=0.6.7",
46 "argparse>=1.2.1",
47 "bcrypt>=1.1.0",
48 "beautifulsoup4>=4.3.2",
49 "cffi>=0.8.6",
50 "configobj>=5.0.6",
51 "docutils>=0.12",
52 "funcparserlib>=0.3.6",
53 "itsdangerous>=0.24",
54 "ldap3>=2.5",
55 "netaddr>=0.7.12",
56 "passlib>=1.6.2",
57 "pyasn1>=0.4.2",
58 "pyOpenSSL>=17.5",
59 "pycparser>=2.10",
60 "pycrypto>=2.6.1",
61 "pyrad>=2.0",
62 "pyusb>=1.0.0b2",
63 "qrcode>=5.1",
64 "requests>=2.7.0",
65 "sqlsoup>=0.9.0",
66 "ecdsa>=0.13",
67 "lxml>=3.3",
68 "python-gnupg>=0.3.8",
69 "defusedxml>=0.4.1",
70 "flask-babel>=0.9",
71 "croniter>=0.3.8",
72 "oauth2client>=2.0.1"
73 ]
74
75 # For python 2.6 we need additional dependency importlib
76 try:
77 import importlib
78 except ImportError:
79 install_requires.append('importlib')
80
81
82 def get_man_pages(dir):
83 """
84 Get man pages in a directory.
85 :param dir:
86 :return: list of file names
87 """
88 files = os.listdir(dir)
89 r_files = []
90 for file in files:
91 if file.endswith(".1"):
92 r_files.append(dir + "/" + file)
93 return r_files
94
95
96 def get_scripts(dir):
97 """
98 Get files that are executable
99 :param dir:
100 :return: list of file names
101 """
102 files = os.listdir(dir)
103 r_files = []
104 for file in files:
105 if os.stat(dir + "/" + file)[stat.ST_MODE] & stat.S_IEXEC:
106 r_files.append(dir + "/" + file)
107 return r_files
108
109
110 setup(
111 name='privacyIDEA',
112 version=VERSION,
113 description='privacyIDEA: identity, multifactor authentication (OTP), '
114 'authorization, audit',
115 author='privacyidea.org',
116 license='AGPLv3',
117 author_email='[email protected]',
118 url='http://www.privacyidea.org',
119 keywords='OTP, two factor authentication, management, security',
120 packages=find_packages(),
121 scripts=["pi-manage"] + get_scripts("tools"),
122 extras_require={
123 'dev': ["Sphinx>=1.3.1",
124 "sphinxcontrib-httpdomain>=1.3.0"],
125 'test': ["coverage>=3.7.1",
126 "mock>=1.0.1",
127 "pyparsing>=2.0.3",
128 "nose>=1.3.4",
129 "responses>=0.4.0",
130 "six>=1.8.0"],
131 },
132 install_requires=install_requires,
133 include_package_data=True,
134 data_files=[('etc/privacyidea/',
135 ['deploy/apache/privacyideaapp.wsgi',
136 'deploy/privacyidea/dictionary',
137 'deploy/privacyidea/enckey',
138 'deploy/privacyidea/private.pem',
139 'deploy/privacyidea/public.pem']),
140 ('share/man/man1', get_man_pages("tools")),
141 ('lib/privacyidea/authmodules/FreeRADIUS',
142 ["authmodules/FreeRADIUS/LICENSE",
143 "authmodules/FreeRADIUS/privacyidea_radius.pm"]),
144 ('lib/privacyidea/authmodules/OTRS',
145 ["authmodules/OTRS/privacyIDEA.pm"]),
146 ('lib/privacyidea/migrations',
147 ["migrations/alembic.ini",
148 "migrations/env.py",
149 "migrations/README",
150 "migrations/script.py.mako"]),
151 ('lib/privacyidea/migrations/versions',
152 get_file_list("migrations/versions/"))
153 ],
154 classifiers=["Framework :: Flask",
155 "License :: OSI Approved :: "
156 "GNU Affero General Public License v3",
157 "Programming Language :: Python",
158 "Development Status :: 5 - Production/Stable",
159 "Topic :: Internet",
160 "Topic :: Security",
161 "Topic :: System ::"
162 " Systems Administration :: Authentication/Directory"
163 ],
164 #message_extractors={'privacyidea': [
165 # ('**.py', 'python', None),
166 # ('static/**.html', 'html', {'input_encoding': 'utf-8'})]},
167 zip_safe=False,
168 long_description=get_file_contents('README.rst')
169 )
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -138,9 +138,6 @@
'deploy/privacyidea/private.pem',
'deploy/privacyidea/public.pem']),
('share/man/man1', get_man_pages("tools")),
- ('lib/privacyidea/authmodules/FreeRADIUS',
- ["authmodules/FreeRADIUS/LICENSE",
- "authmodules/FreeRADIUS/privacyidea_radius.pm"]),
('lib/privacyidea/authmodules/OTRS',
["authmodules/OTRS/privacyIDEA.pm"]),
('lib/privacyidea/migrations',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -138,9 +138,6 @@\n 'deploy/privacyidea/private.pem',\n 'deploy/privacyidea/public.pem']),\n ('share/man/man1', get_man_pages(\"tools\")),\n- ('lib/privacyidea/authmodules/FreeRADIUS',\n- [\"authmodules/FreeRADIUS/LICENSE\",\n- \"authmodules/FreeRADIUS/privacyidea_radius.pm\"]),\n ('lib/privacyidea/authmodules/OTRS',\n [\"authmodules/OTRS/privacyIDEA.pm\"]),\n ('lib/privacyidea/migrations',\n", "issue": "Remove sub modules\nWe often have problems, when building plugins. Plugins have another release pace than the server.\r\n\r\nWe should remove the submodules\r\n* pam_python\r\n* freeradius\r\n* simplesamlphp\r\n* adminclient\r\n\r\nand give them their own versioning. \r\nThe debian build environment has to be added/moved to those repositories.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nimport os\nimport stat\nimport sys\n\n#VERSION=\"2.1dev4\"\nVERSION=\"3.0dev1\"\n\n# Taken from kennethreitz/requests/setup.py\npackage_directory = os.path.realpath(os.path.dirname(__file__))\n\n\ndef get_file_contents(file_path):\n \"\"\"Get the context of the file using full path name.\"\"\"\n content = \"\"\n try:\n full_path = os.path.join(package_directory, file_path)\n content = open(full_path, 'r').read()\n except:\n print >> sys.stderr, \"### could not open file {0!r}\".format(file_path)\n return content\n\ndef get_file_list(file_path):\n full_path = os.path.join(package_directory, file_path)\n file_list = os.listdir(full_path)\n # now we need to add the path to the files\n return [ file_path + f for f in file_list ]\n\n\ninstall_requires = [\"Flask>=0.10.1\",\n \"Flask-Migrate>=1.2.0\",\n \"Flask-SQLAlchemy>=2.0\",\n \"Flask-Script>=2.0.5\",\n \"Jinja2>=2.7.3\",\n \"Mako>=0.9.1\",\n \"MarkupSafe>=0.23\",\n \"PyMySQL>=0.6.6\",\n \"Pillow>=2.6.1\",\n \"PyJWT>=1.3.0\",\n \"PyYAML>=3.11\",\n \"Pygments>=2.0.2\",\n \"SQLAlchemy>=1.0.5\",\n \"Werkzeug>=0.10.4\",\n \"alembic>=0.6.7\",\n \"argparse>=1.2.1\",\n \"bcrypt>=1.1.0\",\n \"beautifulsoup4>=4.3.2\",\n \"cffi>=0.8.6\",\n \"configobj>=5.0.6\",\n \"docutils>=0.12\",\n \"funcparserlib>=0.3.6\",\n \"itsdangerous>=0.24\",\n \"ldap3>=2.5\",\n \"netaddr>=0.7.12\",\n \"passlib>=1.6.2\",\n \"pyasn1>=0.4.2\",\n \"pyOpenSSL>=17.5\",\n \"pycparser>=2.10\",\n \"pycrypto>=2.6.1\",\n \"pyrad>=2.0\",\n \"pyusb>=1.0.0b2\",\n \"qrcode>=5.1\",\n \"requests>=2.7.0\",\n \"sqlsoup>=0.9.0\",\n \"ecdsa>=0.13\",\n \"lxml>=3.3\",\n \"python-gnupg>=0.3.8\",\n \"defusedxml>=0.4.1\",\n \"flask-babel>=0.9\",\n \"croniter>=0.3.8\",\n \"oauth2client>=2.0.1\"\n ]\n\n# For python 2.6 we need additional dependency importlib\ntry:\n import importlib\nexcept ImportError:\n install_requires.append('importlib')\n\n\ndef get_man_pages(dir):\n \"\"\"\n Get man pages in a directory.\n :param dir: \n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if file.endswith(\".1\"):\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\ndef get_scripts(dir):\n \"\"\"\n Get files that are executable\n :param dir: \n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if os.stat(dir + \"/\" + file)[stat.ST_MODE] & stat.S_IEXEC:\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\nsetup(\n name='privacyIDEA',\n version=VERSION,\n description='privacyIDEA: identity, multifactor authentication (OTP), '\n 'authorization, audit',\n 
author='privacyidea.org',\n license='AGPLv3',\n author_email='[email protected]',\n url='http://www.privacyidea.org',\n keywords='OTP, two factor authentication, management, security',\n packages=find_packages(),\n scripts=[\"pi-manage\"] + get_scripts(\"tools\"),\n extras_require={\n 'dev': [\"Sphinx>=1.3.1\",\n \"sphinxcontrib-httpdomain>=1.3.0\"],\n 'test': [\"coverage>=3.7.1\",\n \"mock>=1.0.1\",\n \"pyparsing>=2.0.3\",\n \"nose>=1.3.4\",\n \"responses>=0.4.0\",\n \"six>=1.8.0\"],\n },\n install_requires=install_requires,\n include_package_data=True,\n data_files=[('etc/privacyidea/',\n ['deploy/apache/privacyideaapp.wsgi',\n 'deploy/privacyidea/dictionary',\n 'deploy/privacyidea/enckey',\n 'deploy/privacyidea/private.pem',\n 'deploy/privacyidea/public.pem']),\n ('share/man/man1', get_man_pages(\"tools\")),\n ('lib/privacyidea/authmodules/FreeRADIUS',\n [\"authmodules/FreeRADIUS/LICENSE\",\n \"authmodules/FreeRADIUS/privacyidea_radius.pm\"]),\n ('lib/privacyidea/authmodules/OTRS',\n [\"authmodules/OTRS/privacyIDEA.pm\"]),\n ('lib/privacyidea/migrations',\n [\"migrations/alembic.ini\",\n \"migrations/env.py\",\n \"migrations/README\",\n \"migrations/script.py.mako\"]),\n ('lib/privacyidea/migrations/versions',\n get_file_list(\"migrations/versions/\"))\n ],\n classifiers=[\"Framework :: Flask\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3\",\n \"Programming Language :: Python\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Internet\",\n \"Topic :: Security\",\n \"Topic :: System ::\"\n \" Systems Administration :: Authentication/Directory\"\n ],\n #message_extractors={'privacyidea': [\n # ('**.py', 'python', None),\n # ('static/**.html', 'html', {'input_encoding': 'utf-8'})]},\n zip_safe=False,\n long_description=get_file_contents('README.rst')\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nimport os\nimport stat\nimport sys\n\n#VERSION=\"2.1dev4\"\nVERSION=\"3.0dev1\"\n\n# Taken from kennethreitz/requests/setup.py\npackage_directory = os.path.realpath(os.path.dirname(__file__))\n\n\ndef get_file_contents(file_path):\n \"\"\"Get the context of the file using full path name.\"\"\"\n content = \"\"\n try:\n full_path = os.path.join(package_directory, file_path)\n content = open(full_path, 'r').read()\n except:\n print >> sys.stderr, \"### could not open file {0!r}\".format(file_path)\n return content\n\ndef get_file_list(file_path):\n full_path = os.path.join(package_directory, file_path)\n file_list = os.listdir(full_path)\n # now we need to add the path to the files\n return [ file_path + f for f in file_list ]\n\n\ninstall_requires = [\"Flask>=0.10.1\",\n \"Flask-Migrate>=1.2.0\",\n \"Flask-SQLAlchemy>=2.0\",\n \"Flask-Script>=2.0.5\",\n \"Jinja2>=2.7.3\",\n \"Mako>=0.9.1\",\n \"MarkupSafe>=0.23\",\n \"PyMySQL>=0.6.6\",\n \"Pillow>=2.6.1\",\n \"PyJWT>=1.3.0\",\n \"PyYAML>=3.11\",\n \"Pygments>=2.0.2\",\n \"SQLAlchemy>=1.0.5\",\n \"Werkzeug>=0.10.4\",\n \"alembic>=0.6.7\",\n \"argparse>=1.2.1\",\n \"bcrypt>=1.1.0\",\n \"beautifulsoup4>=4.3.2\",\n \"cffi>=0.8.6\",\n \"configobj>=5.0.6\",\n \"docutils>=0.12\",\n \"funcparserlib>=0.3.6\",\n \"itsdangerous>=0.24\",\n \"ldap3>=2.5\",\n \"netaddr>=0.7.12\",\n \"passlib>=1.6.2\",\n \"pyasn1>=0.4.2\",\n \"pyOpenSSL>=17.5\",\n \"pycparser>=2.10\",\n \"pycrypto>=2.6.1\",\n \"pyrad>=2.0\",\n \"pyusb>=1.0.0b2\",\n \"qrcode>=5.1\",\n \"requests>=2.7.0\",\n \"sqlsoup>=0.9.0\",\n \"ecdsa>=0.13\",\n \"lxml>=3.3\",\n 
\"python-gnupg>=0.3.8\",\n \"defusedxml>=0.4.1\",\n \"flask-babel>=0.9\",\n \"croniter>=0.3.8\",\n \"oauth2client>=2.0.1\"\n ]\n\n# For python 2.6 we need additional dependency importlib\ntry:\n import importlib\nexcept ImportError:\n install_requires.append('importlib')\n\n\ndef get_man_pages(dir):\n \"\"\"\n Get man pages in a directory.\n :param dir: \n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if file.endswith(\".1\"):\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\ndef get_scripts(dir):\n \"\"\"\n Get files that are executable\n :param dir: \n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if os.stat(dir + \"/\" + file)[stat.ST_MODE] & stat.S_IEXEC:\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\nsetup(\n name='privacyIDEA',\n version=VERSION,\n description='privacyIDEA: identity, multifactor authentication (OTP), '\n 'authorization, audit',\n author='privacyidea.org',\n license='AGPLv3',\n author_email='[email protected]',\n url='http://www.privacyidea.org',\n keywords='OTP, two factor authentication, management, security',\n packages=find_packages(),\n scripts=[\"pi-manage\"] + get_scripts(\"tools\"),\n extras_require={\n 'dev': [\"Sphinx>=1.3.1\",\n \"sphinxcontrib-httpdomain>=1.3.0\"],\n 'test': [\"coverage>=3.7.1\",\n \"mock>=1.0.1\",\n \"pyparsing>=2.0.3\",\n \"nose>=1.3.4\",\n \"responses>=0.4.0\",\n \"six>=1.8.0\"],\n },\n install_requires=install_requires,\n include_package_data=True,\n data_files=[('etc/privacyidea/',\n ['deploy/apache/privacyideaapp.wsgi',\n 'deploy/privacyidea/dictionary',\n 'deploy/privacyidea/enckey',\n 'deploy/privacyidea/private.pem',\n 'deploy/privacyidea/public.pem']),\n ('share/man/man1', get_man_pages(\"tools\")),\n ('lib/privacyidea/authmodules/OTRS',\n [\"authmodules/OTRS/privacyIDEA.pm\"]),\n ('lib/privacyidea/migrations',\n [\"migrations/alembic.ini\",\n \"migrations/env.py\",\n \"migrations/README\",\n \"migrations/script.py.mako\"]),\n ('lib/privacyidea/migrations/versions',\n get_file_list(\"migrations/versions/\"))\n ],\n classifiers=[\"Framework :: Flask\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3\",\n \"Programming Language :: Python\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Internet\",\n \"Topic :: Security\",\n \"Topic :: System ::\"\n \" Systems Administration :: Authentication/Directory\"\n ],\n #message_extractors={'privacyidea': [\n # ('**.py', 'python', None),\n # ('static/**.html', 'html', {'input_encoding': 'utf-8'})]},\n zip_safe=False,\n long_description=get_file_contents('README.rst')\n)\n", "path": "setup.py"}]} | 2,159 | 139 |
gh_patches_debug_28736 | rasdani/github-patches | git_diff | opsdroid__opsdroid-183 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change default log location
Logs by default are written to `./opsdroid.log`. So they end up being written wherever you run the command.
Logs should either be written to `/var/log/opsdroid.log` or, as that may not be writeable by all users, maybe to `~/.opsdroid/opsdroid.log`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/const.py`
Content:
```
1 """Constants used by OpsDroid."""
2 import os
3
4 __version__ = "0.8.1"
5
6 LOG_FILENAME = 'output.log'
7 DEFAULT_GIT_URL = "https://github.com/opsdroid/"
8 MODULES_DIRECTORY = "opsdroid-modules"
9 DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser("~"), ".opsdroid")
10 DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, "modules")
11 DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, "site-packages")
12 DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, "configuration.yaml")
13 DEFAULT_MODULE_BRANCH = "master"
14 EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
15 "configuration/example_configuration.yaml")
16
```
Path: `opsdroid/__main__.py`
Content:
```
1 """Starts opsdroid."""
2
3 import sys
4 import logging
5 import argparse
6
7 from opsdroid.core import OpsDroid
8 from opsdroid.const import LOG_FILENAME, EXAMPLE_CONFIG_FILE
9 from opsdroid.web import Web
10
11
12 _LOGGER = logging.getLogger("opsdroid")
13
14
15 def configure_logging(config):
16 """Configure the root logger based on user config."""
17 rootlogger = logging.getLogger()
18 while rootlogger.handlers:
19 rootlogger.handlers.pop()
20
21 try:
22 logfile_path = config["logging"]["path"]
23 except KeyError:
24 logfile_path = LOG_FILENAME
25
26 try:
27 log_level = get_logging_level(
28 config["logging"]["level"])
29 except KeyError:
30 log_level = logging.INFO
31
32 rootlogger.setLevel(log_level)
33 formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')
34
35 console_handler = logging.StreamHandler()
36 console_handler.setLevel(log_level)
37 console_handler.setFormatter(formatter)
38 rootlogger.addHandler(console_handler)
39
40 try:
41 if not config["logging"]["console"]:
42 console_handler.setLevel(logging.CRITICAL)
43 except KeyError:
44 pass
45
46 if logfile_path:
47 file_handler = logging.FileHandler(logfile_path)
48 file_handler.setLevel(log_level)
49 file_handler.setFormatter(formatter)
50 rootlogger.addHandler(file_handler)
51
52 _LOGGER.info("="*40)
53 _LOGGER.info("Stated application")
54
55
56 def get_logging_level(logging_level):
57 """Get the logger level based on the user configuration."""
58 if logging_level == 'critical':
59 return logging.CRITICAL
60 elif logging_level == 'error':
61 return logging.ERROR
62 elif logging_level == 'warning':
63 return logging.WARNING
64 elif logging_level == 'debug':
65 return logging.DEBUG
66
67 return logging.INFO
68
69
70 def parse_args(args):
71 """Parse command line arguments."""
72 parser = argparse.ArgumentParser(description='Run opsdroid.')
73 parser.add_argument('--gen-config', action="store_true",
74 help='prints out an example configuration file')
75 return parser.parse_args(args)
76
77
78 def check_dependencies():
79 """Check for system dependencies required by opsdroid."""
80 if sys.version_info[0] < 3 or sys.version_info[1] < 5:
81 logging.critical("Whoops! opsdroid requires python 3.5 or above.")
82 sys.exit(1)
83
84
85 def main():
86 """Enter the application here."""
87 args = parse_args(sys.argv[1:])
88
89 if args.gen_config:
90 with open(EXAMPLE_CONFIG_FILE, 'r') as conf:
91 print(conf.read())
92 sys.exit(0)
93
94 check_dependencies()
95
96 restart = True
97
98 while restart:
99 with OpsDroid() as opsdroid:
100 opsdroid.load()
101 configure_logging(opsdroid.config)
102 opsdroid.web_server = Web(opsdroid)
103 opsdroid.start_loop()
104 restart = opsdroid.should_restart
105
106
107 if __name__ == "__main__":
108 main()
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py
--- a/opsdroid/__main__.py
+++ b/opsdroid/__main__.py
@@ -1,11 +1,12 @@
"""Starts opsdroid."""
+import os
import sys
import logging
import argparse
from opsdroid.core import OpsDroid
-from opsdroid.const import LOG_FILENAME, EXAMPLE_CONFIG_FILE
+from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE
from opsdroid.web import Web
@@ -19,9 +20,12 @@
rootlogger.handlers.pop()
try:
- logfile_path = config["logging"]["path"]
+ if config["logging"]["path"]:
+ logfile_path = os.path.expanduser(config["logging"]["path"])
+ else:
+ logfile_path = config["logging"]["path"]
except KeyError:
- logfile_path = LOG_FILENAME
+ logfile_path = DEFAULT_LOG_FILENAME
try:
log_level = get_logging_level(
diff --git a/opsdroid/const.py b/opsdroid/const.py
--- a/opsdroid/const.py
+++ b/opsdroid/const.py
@@ -3,10 +3,10 @@
__version__ = "0.8.1"
-LOG_FILENAME = 'output.log'
DEFAULT_GIT_URL = "https://github.com/opsdroid/"
MODULES_DIRECTORY = "opsdroid-modules"
-DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser("~"), ".opsdroid")
+DEFAULT_ROOT_PATH = os.path.expanduser("~/.opsdroid")
+DEFAULT_LOG_FILENAME = os.path.join(DEFAULT_ROOT_PATH, 'output.log')
DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, "modules")
DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, "site-packages")
DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, "configuration.yaml")
| {"golden_diff": "diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py\n--- a/opsdroid/__main__.py\n+++ b/opsdroid/__main__.py\n@@ -1,11 +1,12 @@\n \"\"\"Starts opsdroid.\"\"\"\n \n+import os\n import sys\n import logging\n import argparse\n \n from opsdroid.core import OpsDroid\n-from opsdroid.const import LOG_FILENAME, EXAMPLE_CONFIG_FILE\n+from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE\n from opsdroid.web import Web\n \n \n@@ -19,9 +20,12 @@\n rootlogger.handlers.pop()\n \n try:\n- logfile_path = config[\"logging\"][\"path\"]\n+ if config[\"logging\"][\"path\"]:\n+ logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n+ else:\n+ logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n- logfile_path = LOG_FILENAME\n+ logfile_path = DEFAULT_LOG_FILENAME\n \n try:\n log_level = get_logging_level(\ndiff --git a/opsdroid/const.py b/opsdroid/const.py\n--- a/opsdroid/const.py\n+++ b/opsdroid/const.py\n@@ -3,10 +3,10 @@\n \n __version__ = \"0.8.1\"\n \n-LOG_FILENAME = 'output.log'\n DEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\n MODULES_DIRECTORY = \"opsdroid-modules\"\n-DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\n+DEFAULT_ROOT_PATH = os.path.expanduser(\"~/.opsdroid\")\n+DEFAULT_LOG_FILENAME = os.path.join(DEFAULT_ROOT_PATH, 'output.log')\n DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\n DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \"site-packages\")\n DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\n", "issue": "Change default log location\nLogs by default are written to `./opsdroid.log`. So they end up being written wherever you run the command.\r\n\r\nLogs should either be written to `/var/log/opsdroid.log` or as that may not be writeable by all users maybe `~/.opsdroid/opsdroid.log`.\n", "before_files": [{"content": "\"\"\"Constants used by OpsDroid.\"\"\"\nimport os\n\n__version__ = \"0.8.1\"\n\nLOG_FILENAME = 'output.log'\nDEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\nMODULES_DIRECTORY = \"opsdroid-modules\"\nDEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\nDEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\nDEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \"site-packages\")\nDEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\nDEFAULT_MODULE_BRANCH = \"master\"\nEXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"configuration/example_configuration.yaml\")\n", "path": "opsdroid/const.py"}, {"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport sys\nimport logging\nimport argparse\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.const import LOG_FILENAME, EXAMPLE_CONFIG_FILE\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n 
rootlogger.addHandler(console_handler)\n\n try:\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n except KeyError:\n pass\n\n if logfile_path:\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(\"Stated application\")\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n elif logging_level == 'error':\n return logging.ERROR\n elif logging_level == 'warning':\n return logging.WARNING\n elif logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef parse_args(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Run opsdroid.')\n parser.add_argument('--gen-config', action=\"store_true\",\n help='prints out an example configuration file')\n return parser.parse_args(args)\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info[0] < 3 or sys.version_info[1] < 5:\n logging.critical(\"Whoops! opsdroid requires python 3.5 or above.\")\n sys.exit(1)\n\n\ndef main():\n \"\"\"Enter the application here.\"\"\"\n args = parse_args(sys.argv[1:])\n\n if args.gen_config:\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n print(conf.read())\n sys.exit(0)\n\n check_dependencies()\n\n restart = True\n\n while restart:\n with OpsDroid() as opsdroid:\n opsdroid.load()\n configure_logging(opsdroid.config)\n opsdroid.web_server = Web(opsdroid)\n opsdroid.start_loop()\n restart = opsdroid.should_restart\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}], "after_files": [{"content": "\"\"\"Constants used by OpsDroid.\"\"\"\nimport os\n\n__version__ = \"0.8.1\"\n\nDEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\nMODULES_DIRECTORY = \"opsdroid-modules\"\nDEFAULT_ROOT_PATH = os.path.expanduser(\"~/.opsdroid\")\nDEFAULT_LOG_FILENAME = os.path.join(DEFAULT_ROOT_PATH, 'output.log')\nDEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\nDEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \"site-packages\")\nDEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\nDEFAULT_MODULE_BRANCH = \"master\"\nEXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"configuration/example_configuration.yaml\")\n", "path": "opsdroid/const.py"}, {"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport os\nimport sys\nimport logging\nimport argparse\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n 
console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n rootlogger.addHandler(console_handler)\n\n try:\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n except KeyError:\n pass\n\n if logfile_path:\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(\"Stated application\")\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n elif logging_level == 'error':\n return logging.ERROR\n elif logging_level == 'warning':\n return logging.WARNING\n elif logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef parse_args(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Run opsdroid.')\n parser.add_argument('--gen-config', action=\"store_true\",\n help='prints out an example configuration file')\n return parser.parse_args(args)\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info[0] < 3 or sys.version_info[1] < 5:\n logging.critical(\"Whoops! opsdroid requires python 3.5 or above.\")\n sys.exit(1)\n\n\ndef main():\n \"\"\"Enter the application here.\"\"\"\n args = parse_args(sys.argv[1:])\n\n if args.gen_config:\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n print(conf.read())\n sys.exit(0)\n\n check_dependencies()\n\n restart = True\n\n while restart:\n with OpsDroid() as opsdroid:\n opsdroid.load()\n configure_logging(opsdroid.config)\n opsdroid.web_server = Web(opsdroid)\n opsdroid.start_loop()\n restart = opsdroid.should_restart\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}]} | 1,373 | 423 |
gh_patches_debug_65370 | rasdani/github-patches | git_diff | fossasia__open-event-server-5151 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
For the edit action button in admin/users, the super admin should be allowed to make a user an admin.
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
The patch for the users is working, but is_admin is a read-only property even for the super user. I used the PATCH request for users and changed the value of is_admin from true to false, but the value of is_admin does not change after the request.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to v1/users/user_id
2. Send a patch request
3. Change the value of is_admin for some user from false to true.
4. See error

After the request

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/users.py`
Content:
```
1 import base64
2
3 from flask import Blueprint, request, jsonify, abort, make_response
4 from flask_jwt import current_identity as current_user
5 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
6
7 from app import get_settings
8 from app.api.bootstrap import api
9 from app.api.helpers.db import safe_query, get_count
10 from app.api.helpers.exceptions import ConflictException
11 from app.api.helpers.exceptions import ForbiddenException
12 from app.api.helpers.files import create_save_image_sizes, make_frontend_url
13 from app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action
14 from app.api.helpers.permission_manager import has_access
15 from app.api.helpers.permissions import is_user_itself
16 from app.api.helpers.utilities import get_serializer, str_generator
17 from app.api.schema.users import UserSchema, UserSchemaPublic
18 from app.models import db
19 from app.models.access_code import AccessCode
20 from app.models.discount_code import DiscountCode
21 from app.models.email_notification import EmailNotification
22 from app.models.event_invoice import EventInvoice
23 from app.models.feedback import Feedback
24 from app.models.mail import USER_REGISTER_WITH_PASSWORD
25 from app.models.notification import Notification
26 from app.models.session import Session
27 from app.models.speaker import Speaker
28 from app.models.ticket_holder import TicketHolder
29 from app.models.user import User
30 from app.models.users_events_role import UsersEventsRoles
31
32 user_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')
33
34
35 class UserList(ResourceList):
36 """
37 List and create Users
38 """
39 def before_create_object(self, data, view_kwargs):
40 """
41 method to check if there is an existing user with same email which is received in data to create a new user
42 :param data:
43 :param view_kwargs:
44 :return:
45 """
46 if db.session.query(User.id).filter_by(email=data['email'], deleted_at=None).scalar() is not None:
47 raise ConflictException({'pointer': '/data/attributes/email'}, "Email already exists")
48
49 def after_create_object(self, user, data, view_kwargs):
50 """
51 method to send-
52 email notification
53 mail link for register verification
54 add image urls
55 :param user:
56 :param data:
57 :param view_kwargs:
58 :return:
59 """
60 s = get_serializer()
61 hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')
62 link = make_frontend_url('/email/verify'.format(id=user.id), {'token': hash})
63 send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],
64 email=user.email)
65 send_email_confirmation(user.email, link)
66
67 if data.get('original_image_url'):
68 uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)
69 uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']
70 del uploaded_images['large_image_url']
71 self.session.query(User).filter_by(id=user.id).update(uploaded_images)
72
73 decorators = (api.has_permission('is_admin', methods="GET"),)
74 schema = UserSchema
75 data_layer = {'session': db.session,
76 'model': User,
77 'methods': {
78 'before_create_object': before_create_object,
79 'after_create_object': after_create_object
80 }}
81
82
83 class UserDetail(ResourceDetail):
84 """
85 User detail by id
86 """
87 def before_get(self, args, kwargs):
88
89 if current_user.is_admin or current_user.is_super_admin or current_user:
90 self.schema = UserSchema
91 else:
92 self.schema = UserSchemaPublic
93
94 def before_get_object(self, view_kwargs):
95 """
96 before get method for user object
97 :param view_kwargs:
98 :return:
99 """
100 if view_kwargs.get('notification_id') is not None:
101 notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')
102 if notification.user_id is not None:
103 view_kwargs['id'] = notification.user_id
104 else:
105 view_kwargs['id'] = None
106
107 if view_kwargs.get('feedback_id') is not None:
108 print(view_kwargs['feedback_id'])
109 feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')
110 if feedback.user_id is not None:
111 view_kwargs['id'] = feedback.user_id
112 else:
113 view_kwargs['id'] = None
114
115 if view_kwargs.get('attendee_id') is not None:
116 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')
117 if attendee.user is not None:
118 if (not has_access('is_user_itself',
119 user_id=attendee.user.id) or not has_access('is_coorganizer',
120 event_id=attendee.event_id)):
121 raise ForbiddenException({'source': ''}, 'Access Forbidden')
122 view_kwargs['id'] = attendee.user.id
123 else:
124 view_kwargs['id'] = None
125
126 if view_kwargs.get('event_invoice_id') is not None:
127 event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')
128 if event_invoice.user_id is not None:
129 view_kwargs['id'] = event_invoice.user_id
130 else:
131 view_kwargs['id'] = None
132
133 if view_kwargs.get('users_events_role_id') is not None:
134 users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],
135 'users_events_role_id')
136 if users_events_role.user_id is not None:
137 view_kwargs['id'] = users_events_role.user_id
138
139 if view_kwargs.get('speaker_id') is not None:
140 speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')
141 if speaker.user_id is not None:
142 view_kwargs['id'] = speaker.user_id
143 else:
144 view_kwargs['id'] = None
145
146 if view_kwargs.get('session_id') is not None:
147 session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
148 if session.creator_id is not None:
149 view_kwargs['id'] = session.creator_id
150 else:
151 view_kwargs['id'] = None
152
153 if view_kwargs.get('access_code_id') is not None:
154 access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')
155 if access_code.marketer_id is not None:
156 view_kwargs['id'] = access_code.marketer_id
157 else:
158 view_kwargs['id'] = None
159
160 if view_kwargs.get('discount_code_id') is not None:
161 discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')
162 if discount_code.marketer_id is not None:
163 view_kwargs['id'] = discount_code.marketer_id
164 else:
165 view_kwargs['id'] = None
166
167 if view_kwargs.get('email_notification_id') is not None:
168 email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],
169 'email_notification_id')
170 if email_notification.user_id is not None:
171 view_kwargs['id'] = email_notification.user_id
172 else:
173 view_kwargs['id'] = None
174
175 def before_update_object(self, user, data, view_kwargs):
176 if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:
177 uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)
178 data['original_image_url'] = uploaded_images['original_image_url']
179 data['small_image_url'] = uploaded_images['thumbnail_image_url']
180 data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']
181 data['icon_image_url'] = uploaded_images['icon_image_url']
182
183 if data.get('email') and data['email'] != user.email:
184 view_kwargs['email_changed'] = user.email
185
186 if data.get('is_admin') != user.is_admin:
187 user.is_admin = not user.is_admin
188
189 def after_update_object(self, user, data, view_kwargs):
190 """
191 method to mail user about email change
192 :param user:
193 :param data:
194 :param view_kwargs:
195 :return:
196 """
197 if view_kwargs.get('email_changed'):
198 send_email_change_user_email(user, view_kwargs.get('email_changed'))
199
200 decorators = (api.has_permission('is_user_itself', fetch="user_id,id", fetch_as="user_id",
201 model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,
202 DiscountCode, EmailNotification, Speaker, User],
203 fetch_key_url="notification_id, feedback_id, users_events_role_id, session_id, \
204 event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id",
205 leave_if=lambda a: a.get('attendee_id')), )
206 schema = UserSchema
207 data_layer = {'session': db.session,
208 'model': User,
209 'methods': {
210 'before_get_object': before_get_object,
211 'before_update_object': before_update_object,
212 'after_update_object': after_update_object
213 }}
214
215
216 class UserRelationship(ResourceRelationship):
217 """
218 User Relationship
219 """
220 decorators = (is_user_itself, )
221 schema = UserSchema
222 data_layer = {'session': db.session,
223 'model': User}
224
225
226 @user_misc_routes.route('/users/checkEmail', methods=['POST'])
227 def is_email_available():
228 email = request.json.get('email', None)
229 if email:
230 if get_count(db.session.query(User).filter_by(email=email)):
231 return jsonify(
232 result="False"
233 )
234 else:
235 return jsonify(
236 result="True"
237 )
238 else:
239 abort(
240 make_response(jsonify(error="Email field missing"), 422)
241 )
242
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/users.py b/app/api/users.py
--- a/app/api/users.py
+++ b/app/api/users.py
@@ -183,7 +183,7 @@
if data.get('email') and data['email'] != user.email:
view_kwargs['email_changed'] = user.email
- if data.get('is_admin') != user.is_admin:
+ if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:
user.is_admin = not user.is_admin
def after_update_object(self, user, data, view_kwargs):
| {"golden_diff": "diff --git a/app/api/users.py b/app/api/users.py\n--- a/app/api/users.py\n+++ b/app/api/users.py\n@@ -183,7 +183,7 @@\n if data.get('email') and data['email'] != user.email:\n view_kwargs['email_changed'] = user.email\n \n- if data.get('is_admin') != user.is_admin:\n+ if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n \n def after_update_object(self, user, data, view_kwargs):\n", "issue": "For the edit action button in admin/users the super admin should be allowed to make the users as an admin.\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nThe patch for the users is working but is_admin is a readonly property even for the super user. Used the patch request for users and changed the value for is_admin from true to false. But there is no change after the request to the value of is_admin. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to v1/users/user_id\r\n2. Send a patch request\r\n3. Change the value of is_admin for some user from false to true.\r\n4. See error\r\n\r\n\r\n\r\nAfter the request\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import base64\n\nfrom flask import Blueprint, request, jsonify, abort, make_response\nfrom flask_jwt import current_identity as current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app import get_settings\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import ConflictException\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.files import create_save_image_sizes, make_frontend_url\nfrom app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import is_user_itself\nfrom app.api.helpers.utilities import get_serializer, str_generator\nfrom app.api.schema.users import UserSchema, UserSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.email_notification import EmailNotification\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.feedback import Feedback\nfrom app.models.mail import USER_REGISTER_WITH_PASSWORD\nfrom app.models.notification import Notification\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\nfrom app.models.users_events_role import UsersEventsRoles\n\nuser_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')\n\n\nclass UserList(ResourceList):\n \"\"\"\n List and create Users\n \"\"\"\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n method to check if there is an existing user with same email which is received in data to create a new user\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if db.session.query(User.id).filter_by(email=data['email'], deleted_at=None).scalar() is not None:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n def after_create_object(self, user, data, view_kwargs):\n \"\"\"\n method to send-\n email notification\n mail link for register verification\n add image urls\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n s = get_serializer()\n hash 
= str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')\n link = make_frontend_url('/email/verify'.format(id=user.id), {'token': hash})\n send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],\n email=user.email)\n send_email_confirmation(user.email, link)\n\n if data.get('original_image_url'):\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']\n del uploaded_images['large_image_url']\n self.session.query(User).filter_by(id=user.id).update(uploaded_images)\n\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass UserDetail(ResourceDetail):\n \"\"\"\n User detail by id\n \"\"\"\n def before_get(self, args, kwargs):\n\n if current_user.is_admin or current_user.is_super_admin or current_user:\n self.schema = UserSchema\n else:\n self.schema = UserSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method for user object\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('notification_id') is not None:\n notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')\n if notification.user_id is not None:\n view_kwargs['id'] = notification.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('feedback_id') is not None:\n print(view_kwargs['feedback_id'])\n feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')\n if feedback.user_id is not None:\n view_kwargs['id'] = feedback.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.user is not None:\n if (not has_access('is_user_itself',\n user_id=attendee.user.id) or not has_access('is_coorganizer',\n event_id=attendee.event_id)):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n view_kwargs['id'] = attendee.user.id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('event_invoice_id') is not None:\n event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')\n if event_invoice.user_id is not None:\n view_kwargs['id'] = event_invoice.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_role_id') is not None:\n users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],\n 'users_events_role_id')\n if users_events_role.user_id is not None:\n view_kwargs['id'] = users_events_role.user_id\n\n if view_kwargs.get('speaker_id') is not None:\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n if speaker.user_id is not None:\n view_kwargs['id'] = speaker.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('session_id') is not None:\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if session.creator_id is not None:\n view_kwargs['id'] = session.creator_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('access_code_id') is not None:\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n if access_code.marketer_id is not 
None:\n view_kwargs['id'] = access_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('discount_code_id') is not None:\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n if discount_code.marketer_id is not None:\n view_kwargs['id'] = discount_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('email_notification_id') is not None:\n email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],\n 'email_notification_id')\n if email_notification.user_id is not None:\n view_kwargs['id'] = email_notification.user_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, user, data, view_kwargs):\n if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n data['original_image_url'] = uploaded_images['original_image_url']\n data['small_image_url'] = uploaded_images['thumbnail_image_url']\n data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']\n data['icon_image_url'] = uploaded_images['icon_image_url']\n\n if data.get('email') and data['email'] != user.email:\n view_kwargs['email_changed'] = user.email\n\n if data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n\n def after_update_object(self, user, data, view_kwargs):\n \"\"\"\n method to mail user about email change\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('email_changed'):\n send_email_change_user_email(user, view_kwargs.get('email_changed'))\n\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id,id\", fetch_as=\"user_id\",\n model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,\n DiscountCode, EmailNotification, Speaker, User],\n fetch_key_url=\"notification_id, feedback_id, users_events_role_id, session_id, \\\n event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id\",\n leave_if=lambda a: a.get('attendee_id')), )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'after_update_object': after_update_object\n }}\n\n\nclass UserRelationship(ResourceRelationship):\n \"\"\"\n User Relationship\n \"\"\"\n decorators = (is_user_itself, )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User}\n\n\n@user_misc_routes.route('/users/checkEmail', methods=['POST'])\ndef is_email_available():\n email = request.json.get('email', None)\n if email:\n if get_count(db.session.query(User).filter_by(email=email)):\n return jsonify(\n result=\"False\"\n )\n else:\n return jsonify(\n result=\"True\"\n )\n else:\n abort(\n make_response(jsonify(error=\"Email field missing\"), 422)\n )\n", "path": "app/api/users.py"}], "after_files": [{"content": "import base64\n\nfrom flask import Blueprint, request, jsonify, abort, make_response\nfrom flask_jwt import current_identity as current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app import get_settings\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import ConflictException\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.files import create_save_image_sizes, 
make_frontend_url\nfrom app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import is_user_itself\nfrom app.api.helpers.utilities import get_serializer, str_generator\nfrom app.api.schema.users import UserSchema, UserSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.email_notification import EmailNotification\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.feedback import Feedback\nfrom app.models.mail import USER_REGISTER_WITH_PASSWORD\nfrom app.models.notification import Notification\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\nfrom app.models.users_events_role import UsersEventsRoles\n\nuser_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')\n\n\nclass UserList(ResourceList):\n \"\"\"\n List and create Users\n \"\"\"\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n method to check if there is an existing user with same email which is received in data to create a new user\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if db.session.query(User.id).filter_by(email=data['email'], deleted_at=None).scalar() is not None:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n def after_create_object(self, user, data, view_kwargs):\n \"\"\"\n method to send-\n email notification\n mail link for register verification\n add image urls\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n s = get_serializer()\n hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')\n link = make_frontend_url('/email/verify'.format(id=user.id), {'token': hash})\n send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],\n email=user.email)\n send_email_confirmation(user.email, link)\n\n if data.get('original_image_url'):\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']\n del uploaded_images['large_image_url']\n self.session.query(User).filter_by(id=user.id).update(uploaded_images)\n\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass UserDetail(ResourceDetail):\n \"\"\"\n User detail by id\n \"\"\"\n def before_get(self, args, kwargs):\n\n if current_user.is_admin or current_user.is_super_admin or current_user:\n self.schema = UserSchema\n else:\n self.schema = UserSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method for user object\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('notification_id') is not None:\n notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')\n if notification.user_id is not None:\n view_kwargs['id'] = notification.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('feedback_id') is not None:\n print(view_kwargs['feedback_id'])\n feedback = safe_query(self, Feedback, 'id', 
view_kwargs['feedback_id'], 'feedback_id')\n if feedback.user_id is not None:\n view_kwargs['id'] = feedback.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.user is not None:\n if (not has_access('is_user_itself',\n user_id=attendee.user.id) or not has_access('is_coorganizer',\n event_id=attendee.event_id)):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n view_kwargs['id'] = attendee.user.id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('event_invoice_id') is not None:\n event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')\n if event_invoice.user_id is not None:\n view_kwargs['id'] = event_invoice.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_role_id') is not None:\n users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],\n 'users_events_role_id')\n if users_events_role.user_id is not None:\n view_kwargs['id'] = users_events_role.user_id\n\n if view_kwargs.get('speaker_id') is not None:\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n if speaker.user_id is not None:\n view_kwargs['id'] = speaker.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('session_id') is not None:\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if session.creator_id is not None:\n view_kwargs['id'] = session.creator_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('access_code_id') is not None:\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n if access_code.marketer_id is not None:\n view_kwargs['id'] = access_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('discount_code_id') is not None:\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n if discount_code.marketer_id is not None:\n view_kwargs['id'] = discount_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('email_notification_id') is not None:\n email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],\n 'email_notification_id')\n if email_notification.user_id is not None:\n view_kwargs['id'] = email_notification.user_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, user, data, view_kwargs):\n if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n data['original_image_url'] = uploaded_images['original_image_url']\n data['small_image_url'] = uploaded_images['thumbnail_image_url']\n data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']\n data['icon_image_url'] = uploaded_images['icon_image_url']\n\n if data.get('email') and data['email'] != user.email:\n view_kwargs['email_changed'] = user.email\n\n if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n\n def after_update_object(self, user, data, view_kwargs):\n \"\"\"\n method to mail user about email change\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('email_changed'):\n send_email_change_user_email(user, 
view_kwargs.get('email_changed'))\n\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id,id\", fetch_as=\"user_id\",\n model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,\n DiscountCode, EmailNotification, Speaker, User],\n fetch_key_url=\"notification_id, feedback_id, users_events_role_id, session_id, \\\n event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id\",\n leave_if=lambda a: a.get('attendee_id')), )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'after_update_object': after_update_object\n }}\n\n\nclass UserRelationship(ResourceRelationship):\n \"\"\"\n User Relationship\n \"\"\"\n decorators = (is_user_itself, )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User}\n\n\n@user_misc_routes.route('/users/checkEmail', methods=['POST'])\ndef is_email_available():\n email = request.json.get('email', None)\n if email:\n if get_count(db.session.query(User).filter_by(email=email)):\n return jsonify(\n result=\"False\"\n )\n else:\n return jsonify(\n result=\"True\"\n )\n else:\n abort(\n make_response(jsonify(error=\"Email field missing\"), 422)\n )\n", "path": "app/api/users.py"}]} | 3,281 | 129 |
gh_patches_debug_25413 | rasdani/github-patches | git_diff | getsentry__sentry-27105 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade from 21.6.1 to 21.6.2 migration error, relation already exists
### Environment
self-hosted (`onpremise` deployment)
### Version
21.6.1 upgrade to 21.6.2
### Steps to Reproduce
1. git fetch tags/21.6.2
2. ./install.sh
### Expected Result
Migration to succeed.
### Actual Result
```
django.db.utils.ProgrammingError: ProgrammingError('relation "sentry_groupedmessage_project_id_id_515aaa7e_uniq" already exists\n',)
SQL: ALTER TABLE "sentry_groupedmessage" ADD CONSTRAINT "sentry_groupedmessage_project_id_id_515aaa7e_uniq" UNIQUE ("project_id", "id")
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/migrations/0216_cdc_setup_replication_index.py`
Content:
```
1 # Generated by Django 1.11.29 on 2021-06-30 18:51
2
3 from django.db import migrations
4
5
6 class Migration(migrations.Migration):
7 # This flag is used to mark that a migration shouldn't be automatically run in
8 # production. We set this to True for operations that we think are risky and want
9 # someone from ops to run manually and monitor.
10 # General advice is that if in doubt, mark your migration as `is_dangerous`.
11 # Some things you should always mark as dangerous:
12 # - Large data migrations. Typically we want these to be run manually by ops so that
13 # they can be monitored. Since data migrations will now hold a transaction open
14 # this is even more important.
15 # - Adding columns to highly active tables, even ones that are NULL.
16 is_dangerous = True
17
18 # This flag is used to decide whether to run this migration in a transaction or not.
19 # By default we prefer to run in a transaction, but for migrations where you want
20 # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
21 # want to create an index concurrently when adding one to an existing table.
22 # You'll also usually want to set this to `False` if you're writing a data
23 # migration, since we don't want the entire migration to run in one long-running
24 # transaction.
25 atomic = False
26
27 dependencies = [
28 ("sentry", "0215_fix_state"),
29 ]
30
31 operations = [
32 migrations.AlterUniqueTogether(
33 name="group",
34 unique_together={("project", "id"), ("project", "short_id")},
35 ),
36 migrations.RunSQL(
37 sql="""
38 ALTER TABLE sentry_groupasignee REPLICA IDENTITY USING INDEX
39 sentry_groupasignee_project_id_group_id_fbf4364e_uniq
40 """,
41 reverse_sql="""
42 ALTER TABLE sentry_groupasignee REPLICA IDENTITY DEFAULT
43 """,
44 hints={"tables": ["sentry_groupasignee"]},
45 ),
46 migrations.RunSQL(
47 sql="""
48 ALTER TABLE sentry_groupedmessage REPLICA IDENTITY USING INDEX
49 sentry_groupedmessage_project_id_id_515aaa7e_uniq
50 """,
51 reverse_sql="""
52 ALTER TABLE sentry_groupedmessage REPLICA IDENTITY DEFAULT
53 """,
54 hints={"tables": ["sentry_groupedmessage"]},
55 ),
56 ]
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/migrations/0216_cdc_setup_replication_index.py b/src/sentry/migrations/0216_cdc_setup_replication_index.py
--- a/src/sentry/migrations/0216_cdc_setup_replication_index.py
+++ b/src/sentry/migrations/0216_cdc_setup_replication_index.py
@@ -29,9 +29,33 @@
]
operations = [
- migrations.AlterUniqueTogether(
- name="group",
- unique_together={("project", "id"), ("project", "short_id")},
+ migrations.SeparateDatabaseAndState(
+ database_operations=[
+ migrations.RunSQL(
+ """
+ CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS "sentry_groupedmessage_project_id_id_515aaa7e_uniq" ON "sentry_groupedmessage" ("project_id", "id");
+ """,
+ reverse_sql="""
+ DROP INDEX CONCURRENTLY IF EXISTS sentry_groupedmessage_project_id_id_515aaa7e_uniq;
+ """,
+ hints={"tables": ["sentry_groupedmessage"]},
+ ),
+ migrations.RunSQL(
+ """
+ ALTER TABLE "sentry_groupedmessage" ADD CONSTRAINT "sentry_groupedmessage_project_id_id_515aaa7e_uniq" UNIQUE USING INDEX "sentry_groupedmessage_project_id_id_515aaa7e_uniq";
+ """,
+ reverse_sql="""
+ ALTER TABLE "sentry_groupedmessage" DROP CONSTRAINT IF EXISTS "sentry_groupedmessage_project_id_id_515aaa7e_uniq";
+ """,
+ hints={"tables": ["sentry_groupedmessage"]},
+ ),
+ ],
+ state_operations=[
+ migrations.AlterUniqueTogether(
+ name="group",
+ unique_together={("project", "id"), ("project", "short_id")},
+ ),
+ ],
),
migrations.RunSQL(
sql="""
| {"golden_diff": "diff --git a/src/sentry/migrations/0216_cdc_setup_replication_index.py b/src/sentry/migrations/0216_cdc_setup_replication_index.py\n--- a/src/sentry/migrations/0216_cdc_setup_replication_index.py\n+++ b/src/sentry/migrations/0216_cdc_setup_replication_index.py\n@@ -29,9 +29,33 @@\n ]\n \n operations = [\n- migrations.AlterUniqueTogether(\n- name=\"group\",\n- unique_together={(\"project\", \"id\"), (\"project\", \"short_id\")},\n+ migrations.SeparateDatabaseAndState(\n+ database_operations=[\n+ migrations.RunSQL(\n+ \"\"\"\n+ CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" ON \"sentry_groupedmessage\" (\"project_id\", \"id\");\n+ \"\"\",\n+ reverse_sql=\"\"\"\n+ DROP INDEX CONCURRENTLY IF EXISTS sentry_groupedmessage_project_id_id_515aaa7e_uniq;\n+ \"\"\",\n+ hints={\"tables\": [\"sentry_groupedmessage\"]},\n+ ),\n+ migrations.RunSQL(\n+ \"\"\"\n+ ALTER TABLE \"sentry_groupedmessage\" ADD CONSTRAINT \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" UNIQUE USING INDEX \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\";\n+ \"\"\",\n+ reverse_sql=\"\"\"\n+ ALTER TABLE \"sentry_groupedmessage\" DROP CONSTRAINT IF EXISTS \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\";\n+ \"\"\",\n+ hints={\"tables\": [\"sentry_groupedmessage\"]},\n+ ),\n+ ],\n+ state_operations=[\n+ migrations.AlterUniqueTogether(\n+ name=\"group\",\n+ unique_together={(\"project\", \"id\"), (\"project\", \"short_id\")},\n+ ),\n+ ],\n ),\n migrations.RunSQL(\n sql=\"\"\"\n", "issue": "Upgrade from 21.6.1 to 21.6.2 migration error, relation already exists\n### Environment\n\nself-hosted (`onpremise` deployment)\n\n### Version\n\n21.6.1 upgrade to 21.6.1\n\n### Steps to Reproduce\n\n1. git fetch tags/21.6.2\r\n2. ./install.sh\n\n### Expected Result\n\nMigration to succeed.\n\n### Actual Result\n\n```\r\ndjango.db.utils.ProgrammingError: ProgrammingError('relation \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" already exists\\n',)\r\nSQL: ALTER TABLE \"sentry_groupedmessage\" ADD CONSTRAINT \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" UNIQUE (\"project_id\", \"id\")\r\n```\n", "before_files": [{"content": "# Generated by Django 1.11.29 on 2021-06-30 18:51\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n # This flag is used to mark that a migration shouldn't be automatically run in\n # production. We set this to True for operations that we think are risky and want\n # someone from ops to run manually and monitor.\n # General advice is that if in doubt, mark your migration as `is_dangerous`.\n # Some things you should always mark as dangerous:\n # - Large data migrations. Typically we want these to be run manually by ops so that\n # they can be monitored. Since data migrations will now hold a transaction open\n # this is even more important.\n # - Adding columns to highly active tables, even ones that are NULL.\n is_dangerous = True\n\n # This flag is used to decide whether to run this migration in a transaction or not.\n # By default we prefer to run in a transaction, but for migrations where you want\n # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. 
Typically you'll\n # want to create an index concurrently when adding one to an existing table.\n # You'll also usually want to set this to `False` if you're writing a data\n # migration, since we don't want the entire migration to run in one long-running\n # transaction.\n atomic = False\n\n dependencies = [\n (\"sentry\", \"0215_fix_state\"),\n ]\n\n operations = [\n migrations.AlterUniqueTogether(\n name=\"group\",\n unique_together={(\"project\", \"id\"), (\"project\", \"short_id\")},\n ),\n migrations.RunSQL(\n sql=\"\"\"\n ALTER TABLE sentry_groupasignee REPLICA IDENTITY USING INDEX\n sentry_groupasignee_project_id_group_id_fbf4364e_uniq\n \"\"\",\n reverse_sql=\"\"\"\n ALTER TABLE sentry_groupasignee REPLICA IDENTITY DEFAULT\n \"\"\",\n hints={\"tables\": [\"sentry_groupasignee\"]},\n ),\n migrations.RunSQL(\n sql=\"\"\"\n ALTER TABLE sentry_groupedmessage REPLICA IDENTITY USING INDEX\n sentry_groupedmessage_project_id_id_515aaa7e_uniq\n \"\"\",\n reverse_sql=\"\"\"\n ALTER TABLE sentry_groupedmessage REPLICA IDENTITY DEFAULT\n \"\"\",\n hints={\"tables\": [\"sentry_groupedmessage\"]},\n ),\n ]\n", "path": "src/sentry/migrations/0216_cdc_setup_replication_index.py"}], "after_files": [{"content": "# Generated by Django 1.11.29 on 2021-06-30 18:51\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n # This flag is used to mark that a migration shouldn't be automatically run in\n # production. We set this to True for operations that we think are risky and want\n # someone from ops to run manually and monitor.\n # General advice is that if in doubt, mark your migration as `is_dangerous`.\n # Some things you should always mark as dangerous:\n # - Large data migrations. Typically we want these to be run manually by ops so that\n # they can be monitored. Since data migrations will now hold a transaction open\n # this is even more important.\n # - Adding columns to highly active tables, even ones that are NULL.\n is_dangerous = True\n\n # This flag is used to decide whether to run this migration in a transaction or not.\n # By default we prefer to run in a transaction, but for migrations where you want\n # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. 
Typically you'll\n # want to create an index concurrently when adding one to an existing table.\n # You'll also usually want to set this to `False` if you're writing a data\n # migration, since we don't want the entire migration to run in one long-running\n # transaction.\n atomic = False\n\n dependencies = [\n (\"sentry\", \"0215_fix_state\"),\n ]\n\n operations = [\n migrations.SeparateDatabaseAndState(\n database_operations=[\n migrations.RunSQL(\n \"\"\"\n CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" ON \"sentry_groupedmessage\" (\"project_id\", \"id\");\n \"\"\",\n reverse_sql=\"\"\"\n DROP INDEX CONCURRENTLY IF EXISTS sentry_groupedmessage_project_id_id_515aaa7e_uniq;\n \"\"\",\n hints={\"tables\": [\"sentry_groupedmessage\"]},\n ),\n migrations.RunSQL(\n \"\"\"\n ALTER TABLE \"sentry_groupedmessage\" ADD CONSTRAINT \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" UNIQUE USING INDEX \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\";\n \"\"\",\n reverse_sql=\"\"\"\n ALTER TABLE \"sentry_groupedmessage\" DROP CONSTRAINT IF EXISTS \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\";\n \"\"\",\n hints={\"tables\": [\"sentry_groupedmessage\"]},\n ),\n ],\n state_operations=[\n migrations.AlterUniqueTogether(\n name=\"group\",\n unique_together={(\"project\", \"id\"), (\"project\", \"short_id\")},\n ),\n ],\n ),\n migrations.RunSQL(\n sql=\"\"\"\n ALTER TABLE sentry_groupasignee REPLICA IDENTITY USING INDEX\n sentry_groupasignee_project_id_group_id_fbf4364e_uniq\n \"\"\",\n reverse_sql=\"\"\"\n ALTER TABLE sentry_groupasignee REPLICA IDENTITY DEFAULT\n \"\"\",\n hints={\"tables\": [\"sentry_groupasignee\"]},\n ),\n migrations.RunSQL(\n sql=\"\"\"\n ALTER TABLE sentry_groupedmessage REPLICA IDENTITY USING INDEX\n sentry_groupedmessage_project_id_id_515aaa7e_uniq\n \"\"\",\n reverse_sql=\"\"\"\n ALTER TABLE sentry_groupedmessage REPLICA IDENTITY DEFAULT\n \"\"\",\n hints={\"tables\": [\"sentry_groupedmessage\"]},\n ),\n ]\n", "path": "src/sentry/migrations/0216_cdc_setup_replication_index.py"}]} | 1,100 | 444 |
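
For reference, the heart of this fix is making the constraint creation idempotent: `CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS` succeeds even when an earlier deploy already created the index, while `SeparateDatabaseAndState` keeps Django's model state in sync without re-issuing the DDL. A minimal sketch of that pattern is below; it reuses the app label, dependency, and index name from the record above, omits the follow-up `ALTER TABLE ... ADD CONSTRAINT ... UNIQUE USING INDEX` step for brevity, and assumes PostgreSQL, so treat it as an illustration rather than the exact shipped migration.

```python
# Illustrative idempotent-migration sketch (assumes PostgreSQL).
from django.db import migrations


class Migration(migrations.Migration):
    # CREATE INDEX CONCURRENTLY cannot run inside a transaction block.
    atomic = False

    dependencies = [("sentry", "0215_fix_state")]

    operations = [
        migrations.SeparateDatabaseAndState(
            database_operations=[
                migrations.RunSQL(
                    sql=(
                        'CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS '
                        '"sentry_groupedmessage_project_id_id_515aaa7e_uniq" '
                        'ON "sentry_groupedmessage" ("project_id", "id");'
                    ),
                    reverse_sql=(
                        'DROP INDEX CONCURRENTLY IF EXISTS '
                        '"sentry_groupedmessage_project_id_id_515aaa7e_uniq";'
                    ),
                    hints={"tables": ["sentry_groupedmessage"]},
                ),
            ],
            state_operations=[
                migrations.AlterUniqueTogether(
                    name="group",
                    unique_together={("project", "id"), ("project", "short_id")},
                ),
            ],
        ),
    ]
```
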
gh_patches_debug_14886 | rasdani/github-patches | git_diff | DDMAL__CantusDB-582 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
On Century Detail pages, sometimes unpublished sources are listed
When visiting century/3863 while not logged in, there is a link to source/672452. Clicking it returns a 403 Access Denied error. We need to ensure that links to sources that are inaccessible to a user are never displayed.
This bug occurs on both staging and production.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/views/century.py`
Content:
```
1 from django.views.generic import DetailView
2 from main_app.models import Century
3
4 class CenturyDetailView(DetailView):
5 model = Century
6 context_object_name = "century"
7 template_name = "century_detail.html"
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/main_app/views/century.py b/django/cantusdb_project/main_app/views/century.py
--- a/django/cantusdb_project/main_app/views/century.py
+++ b/django/cantusdb_project/main_app/views/century.py
@@ -1,7 +1,20 @@
from django.views.generic import DetailView
-from main_app.models import Century
+from main_app.models import Century, Source
+from typing import Any
class CenturyDetailView(DetailView):
model = Century
context_object_name = "century"
template_name = "century_detail.html"
+
+ def get_context_data(self, **kwargs: Any) -> dict[str, Any]:
+ context = super().get_context_data(**kwargs)
+ century = self.get_object()
+ user = self.request.user
+ display_unpublished = user.is_authenticated
+ sources = Source.objects.filter(century=century)
+ if not display_unpublished:
+ sources = sources.filter(published=True)
+ sources=sources.only("title", "id")
+ context["sources"] = sources
+ return context
\ No newline at end of file
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/century.py b/django/cantusdb_project/main_app/views/century.py\n--- a/django/cantusdb_project/main_app/views/century.py\n+++ b/django/cantusdb_project/main_app/views/century.py\n@@ -1,7 +1,20 @@\n from django.views.generic import DetailView\n-from main_app.models import Century\n+from main_app.models import Century, Source\n+from typing import Any\n \n class CenturyDetailView(DetailView):\n model = Century\n context_object_name = \"century\"\n template_name = \"century_detail.html\"\n+\n+ def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n+ context = super().get_context_data(**kwargs)\n+ century = self.get_object()\n+ user = self.request.user\n+ display_unpublished = user.is_authenticated\n+ sources = Source.objects.filter(century=century)\n+ if not display_unpublished:\n+ sources = sources.filter(published=True)\n+ sources=sources.only(\"title\", \"id\")\n+ context[\"sources\"] = sources\n+ return context\n\\ No newline at end of file\n", "issue": "On Century Detail pages, sometimes unpublished sources are listed\nVisiting century/3863 while not logged in, there's a link to source/672452. When I click on it, I get a 403 Access Denied error. We need to ensure that links to sources that are inaccessible to a user are never displayed.\r\n\r\nThis bug occurs on both staging and production.\n", "before_files": [{"content": "from django.views.generic import DetailView\nfrom main_app.models import Century\n\nclass CenturyDetailView(DetailView):\n model = Century\n context_object_name = \"century\"\n template_name = \"century_detail.html\"\n", "path": "django/cantusdb_project/main_app/views/century.py"}], "after_files": [{"content": "from django.views.generic import DetailView\nfrom main_app.models import Century, Source\nfrom typing import Any\n\nclass CenturyDetailView(DetailView):\n model = Century\n context_object_name = \"century\"\n template_name = \"century_detail.html\"\n\n def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n context = super().get_context_data(**kwargs)\n century = self.get_object()\n user = self.request.user\n display_unpublished = user.is_authenticated\n sources = Source.objects.filter(century=century)\n if not display_unpublished:\n sources = sources.filter(published=True)\n sources=sources.only(\"title\", \"id\")\n context[\"sources\"] = sources\n return context", "path": "django/cantusdb_project/main_app/views/century.py"}]} | 404 | 265 |
gh_patches_debug_12512 | rasdani/github-patches | git_diff | PaddlePaddle__models-2482 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deeplabv3+ eval reports a dimension error
On the paddle1.5 branch, deeplabv3+ evaluation fails with the following error:
Traceback (most recent call last):
File "./eval.py", line 131, in <module>
fetch_list=[pred, miou, out_wrong, out_correct])
File "/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/executor.py", line 650, in run
use_program_cache=use_program_cache)
File "/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/executor.py", line 748, in _run
exe.run(program.desc, scope, 0, True, True, fetch_var_name)
paddle.fluid.core_avx.EnforceNotMet: Invoke operator bilinear_interp error.
Python Callstacks:
File "/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/framework.py", line 1699, in append_op
attrs=kwargs.get("attrs", None))
File "/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/layer_helper.py", line 43, in append_op
return self.main_program.current_block().append_op(*args, **kwargs)
File "/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/layers/nn.py", line 7637, in image_resize
attrs=attrs)
File "/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/layers/nn.py", line 7742, in resize_bilinear
align_corners, align_mode)
File "./eval.py", line 84, in <module>
img = fluid.layers.resize_bilinear(img, image_shape)
C++ Callstacks:
Enforce failed. Expected dim_x.size() == 4, but received dim_x.size():3 != 4:4.
X's dimension must be 4 at [/ssd1/xiege/paddle_ce/Paddle/paddle/fluid/operators/interpolate_op.cc:40]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PaddleCV/deeplabv3+/eval.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4 import os
5 if 'FLAGS_fraction_of_gpu_memory_to_use' not in os.environ:
6 os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98'
7 os.environ['FLAGS_enable_parallel_graph'] = '1'
8
9 import paddle
10 import paddle.fluid as fluid
11 import numpy as np
12 import argparse
13 from reader import CityscapeDataset
14 import reader
15 import models
16 import sys
17 import utility
18
19 parser = argparse.ArgumentParser()
20 add_arg = lambda *args: utility.add_arguments(*args, argparser=parser)
21
22 # yapf: disable
23 add_arg('total_step', int, -1, "Number of the step to be evaluated, -1 for full evaluation.")
24 add_arg('init_weights_path', str, None, "Path of the weights to evaluate.")
25 add_arg('dataset_path', str, None, "Cityscape dataset path.")
26 add_arg('use_gpu', bool, True, "Whether use GPU or CPU.")
27 add_arg('num_classes', int, 19, "Number of classes.")
28 add_arg('use_py_reader', bool, True, "Use py_reader.")
29 add_arg('norm_type', str, 'bn', "Normalization type, should be 'bn' or 'gn'.")
30 #yapf: enable
31
32
33 def mean_iou(pred, label):
34 label = fluid.layers.elementwise_min(
35 label, fluid.layers.assign(np.array(
36 [num_classes], dtype=np.int32)))
37 label_ignore = (label == num_classes).astype('int32')
38 label_nignore = (label != num_classes).astype('int32')
39
40 pred = pred * label_nignore + label_ignore * num_classes
41
42 miou, wrong, correct = fluid.layers.mean_iou(pred, label, num_classes + 1)
43 return miou, wrong, correct
44
45
46 def load_model():
47 if os.path.isdir(args.init_weights_path):
48 fluid.io.load_params(
49 exe, dirname=args.init_weights_path, main_program=tp)
50 else:
51 fluid.io.load_params(
52 exe, dirname="", filename=args.init_weights_path, main_program=tp)
53
54
55 CityscapeDataset = reader.CityscapeDataset
56
57 args = parser.parse_args()
58
59 models.clean()
60 models.is_train = False
61 models.default_norm_type = args.norm_type
62 deeplabv3p = models.deeplabv3p
63
64 image_shape = [1025, 2049]
65 eval_shape = [1024, 2048]
66
67 sp = fluid.Program()
68 tp = fluid.Program()
69 batch_size = 1
70 reader.default_config['crop_size'] = -1
71 reader.default_config['shuffle'] = False
72 num_classes = args.num_classes
73
74 with fluid.program_guard(tp, sp):
75 if args.use_py_reader:
76 py_reader = fluid.layers.py_reader(capacity=64,
77 shapes=[[1, 3, 0, 0], [1] + eval_shape],
78 dtypes=['float32', 'int32'])
79 img, label = fluid.layers.read_file(py_reader)
80 else:
81 img = fluid.layers.data(name='img', shape=[3, 0, 0], dtype='float32')
82 label = fluid.layers.data(name='label', shape=eval_shape, dtype='int32')
83
84 img = fluid.layers.resize_bilinear(img, image_shape)
85 logit = deeplabv3p(img)
86 logit = fluid.layers.resize_bilinear(logit, eval_shape)
87 pred = fluid.layers.argmax(logit, axis=1).astype('int32')
88 miou, out_wrong, out_correct = mean_iou(pred, label)
89
90 tp = tp.clone(True)
91 fluid.memory_optimize(
92 tp,
93 print_log=False,
94 skip_opt_set=set([pred.name, miou, out_wrong, out_correct]),
95 level=1)
96
97 place = fluid.CPUPlace()
98 if args.use_gpu:
99 place = fluid.CUDAPlace(0)
100 exe = fluid.Executor(place)
101 exe.run(sp)
102
103 if args.init_weights_path:
104 print("load from:", args.init_weights_path)
105 load_model()
106
107 dataset = CityscapeDataset(args.dataset_path, 'val')
108 if args.total_step == -1:
109 total_step = len(dataset.label_files)
110 else:
111 total_step = args.total_step
112
113 batches = dataset.get_batch_generator(batch_size, total_step)
114 if args.use_py_reader:
115 py_reader.decorate_tensor_provider(lambda :[ (yield b[1],b[2]) for b in batches])
116 py_reader.start()
117
118 sum_iou = 0
119 all_correct = np.array([0], dtype=np.int64)
120 all_wrong = np.array([0], dtype=np.int64)
121
122 for i in range(total_step):
123 if not args.use_py_reader:
124 _, imgs, labels, names = next(batches)
125 result = exe.run(tp,
126 feed={'img': imgs,
127 'label': labels},
128 fetch_list=[pred, miou, out_wrong, out_correct])
129 else:
130 result = exe.run(tp,
131 fetch_list=[pred, miou, out_wrong, out_correct])
132
133 wrong = result[2][:-1] + all_wrong
134 right = result[3][:-1] + all_correct
135 all_wrong = wrong.copy()
136 all_correct = right.copy()
137 mp = (wrong + right) != 0
138 miou2 = np.mean((right[mp] * 1.0 / (right[mp] + wrong[mp])))
139 print('step: %s, mIoU: %s' % (i + 1, miou2))
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PaddleCV/deeplabv3+/eval.py b/PaddleCV/deeplabv3+/eval.py
--- a/PaddleCV/deeplabv3+/eval.py
+++ b/PaddleCV/deeplabv3+/eval.py
@@ -112,7 +112,7 @@
batches = dataset.get_batch_generator(batch_size, total_step)
if args.use_py_reader:
- py_reader.decorate_tensor_provider(lambda :[ (yield b[1],b[2]) for b in batches])
+ py_reader.decorate_tensor_provider(lambda :[ (yield b[0],b[1]) for b in batches])
py_reader.start()
sum_iou = 0
@@ -137,3 +137,5 @@
mp = (wrong + right) != 0
miou2 = np.mean((right[mp] * 1.0 / (right[mp] + wrong[mp])))
print('step: %s, mIoU: %s' % (i + 1, miou2))
+
+print('eval done!')
| {"golden_diff": "diff --git a/PaddleCV/deeplabv3+/eval.py b/PaddleCV/deeplabv3+/eval.py\n--- a/PaddleCV/deeplabv3+/eval.py\n+++ b/PaddleCV/deeplabv3+/eval.py\n@@ -112,7 +112,7 @@\n \n batches = dataset.get_batch_generator(batch_size, total_step)\n if args.use_py_reader:\n- py_reader.decorate_tensor_provider(lambda :[ (yield b[1],b[2]) for b in batches])\n+ py_reader.decorate_tensor_provider(lambda :[ (yield b[0],b[1]) for b in batches])\n py_reader.start()\n \n sum_iou = 0\n@@ -137,3 +137,5 @@\n mp = (wrong + right) != 0\n miou2 = np.mean((right[mp] * 1.0 / (right[mp] + wrong[mp])))\n print('step: %s, mIoU: %s' % (i + 1, miou2))\n+\n+print('eval done!')\n", "issue": "deeplabv3+ eval \u62a5\u7ef4\u5ea6\u9519\u8bef\n\u5728paddle1.5\u5206\u652f\u4e0b\uff0cdeeplabv3+ \u7684\u8bc4\u4f30\u6709\u5982\u4e0b\u62a5\u9519\uff1a\r\nTraceback (most recent call last):\r\n File \"./eval.py\", line 131, in <module>\r\n fetch_list=[pred, miou, out_wrong, out_correct])\r\n File \"/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/executor.py\", line 650, in run\r\n use_program_cache=use_program_cache)\r\n File \"/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/executor.py\", line 748, in _run\r\n exe.run(program.desc, scope, 0, True, True, fetch_var_name)\r\npaddle.fluid.core_avx.EnforceNotMet: Invoke operator bilinear_interp error.\r\nPython Callstacks:\r\n File \"/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/framework.py\", line 1699, in append_op\r\n attrs=kwargs.get(\"attrs\", None))\r\n File \"/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/layer_helper.py\", line 43, in append_op\r\n return self.main_program.current_block().append_op(*args, **kwargs)\r\n File \"/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/layers/nn.py\", line 7637, in image_resize\r\n attrs=attrs)\r\n File \"/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/layers/nn.py\", line 7742, in resize_bilinear\r\n align_corners, align_mode)\r\n File \"./eval.py\", line 84, in <module>\r\n img = fluid.layers.resize_bilinear(img, image_shape)\r\nC++ Callstacks:\r\nEnforce failed. 
Expected dim_x.size() == 4, but received dim_x.size():3 != 4:4.\r\nX's dimension must be 4 at [/ssd1/xiege/paddle_ce/Paddle/paddle/fluid/operators/interpolate_op.cc:40]\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nif 'FLAGS_fraction_of_gpu_memory_to_use' not in os.environ:\n os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98'\nos.environ['FLAGS_enable_parallel_graph'] = '1'\n\nimport paddle\nimport paddle.fluid as fluid\nimport numpy as np\nimport argparse\nfrom reader import CityscapeDataset\nimport reader\nimport models\nimport sys\nimport utility\n\nparser = argparse.ArgumentParser()\nadd_arg = lambda *args: utility.add_arguments(*args, argparser=parser)\n\n# yapf: disable\nadd_arg('total_step', int, -1, \"Number of the step to be evaluated, -1 for full evaluation.\")\nadd_arg('init_weights_path', str, None, \"Path of the weights to evaluate.\")\nadd_arg('dataset_path', str, None, \"Cityscape dataset path.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU or CPU.\")\nadd_arg('num_classes', int, 19, \"Number of classes.\")\nadd_arg('use_py_reader', bool, True, \"Use py_reader.\")\nadd_arg('norm_type', str, 'bn', \"Normalization type, should be 'bn' or 'gn'.\")\n#yapf: enable\n\n\ndef mean_iou(pred, label):\n label = fluid.layers.elementwise_min(\n label, fluid.layers.assign(np.array(\n [num_classes], dtype=np.int32)))\n label_ignore = (label == num_classes).astype('int32')\n label_nignore = (label != num_classes).astype('int32')\n\n pred = pred * label_nignore + label_ignore * num_classes\n\n miou, wrong, correct = fluid.layers.mean_iou(pred, label, num_classes + 1)\n return miou, wrong, correct\n\n\ndef load_model():\n if os.path.isdir(args.init_weights_path):\n fluid.io.load_params(\n exe, dirname=args.init_weights_path, main_program=tp)\n else:\n fluid.io.load_params(\n exe, dirname=\"\", filename=args.init_weights_path, main_program=tp)\n\n\nCityscapeDataset = reader.CityscapeDataset\n\nargs = parser.parse_args()\n\nmodels.clean()\nmodels.is_train = False\nmodels.default_norm_type = args.norm_type\ndeeplabv3p = models.deeplabv3p\n\nimage_shape = [1025, 2049]\neval_shape = [1024, 2048]\n\nsp = fluid.Program()\ntp = fluid.Program()\nbatch_size = 1\nreader.default_config['crop_size'] = -1\nreader.default_config['shuffle'] = False\nnum_classes = args.num_classes\n\nwith fluid.program_guard(tp, sp):\n if args.use_py_reader:\n py_reader = fluid.layers.py_reader(capacity=64,\n shapes=[[1, 3, 0, 0], [1] + eval_shape],\n dtypes=['float32', 'int32'])\n img, label = fluid.layers.read_file(py_reader)\n else:\n img = fluid.layers.data(name='img', shape=[3, 0, 0], dtype='float32')\n label = fluid.layers.data(name='label', shape=eval_shape, dtype='int32')\n\n img = fluid.layers.resize_bilinear(img, image_shape)\n logit = deeplabv3p(img)\n logit = fluid.layers.resize_bilinear(logit, eval_shape)\n pred = fluid.layers.argmax(logit, axis=1).astype('int32')\n miou, out_wrong, out_correct = mean_iou(pred, label)\n\ntp = tp.clone(True)\nfluid.memory_optimize(\n tp,\n print_log=False,\n skip_opt_set=set([pred.name, miou, out_wrong, out_correct]),\n level=1)\n\nplace = fluid.CPUPlace()\nif args.use_gpu:\n place = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\nexe.run(sp)\n\nif args.init_weights_path:\n print(\"load from:\", args.init_weights_path)\n load_model()\n\ndataset = CityscapeDataset(args.dataset_path, 'val')\nif args.total_step == -1:\n total_step = 
len(dataset.label_files)\nelse:\n total_step = args.total_step\n\nbatches = dataset.get_batch_generator(batch_size, total_step)\nif args.use_py_reader:\n py_reader.decorate_tensor_provider(lambda :[ (yield b[1],b[2]) for b in batches])\n py_reader.start()\n\nsum_iou = 0\nall_correct = np.array([0], dtype=np.int64)\nall_wrong = np.array([0], dtype=np.int64)\n\nfor i in range(total_step):\n if not args.use_py_reader:\n _, imgs, labels, names = next(batches)\n result = exe.run(tp,\n feed={'img': imgs,\n 'label': labels},\n fetch_list=[pred, miou, out_wrong, out_correct])\n else:\n result = exe.run(tp,\n fetch_list=[pred, miou, out_wrong, out_correct])\n\n wrong = result[2][:-1] + all_wrong\n right = result[3][:-1] + all_correct\n all_wrong = wrong.copy()\n all_correct = right.copy()\n mp = (wrong + right) != 0\n miou2 = np.mean((right[mp] * 1.0 / (right[mp] + wrong[mp])))\n print('step: %s, mIoU: %s' % (i + 1, miou2))\n", "path": "PaddleCV/deeplabv3+/eval.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nif 'FLAGS_fraction_of_gpu_memory_to_use' not in os.environ:\n os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98'\nos.environ['FLAGS_enable_parallel_graph'] = '1'\n\nimport paddle\nimport paddle.fluid as fluid\nimport numpy as np\nimport argparse\nfrom reader import CityscapeDataset\nimport reader\nimport models\nimport sys\nimport utility\n\nparser = argparse.ArgumentParser()\nadd_arg = lambda *args: utility.add_arguments(*args, argparser=parser)\n\n# yapf: disable\nadd_arg('total_step', int, -1, \"Number of the step to be evaluated, -1 for full evaluation.\")\nadd_arg('init_weights_path', str, None, \"Path of the weights to evaluate.\")\nadd_arg('dataset_path', str, None, \"Cityscape dataset path.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU or CPU.\")\nadd_arg('num_classes', int, 19, \"Number of classes.\")\nadd_arg('use_py_reader', bool, True, \"Use py_reader.\")\nadd_arg('norm_type', str, 'bn', \"Normalization type, should be 'bn' or 'gn'.\")\n#yapf: enable\n\n\ndef mean_iou(pred, label):\n label = fluid.layers.elementwise_min(\n label, fluid.layers.assign(np.array(\n [num_classes], dtype=np.int32)))\n label_ignore = (label == num_classes).astype('int32')\n label_nignore = (label != num_classes).astype('int32')\n\n pred = pred * label_nignore + label_ignore * num_classes\n\n miou, wrong, correct = fluid.layers.mean_iou(pred, label, num_classes + 1)\n return miou, wrong, correct\n\n\ndef load_model():\n if os.path.isdir(args.init_weights_path):\n fluid.io.load_params(\n exe, dirname=args.init_weights_path, main_program=tp)\n else:\n fluid.io.load_params(\n exe, dirname=\"\", filename=args.init_weights_path, main_program=tp)\n\n\nCityscapeDataset = reader.CityscapeDataset\n\nargs = parser.parse_args()\n\nmodels.clean()\nmodels.is_train = False\nmodels.default_norm_type = args.norm_type\ndeeplabv3p = models.deeplabv3p\n\nimage_shape = [1025, 2049]\neval_shape = [1024, 2048]\n\nsp = fluid.Program()\ntp = fluid.Program()\nbatch_size = 1\nreader.default_config['crop_size'] = -1\nreader.default_config['shuffle'] = False\nnum_classes = args.num_classes\n\nwith fluid.program_guard(tp, sp):\n if args.use_py_reader:\n py_reader = fluid.layers.py_reader(capacity=64,\n shapes=[[1, 3, 0, 0], [1] + eval_shape],\n dtypes=['float32', 'int32'])\n img, label = fluid.layers.read_file(py_reader)\n else:\n img = fluid.layers.data(name='img', shape=[3, 0, 0], dtype='float32')\n label = 
fluid.layers.data(name='label', shape=eval_shape, dtype='int32')\n\n img = fluid.layers.resize_bilinear(img, image_shape)\n logit = deeplabv3p(img)\n logit = fluid.layers.resize_bilinear(logit, eval_shape)\n pred = fluid.layers.argmax(logit, axis=1).astype('int32')\n miou, out_wrong, out_correct = mean_iou(pred, label)\n\ntp = tp.clone(True)\nfluid.memory_optimize(\n tp,\n print_log=False,\n skip_opt_set=set([pred.name, miou, out_wrong, out_correct]),\n level=1)\n\nplace = fluid.CPUPlace()\nif args.use_gpu:\n place = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\nexe.run(sp)\n\nif args.init_weights_path:\n print(\"load from:\", args.init_weights_path)\n load_model()\n\ndataset = CityscapeDataset(args.dataset_path, 'val')\nif args.total_step == -1:\n total_step = len(dataset.label_files)\nelse:\n total_step = args.total_step\n\nbatches = dataset.get_batch_generator(batch_size, total_step)\nif args.use_py_reader:\n py_reader.decorate_tensor_provider(lambda :[ (yield b[0],b[1]) for b in batches])\n py_reader.start()\n\nsum_iou = 0\nall_correct = np.array([0], dtype=np.int64)\nall_wrong = np.array([0], dtype=np.int64)\n\nfor i in range(total_step):\n if not args.use_py_reader:\n _, imgs, labels, names = next(batches)\n result = exe.run(tp,\n feed={'img': imgs,\n 'label': labels},\n fetch_list=[pred, miou, out_wrong, out_correct])\n else:\n result = exe.run(tp,\n fetch_list=[pred, miou, out_wrong, out_correct])\n\n wrong = result[2][:-1] + all_wrong\n right = result[3][:-1] + all_correct\n all_wrong = wrong.copy()\n all_correct = right.copy()\n mp = (wrong + right) != 0\n miou2 = np.mean((right[mp] * 1.0 / (right[mp] + wrong[mp])))\n print('step: %s, mIoU: %s' % (i + 1, miou2))\n\nprint('eval done!')\n", "path": "PaddleCV/deeplabv3+/eval.py"}]} | 2,313 | 243 |
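
The one-line change in this fix is about which slots of each generated batch get handed to `py_reader`; if the wrong slots are fed, the network receives an array whose rank does not match the declared `[1, 3, 0, 0]` image shape, which is consistent with the `dim_x.size():3 != 4` failure in the report. A small framework-free sketch of the provider pattern is below; the `(img, label, ...)` layout of each batch is an assumption made for illustration, not something taken from the reader implementation.

```python
# Illustrative tensor-provider wrapper: name the slots once so the expected
# batch layout is explicit and easy to audit.
def make_provider(batches):
    def provider():
        for batch in batches:
            img, label, *_ = batch  # assumed (img, label, ...) layout
            yield img, label
    return provider


# Dummy batches standing in for dataset.get_batch_generator(batch_size, total_step):
dummy_batches = [(["img-0"], ["label-0"], "frankfurt_0000"),
                 (["img-1"], ["label-1"], "frankfurt_0001")]
for img, label in make_provider(dummy_batches)():
    print(img, label)
```

With real data the provider function would be handed to `py_reader.decorate_tensor_provider(...)` in place of the inline lambda shown in the file above.
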
gh_patches_debug_17941 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-1213 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Default InputTransform for video classification provides no normalization
## 🐛 Bug
Normalization does not work.
### To Reproduce
```bash
tensor([[[[[157., 157., 158., ..., 168., 171., 173.],
[157., 157., 158., ..., 192., 189., 187.],
[158., 158., 159., ..., 201., 214., 217.],
...,
[ 81., 81., 82., ..., 90., 91., 93.],
[ 81., 81., 82., ..., 88., 87., 87.],
[ 81., 81., 82., ..., 87., 86., 86.]],
[[158., 157., 154., ..., 177., 177., 177.],
[159., 158., 156., ..., 177., 177., 178.],
[159., 158., 157., ..., 178., 178., 180.],
...,
[ 83., 83., 83., ..., 114., 112., 107.],
[ 83., 83., 83., ..., 113., 98., 93.],
[ 83., 83., 83., ..., 232., 211., 207.]],
[[160., 160., 159., ..., 112., 102., 86.],
[166., 166., 166., ..., 116., 103., 86.],
[175., 176., 175., ..., 114., 105., 86.],
...,
[ 81., 81., 83., ..., 93., 92., 93.],
[ 82., 83., 83., ..., 108., 104., 103.],
[ 82., 82., 82., ..., 99., 97., 97.]],
...,
```
#### Code sample
```python
import os
from argparse import ArgumentParser
from torch.utils.data.sampler import RandomSampler
import flash
from flash.core.finetuning import NoFreeze
from flash.core.data.utils import download_data
from flash.video import VideoClassificationData, VideoClassifier
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--backbone', type=str, default="x3d_xs")
parser.add_argument('--download', type=bool, default=True)
parser.add_argument('--train_folder', type=str, default=os.path.join(os.getcwd(),
"./data/kinetics/train"))
parser.add_argument('--val_folder', type=str, default=os.path.join(os.getcwd(),
"./data/kinetics/val"))
parser.add_argument('--predict_folder', type=str, default=os.path.join(os.getcwd(),
"./data/kinetics/predict"))
parser.add_argument('--max_epochs', type=int, default=1)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--gpus', type=int, default=None)
parser.add_argument('--fast_dev_run', type=int, default=False)
args = parser.parse_args()
if args.download:
# Dataset Credit:Download a video clip dataset.
# Find more datasets at https://pytorchvideo.readthedocs.io/en/latest/data.html
download_data("https://pl-flash-data.s3.amazonaws.com/kinetics.zip",
os.path.join(os.getcwd(), "data/"))
datamodule = VideoClassificationData.from_folders(
train_folder=args.train_folder,
val_folder=args.val_folder,
predict_folder=args.predict_folder,
batch_size=8,
clip_sampler="uniform",
clip_duration=2,
video_sampler=RandomSampler,
decode_audio=False,
num_workers=2,
)
for batch in datamodule.train_dataloader():
print(batch['video'])
break
model = VideoClassifier(backbone=args.backbone, num_classes=datamodule.num_classes, pretrained=False)
trainer = flash.Trainer(max_epochs=args.max_epochs, gpus=args.gpus, strategy='ddp', fast_dev_run=args.fast_dev_run)
trainer.finetune(model, datamodule=datamodule, strategy=NoFreeze())
```
### Expected behavior
The input values to the model should be in range [0-1].
### Environment
- OS (e.g., Linux): Linux
- Python version: 3.8
- PyTorch/Lightning/Flash Version (e.g., 1.10/1.5/0.7):
- GPU models and configuration:
- Any other relevant information:
### Additional context
By default [VideoClassInputTransform](https://github.com/PyTorchLightning/lightning-flash/blob/master/flash/video/classification/input_transform.py#L35) performs the necessary preprocessing steps. One step that is missing is dividing by 255, i.e. adding a `Lambda(lambda x: x/255.)` function.
I can create a PR with a quick fix.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/video/classification/input_transform.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from dataclasses import dataclass
15 from typing import Callable
16
17 import torch
18
19 from flash.core.data.io.input_transform import InputTransform
20 from flash.core.data.transforms import ApplyToKeys
21 from flash.core.utilities.imports import _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, requires
22
23 if _KORNIA_AVAILABLE:
24 import kornia.augmentation as K
25
26 if _PYTORCHVIDEO_AVAILABLE:
27 from pytorchvideo.transforms import UniformTemporalSubsample
28 from torchvision.transforms import CenterCrop, Compose, RandomCrop
29 else:
30 ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None
31
32
33 @requires("video")
34 @dataclass
35 class VideoClassificationInputTransform(InputTransform):
36
37 image_size: int = 244
38 temporal_sub_sample: int = 8
39 mean: torch.Tensor = torch.tensor([0.45, 0.45, 0.45])
40 std: torch.Tensor = torch.tensor([0.225, 0.225, 0.225])
41 data_format: str = "BCTHW"
42 same_on_frame: bool = False
43
44 def per_sample_transform(self) -> Callable:
45 if self.training:
46 per_sample_transform = [RandomCrop(self.image_size, pad_if_needed=True)]
47 else:
48 per_sample_transform = [CenterCrop(self.image_size)]
49
50 return ApplyToKeys(
51 "video", Compose([UniformTemporalSubsample(self.temporal_sub_sample)] + per_sample_transform)
52 )
53
54 def per_batch_transform_on_device(self) -> Callable:
55 return ApplyToKeys(
56 "video",
57 K.VideoSequential(
58 K.Normalize(self.mean, self.std),
59 data_format=self.data_format,
60 same_on_frame=self.same_on_frame,
61 ),
62 )
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flash/video/classification/input_transform.py b/flash/video/classification/input_transform.py
--- a/flash/video/classification/input_transform.py
+++ b/flash/video/classification/input_transform.py
@@ -30,6 +30,10 @@
ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None
+def normalize(x: torch.Tensor) -> torch.Tensor:
+ return x / 255.0
+
+
@requires("video")
@dataclass
class VideoClassificationInputTransform(InputTransform):
@@ -48,7 +52,8 @@
per_sample_transform = [CenterCrop(self.image_size)]
return ApplyToKeys(
- "video", Compose([UniformTemporalSubsample(self.temporal_sub_sample)] + per_sample_transform)
+ "video",
+ Compose([UniformTemporalSubsample(self.temporal_sub_sample), normalize] + per_sample_transform),
)
def per_batch_transform_on_device(self) -> Callable:
| {"golden_diff": "diff --git a/flash/video/classification/input_transform.py b/flash/video/classification/input_transform.py\n--- a/flash/video/classification/input_transform.py\n+++ b/flash/video/classification/input_transform.py\n@@ -30,6 +30,10 @@\n ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None\n \n \n+def normalize(x: torch.Tensor) -> torch.Tensor:\n+ return x / 255.0\n+\n+\n @requires(\"video\")\n @dataclass\n class VideoClassificationInputTransform(InputTransform):\n@@ -48,7 +52,8 @@\n per_sample_transform = [CenterCrop(self.image_size)]\n \n return ApplyToKeys(\n- \"video\", Compose([UniformTemporalSubsample(self.temporal_sub_sample)] + per_sample_transform)\n+ \"video\",\n+ Compose([UniformTemporalSubsample(self.temporal_sub_sample), normalize] + per_sample_transform),\n )\n \n def per_batch_transform_on_device(self) -> Callable:\n", "issue": "Default InputTransform for video classification provide no normalization\n## \ud83d\udc1b Bug\r\n\r\nNormalization does not work.\r\n\r\n### To Reproduce\r\n\r\n```bash\r\ntensor([[[[[157., 157., 158., ..., 168., 171., 173.],\r\n [157., 157., 158., ..., 192., 189., 187.],\r\n [158., 158., 159., ..., 201., 214., 217.],\r\n ...,\r\n [ 81., 81., 82., ..., 90., 91., 93.],\r\n [ 81., 81., 82., ..., 88., 87., 87.],\r\n [ 81., 81., 82., ..., 87., 86., 86.]],\r\n\r\n [[158., 157., 154., ..., 177., 177., 177.],\r\n [159., 158., 156., ..., 177., 177., 178.],\r\n [159., 158., 157., ..., 178., 178., 180.],\r\n ...,\r\n [ 83., 83., 83., ..., 114., 112., 107.],\r\n [ 83., 83., 83., ..., 113., 98., 93.],\r\n [ 83., 83., 83., ..., 232., 211., 207.]],\r\n\r\n [[160., 160., 159., ..., 112., 102., 86.],\r\n [166., 166., 166., ..., 116., 103., 86.],\r\n [175., 176., 175., ..., 114., 105., 86.],\r\n ...,\r\n [ 81., 81., 83., ..., 93., 92., 93.],\r\n [ 82., 83., 83., ..., 108., 104., 103.],\r\n [ 82., 82., 82., ..., 99., 97., 97.]],\r\n\r\n ...,\r\n\r\n```\r\n\r\n\r\n#### Code sample\r\n\r\n```python\r\nimport os\r\nfrom argparse import ArgumentParser\r\n\r\nfrom torch.utils.data.sampler import RandomSampler\r\n\r\nimport flash\r\nfrom flash.core.finetuning import NoFreeze\r\nfrom flash.core.data.utils import download_data\r\nfrom flash.video import VideoClassificationData, VideoClassifier\r\n\r\nif __name__ == '__main__':\r\n parser = ArgumentParser()\r\n parser.add_argument('--seed', type=int, default=1234)\r\n parser.add_argument('--backbone', type=str, default=\"x3d_xs\")\r\n parser.add_argument('--download', type=bool, default=True)\r\n parser.add_argument('--train_folder', type=str, default=os.path.join(os.getcwd(),\r\n \"./data/kinetics/train\"))\r\n parser.add_argument('--val_folder', type=str, default=os.path.join(os.getcwd(),\r\n \"./data/kinetics/val\"))\r\n parser.add_argument('--predict_folder', type=str, default=os.path.join(os.getcwd(),\r\n \"./data/kinetics/predict\"))\r\n parser.add_argument('--max_epochs', type=int, default=1)\r\n parser.add_argument('--learning_rate', type=float, default=1e-3)\r\n parser.add_argument('--gpus', type=int, default=None)\r\n parser.add_argument('--fast_dev_run', type=int, default=False)\r\n args = parser.parse_args()\r\n\r\n\r\n if args.download:\r\n # Dataset Credit:Download a video clip dataset.\r\n # Find more datasets at https://pytorchvideo.readthedocs.io/en/latest/data.html\r\n download_data(\"https://pl-flash-data.s3.amazonaws.com/kinetics.zip\",\r\n os.path.join(os.getcwd(), \"data/\"))\r\n\r\n datamodule = VideoClassificationData.from_folders(\r\n 
train_folder=args.train_folder,\r\n val_folder=args.val_folder,\r\n predict_folder=args.predict_folder,\r\n batch_size=8,\r\n clip_sampler=\"uniform\",\r\n clip_duration=2,\r\n video_sampler=RandomSampler,\r\n decode_audio=False,\r\n num_workers=2,\r\n )\r\n\r\n for batch in datamodule.train_dataloader():\r\n print(batch['video'])\r\n break\r\n \r\n model = VideoClassifier(backbone=args.backbone, num_classes=datamodule.num_classes, pretrained=False)\r\n\r\n trainer = flash.Trainer(max_epochs=args.max_epochs, gpus=args.gpus, strategy='ddp', fast_dev_run=args.fast_dev_run)\r\n trainer.finetune(model, datamodule=datamodule, strategy=NoFreeze())\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe input values to the model should be in range [0-1].\r\n\r\n### Environment\r\n \r\n - OS (e.g., Linux): Linux\r\n - Python version: 3.8\r\n - PyTorch/Lightning/Flash Version (e.g., 1.10/1.5/0.7): \r\n - GPU models and configuration: \r\n - Any other relevant information:\r\n\r\n### Additional context\r\n\r\nBy default [VideoClassInputTransform](https://github.com/PyTorchLightning/lightning-flash/blob/master/flash/video/classification/input_transform.py#L35) performs necessary preprocessing step. One step that is missing is dividing by 255. i.e. adding `Lambda(lambda x: x/255.)` function.\r\n\r\nI can create a PR with a quick fix.\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom dataclasses import dataclass\nfrom typing import Callable\n\nimport torch\n\nfrom flash.core.data.io.input_transform import InputTransform\nfrom flash.core.data.transforms import ApplyToKeys\nfrom flash.core.utilities.imports import _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, requires\n\nif _KORNIA_AVAILABLE:\n import kornia.augmentation as K\n\nif _PYTORCHVIDEO_AVAILABLE:\n from pytorchvideo.transforms import UniformTemporalSubsample\n from torchvision.transforms import CenterCrop, Compose, RandomCrop\nelse:\n ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None\n\n\n@requires(\"video\")\n@dataclass\nclass VideoClassificationInputTransform(InputTransform):\n\n image_size: int = 244\n temporal_sub_sample: int = 8\n mean: torch.Tensor = torch.tensor([0.45, 0.45, 0.45])\n std: torch.Tensor = torch.tensor([0.225, 0.225, 0.225])\n data_format: str = \"BCTHW\"\n same_on_frame: bool = False\n\n def per_sample_transform(self) -> Callable:\n if self.training:\n per_sample_transform = [RandomCrop(self.image_size, pad_if_needed=True)]\n else:\n per_sample_transform = [CenterCrop(self.image_size)]\n\n return ApplyToKeys(\n \"video\", Compose([UniformTemporalSubsample(self.temporal_sub_sample)] + per_sample_transform)\n )\n\n def per_batch_transform_on_device(self) -> Callable:\n return ApplyToKeys(\n \"video\",\n K.VideoSequential(\n K.Normalize(self.mean, self.std),\n data_format=self.data_format,\n same_on_frame=self.same_on_frame,\n ),\n )\n", "path": "flash/video/classification/input_transform.py"}], "after_files": 
[{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom dataclasses import dataclass\nfrom typing import Callable\n\nimport torch\n\nfrom flash.core.data.io.input_transform import InputTransform\nfrom flash.core.data.transforms import ApplyToKeys\nfrom flash.core.utilities.imports import _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, requires\n\nif _KORNIA_AVAILABLE:\n import kornia.augmentation as K\n\nif _PYTORCHVIDEO_AVAILABLE:\n from pytorchvideo.transforms import UniformTemporalSubsample\n from torchvision.transforms import CenterCrop, Compose, RandomCrop\nelse:\n ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None\n\n\ndef normalize(x: torch.Tensor) -> torch.Tensor:\n return x / 255.0\n\n\n@requires(\"video\")\n@dataclass\nclass VideoClassificationInputTransform(InputTransform):\n\n image_size: int = 244\n temporal_sub_sample: int = 8\n mean: torch.Tensor = torch.tensor([0.45, 0.45, 0.45])\n std: torch.Tensor = torch.tensor([0.225, 0.225, 0.225])\n data_format: str = \"BCTHW\"\n same_on_frame: bool = False\n\n def per_sample_transform(self) -> Callable:\n if self.training:\n per_sample_transform = [RandomCrop(self.image_size, pad_if_needed=True)]\n else:\n per_sample_transform = [CenterCrop(self.image_size)]\n\n return ApplyToKeys(\n \"video\",\n Compose([UniformTemporalSubsample(self.temporal_sub_sample), normalize] + per_sample_transform),\n )\n\n def per_batch_transform_on_device(self) -> Callable:\n return ApplyToKeys(\n \"video\",\n K.VideoSequential(\n K.Normalize(self.mean, self.std),\n data_format=self.data_format,\n same_on_frame=self.same_on_frame,\n ),\n )\n", "path": "flash/video/classification/input_transform.py"}]} | 2,239 | 223 |
gh_patches_debug_36629 | rasdani/github-patches | git_diff | svthalia__concrexit-3382 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Facedetection admin resubmit action
Sometimes the facedetection lambda can fail randomly. Photos are resubmitted nightly if that happens, but it would be nice to have an alternative that lets them be resubmitted sooner, without SSHing into the server.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/facedetection/admin.py`
Content:
```
1 from django.contrib import admin
2 from django.db.models.query import Prefetch
3 from django.urls import reverse
4 from django.utils.html import format_html
5 from django.utils.safestring import mark_safe
6
7 from .models import (
8 FaceDetectionPhoto,
9 PhotoFaceEncoding,
10 ReferenceFace,
11 ReferenceFaceEncoding,
12 )
13
14
15 class ReferenceFaceEncodingInline(admin.TabularInline):
16 model = ReferenceFaceEncoding
17 readonly_fields = ["num_matches"]
18 fields = ["num_matches"]
19 can_delete = False
20 extra = 0
21
22 def has_add_permission(self, request, obj=None):
23 return False # Encodings should not be created manually.
24
25 def get_queryset(self, request):
26 return super().get_queryset(request).only("reference")
27
28
29 @admin.register(ReferenceFace)
30 class ReferenceFaceAdmin(admin.ModelAdmin):
31 list_display = [
32 "user",
33 "status",
34 "created_at",
35 "marked_for_deletion_at",
36 ]
37
38 search_fields = [
39 "user__username",
40 "user__first_name",
41 "user__last_name",
42 ]
43
44 list_filter = ["status", "marked_for_deletion_at"]
45 inlines = [ReferenceFaceEncodingInline]
46
47 def get_readonly_fields(self, request, obj=None):
48 if obj is None:
49 return ["created_at", "submitted_at", "status"]
50 return ["file", "user", "created_at", "submitted_at", "status"]
51
52
53 class PhotoFaceEncodingInline(admin.TabularInline):
54 model = PhotoFaceEncoding
55 readonly_fields = ["view_matches"]
56 fields = ["view_matches"]
57 can_delete = False
58 extra = 0
59
60 @admin.display(description="Matches")
61 def view_matches(self, obj):
62 reference_faces = [match.reference for match in obj.matches.all()]
63 if not reference_faces:
64 return "-"
65
66 links = [
67 format_html(
68 '<a href="{url}">{text}</a>',
69 url=reverse(
70 "admin:facedetection_referenceface_change",
71 kwargs={"object_id": rf.pk},
72 ),
73 text=str(rf),
74 )
75 for rf in reference_faces
76 ]
77 return mark_safe(", ".join(links))
78
79 def has_add_permission(self, request, obj=None):
80 return False # Encodings should not be created manually.
81
82 def get_queryset(self, request):
83 return (
84 super()
85 .get_queryset(request)
86 .only("photo") # Don't select the 128 encoding fields.
87 .prefetch_related(
88 "photo__photo__album",
89 Prefetch(
90 "matches",
91 queryset=ReferenceFaceEncoding.objects.select_related(
92 "reference", "reference__user"
93 ).only("reference"),
94 ),
95 )
96 )
97
98
99 @admin.register(FaceDetectionPhoto)
100 class FaceDetectionPhotoAdmin(admin.ModelAdmin):
101 list_display = [
102 "__str__",
103 "status",
104 "submitted_at",
105 "num_faces",
106 ]
107
108 readonly_fields = [
109 "photo",
110 "submitted_at",
111 "status",
112 ]
113
114 search_fields = [
115 "photo__album__title",
116 "photo__album__date",
117 "photo__file",
118 ]
119
120 list_filter = ["status", "submitted_at"]
121 inlines = [PhotoFaceEncodingInline]
122
123 def get_queryset(self, request):
124 return (
125 super()
126 .get_queryset(request)
127 .select_related("photo")
128 .prefetch_related("photo__album")
129 .select_properties("num_faces")
130 )
131
132 def has_add_permission(self, request):
133 return False
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/facedetection/admin.py b/website/facedetection/admin.py
--- a/website/facedetection/admin.py
+++ b/website/facedetection/admin.py
@@ -1,9 +1,11 @@
-from django.contrib import admin
+from django.contrib import admin, messages
from django.db.models.query import Prefetch
from django.urls import reverse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
+from facedetection.services import trigger_facedetection_lambda
+
from .models import (
FaceDetectionPhoto,
PhotoFaceEncoding,
@@ -44,11 +46,25 @@
list_filter = ["status", "marked_for_deletion_at"]
inlines = [ReferenceFaceEncodingInline]
+ actions = ["resubmit_reference_faces"]
+
def get_readonly_fields(self, request, obj=None):
if obj is None:
return ["created_at", "submitted_at", "status"]
return ["file", "user", "created_at", "submitted_at", "status"]
+ @admin.action(description="Resubmit reference faces for analysis.")
+ def resubmit_reference_faces(self, request, queryset) -> list[ReferenceFace]:
+ querylist = list(
+ queryset.filter(
+ status=FaceDetectionPhoto.Status.PROCESSING,
+ )
+ )
+ if querylist:
+ trigger_facedetection_lambda(querylist)
+ messages.success(request, "Resubmit successful.")
+ return querylist
+
class PhotoFaceEncodingInline(admin.TabularInline):
model = PhotoFaceEncoding
@@ -120,6 +136,8 @@
list_filter = ["status", "submitted_at"]
inlines = [PhotoFaceEncodingInline]
+ actions = ["resubmit_face_detection_photos"]
+
def get_queryset(self, request):
return (
super()
@@ -131,3 +149,17 @@
def has_add_permission(self, request):
return False
+
+ @admin.action(description="Resubmits face detection photos for analysis.")
+ def resubmit_face_detection_photos(
+ self, request, queryset
+ ) -> list[FaceDetectionPhoto]:
+ querylist = list(
+ queryset.filter(
+ status=FaceDetectionPhoto.Status.PROCESSING,
+ )
+ )
+ if querylist:
+ trigger_facedetection_lambda(querylist)
+ messages.success(request, "Resubmit successful.")
+ return querylist
| {"golden_diff": "diff --git a/website/facedetection/admin.py b/website/facedetection/admin.py\n--- a/website/facedetection/admin.py\n+++ b/website/facedetection/admin.py\n@@ -1,9 +1,11 @@\n-from django.contrib import admin\n+from django.contrib import admin, messages\n from django.db.models.query import Prefetch\n from django.urls import reverse\n from django.utils.html import format_html\n from django.utils.safestring import mark_safe\n \n+from facedetection.services import trigger_facedetection_lambda\n+\n from .models import (\n FaceDetectionPhoto,\n PhotoFaceEncoding,\n@@ -44,11 +46,25 @@\n list_filter = [\"status\", \"marked_for_deletion_at\"]\n inlines = [ReferenceFaceEncodingInline]\n \n+ actions = [\"resubmit_reference_faces\"]\n+\n def get_readonly_fields(self, request, obj=None):\n if obj is None:\n return [\"created_at\", \"submitted_at\", \"status\"]\n return [\"file\", \"user\", \"created_at\", \"submitted_at\", \"status\"]\n \n+ @admin.action(description=\"Resubmit reference faces for analysis.\")\n+ def resubmit_reference_faces(self, request, queryset) -> list[ReferenceFace]:\n+ querylist = list(\n+ queryset.filter(\n+ status=FaceDetectionPhoto.Status.PROCESSING,\n+ )\n+ )\n+ if querylist:\n+ trigger_facedetection_lambda(querylist)\n+ messages.success(request, \"Resubmit successful.\")\n+ return querylist\n+\n \n class PhotoFaceEncodingInline(admin.TabularInline):\n model = PhotoFaceEncoding\n@@ -120,6 +136,8 @@\n list_filter = [\"status\", \"submitted_at\"]\n inlines = [PhotoFaceEncodingInline]\n \n+ actions = [\"resubmit_face_detection_photos\"]\n+\n def get_queryset(self, request):\n return (\n super()\n@@ -131,3 +149,17 @@\n \n def has_add_permission(self, request):\n return False\n+\n+ @admin.action(description=\"Resubmits face detection photos for analysis.\")\n+ def resubmit_face_detection_photos(\n+ self, request, queryset\n+ ) -> list[FaceDetectionPhoto]:\n+ querylist = list(\n+ queryset.filter(\n+ status=FaceDetectionPhoto.Status.PROCESSING,\n+ )\n+ )\n+ if querylist:\n+ trigger_facedetection_lambda(querylist)\n+ messages.success(request, \"Resubmit successful.\")\n+ return querylist\n", "issue": "Facedetection admin resubmit action\nSometimes the facedetection lambda can fail randomly. 
Photos are resubmitted nightly if that happens, but it may be nice to have an alternative for that to do it sooner, without SSHing into the server.\r\n\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.db.models.query import Prefetch\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom .models import (\n FaceDetectionPhoto,\n PhotoFaceEncoding,\n ReferenceFace,\n ReferenceFaceEncoding,\n)\n\n\nclass ReferenceFaceEncodingInline(admin.TabularInline):\n model = ReferenceFaceEncoding\n readonly_fields = [\"num_matches\"]\n fields = [\"num_matches\"]\n can_delete = False\n extra = 0\n\n def has_add_permission(self, request, obj=None):\n return False # Encodings should not be created manually.\n\n def get_queryset(self, request):\n return super().get_queryset(request).only(\"reference\")\n\n\[email protected](ReferenceFace)\nclass ReferenceFaceAdmin(admin.ModelAdmin):\n list_display = [\n \"user\",\n \"status\",\n \"created_at\",\n \"marked_for_deletion_at\",\n ]\n\n search_fields = [\n \"user__username\",\n \"user__first_name\",\n \"user__last_name\",\n ]\n\n list_filter = [\"status\", \"marked_for_deletion_at\"]\n inlines = [ReferenceFaceEncodingInline]\n\n def get_readonly_fields(self, request, obj=None):\n if obj is None:\n return [\"created_at\", \"submitted_at\", \"status\"]\n return [\"file\", \"user\", \"created_at\", \"submitted_at\", \"status\"]\n\n\nclass PhotoFaceEncodingInline(admin.TabularInline):\n model = PhotoFaceEncoding\n readonly_fields = [\"view_matches\"]\n fields = [\"view_matches\"]\n can_delete = False\n extra = 0\n\n @admin.display(description=\"Matches\")\n def view_matches(self, obj):\n reference_faces = [match.reference for match in obj.matches.all()]\n if not reference_faces:\n return \"-\"\n\n links = [\n format_html(\n '<a href=\"{url}\">{text}</a>',\n url=reverse(\n \"admin:facedetection_referenceface_change\",\n kwargs={\"object_id\": rf.pk},\n ),\n text=str(rf),\n )\n for rf in reference_faces\n ]\n return mark_safe(\", \".join(links))\n\n def has_add_permission(self, request, obj=None):\n return False # Encodings should not be created manually.\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .only(\"photo\") # Don't select the 128 encoding fields.\n .prefetch_related(\n \"photo__photo__album\",\n Prefetch(\n \"matches\",\n queryset=ReferenceFaceEncoding.objects.select_related(\n \"reference\", \"reference__user\"\n ).only(\"reference\"),\n ),\n )\n )\n\n\[email protected](FaceDetectionPhoto)\nclass FaceDetectionPhotoAdmin(admin.ModelAdmin):\n list_display = [\n \"__str__\",\n \"status\",\n \"submitted_at\",\n \"num_faces\",\n ]\n\n readonly_fields = [\n \"photo\",\n \"submitted_at\",\n \"status\",\n ]\n\n search_fields = [\n \"photo__album__title\",\n \"photo__album__date\",\n \"photo__file\",\n ]\n\n list_filter = [\"status\", \"submitted_at\"]\n inlines = [PhotoFaceEncodingInline]\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .select_related(\"photo\")\n .prefetch_related(\"photo__album\")\n .select_properties(\"num_faces\")\n )\n\n def has_add_permission(self, request):\n return False\n", "path": "website/facedetection/admin.py"}], "after_files": [{"content": "from django.contrib import admin, messages\nfrom django.db.models.query import Prefetch\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom 
facedetection.services import trigger_facedetection_lambda\n\nfrom .models import (\n FaceDetectionPhoto,\n PhotoFaceEncoding,\n ReferenceFace,\n ReferenceFaceEncoding,\n)\n\n\nclass ReferenceFaceEncodingInline(admin.TabularInline):\n model = ReferenceFaceEncoding\n readonly_fields = [\"num_matches\"]\n fields = [\"num_matches\"]\n can_delete = False\n extra = 0\n\n def has_add_permission(self, request, obj=None):\n return False # Encodings should not be created manually.\n\n def get_queryset(self, request):\n return super().get_queryset(request).only(\"reference\")\n\n\[email protected](ReferenceFace)\nclass ReferenceFaceAdmin(admin.ModelAdmin):\n list_display = [\n \"user\",\n \"status\",\n \"created_at\",\n \"marked_for_deletion_at\",\n ]\n\n search_fields = [\n \"user__username\",\n \"user__first_name\",\n \"user__last_name\",\n ]\n\n list_filter = [\"status\", \"marked_for_deletion_at\"]\n inlines = [ReferenceFaceEncodingInline]\n\n actions = [\"resubmit_reference_faces\"]\n\n def get_readonly_fields(self, request, obj=None):\n if obj is None:\n return [\"created_at\", \"submitted_at\", \"status\"]\n return [\"file\", \"user\", \"created_at\", \"submitted_at\", \"status\"]\n\n @admin.action(description=\"Resubmit reference faces for analysis.\")\n def resubmit_reference_faces(self, request, queryset) -> list[ReferenceFace]:\n querylist = list(\n queryset.filter(\n status=FaceDetectionPhoto.Status.PROCESSING,\n )\n )\n if querylist:\n trigger_facedetection_lambda(querylist)\n messages.success(request, \"Resubmit successful.\")\n return querylist\n\n\nclass PhotoFaceEncodingInline(admin.TabularInline):\n model = PhotoFaceEncoding\n readonly_fields = [\"view_matches\"]\n fields = [\"view_matches\"]\n can_delete = False\n extra = 0\n\n @admin.display(description=\"Matches\")\n def view_matches(self, obj):\n reference_faces = [match.reference for match in obj.matches.all()]\n if not reference_faces:\n return \"-\"\n\n links = [\n format_html(\n '<a href=\"{url}\">{text}</a>',\n url=reverse(\n \"admin:facedetection_referenceface_change\",\n kwargs={\"object_id\": rf.pk},\n ),\n text=str(rf),\n )\n for rf in reference_faces\n ]\n return mark_safe(\", \".join(links))\n\n def has_add_permission(self, request, obj=None):\n return False # Encodings should not be created manually.\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .only(\"photo\") # Don't select the 128 encoding fields.\n .prefetch_related(\n \"photo__photo__album\",\n Prefetch(\n \"matches\",\n queryset=ReferenceFaceEncoding.objects.select_related(\n \"reference\", \"reference__user\"\n ).only(\"reference\"),\n ),\n )\n )\n\n\[email protected](FaceDetectionPhoto)\nclass FaceDetectionPhotoAdmin(admin.ModelAdmin):\n list_display = [\n \"__str__\",\n \"status\",\n \"submitted_at\",\n \"num_faces\",\n ]\n\n readonly_fields = [\n \"photo\",\n \"submitted_at\",\n \"status\",\n ]\n\n search_fields = [\n \"photo__album__title\",\n \"photo__album__date\",\n \"photo__file\",\n ]\n\n list_filter = [\"status\", \"submitted_at\"]\n inlines = [PhotoFaceEncodingInline]\n\n actions = [\"resubmit_face_detection_photos\"]\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .select_related(\"photo\")\n .prefetch_related(\"photo__album\")\n .select_properties(\"num_faces\")\n )\n\n def has_add_permission(self, request):\n return False\n\n @admin.action(description=\"Resubmits face detection photos for analysis.\")\n def resubmit_face_detection_photos(\n self, request, queryset\n ) 
-> list[FaceDetectionPhoto]:\n querylist = list(\n queryset.filter(\n status=FaceDetectionPhoto.Status.PROCESSING,\n )\n )\n if querylist:\n trigger_facedetection_lambda(querylist)\n messages.success(request, \"Resubmit successful.\")\n return querylist\n", "path": "website/facedetection/admin.py"}]} | 1,371 | 549 |
gh_patches_debug_8484 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1789 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
recursive submodule support for `language: golang`
I added this hook to `pre-commit-hooks.yaml` in `https://github.com/google/go-jsonnet`, and when I try to use the hook as:
```
repos:
- repo: https://github.com/google/go-jsonnet
rev: 4a3144a417b7eb9b1f7e56741a9e72f3155de3fa
hooks:
- id: jsonnet-format
```
then I see the following error.
```
Traceback (most recent call last):
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/error_handler.py", line 65, in error_handler
yield
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/main.py", line 378, in main
return run(args.config, store, args)
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/commands/run.py", line 403, in run
install_hook_envs(hooks, store)
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 224, in install_hook_envs
_hook_install(hook)
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 82, in _hook_install
lang.install_environment(
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/languages/golang.py", line 81, in install_environment
cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/util.py", line 154, in cmd_output_b
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
pre_commit.util.CalledProcessError: command: ('/usr/local/bin/go', 'get', './...')
return code: 2
expected return code: 0
stdout: (none)
stderr:
go: downloading github.com/sergi/go-diff v1.1.0
go: downloading github.com/fatih/color v1.9.0
go: downloading github.com/mattn/go-colorable v0.1.4
go: downloading github.com/mattn/go-isatty v0.0.11
go: downloading golang.org/x/sys v0.0.0-20191026070338-33540a1f6037
# github.com/google/go-jsonnet/c-bindings
libjsonnet.cpp:5:14: fatal error: 'libjsonnet.h' file not found
```
Any idea? Thanks.
_Originally posted by @gaurav517 in https://github.com/pre-commit/pre-commit/issues/1785#issuecomment-774486062_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/golang.py`
Content:
```
1 import contextlib
2 import os.path
3 import sys
4 from typing import Generator
5 from typing import Sequence
6 from typing import Tuple
7
8 import pre_commit.constants as C
9 from pre_commit import git
10 from pre_commit.envcontext import envcontext
11 from pre_commit.envcontext import PatchesT
12 from pre_commit.envcontext import Var
13 from pre_commit.hook import Hook
14 from pre_commit.languages import helpers
15 from pre_commit.prefix import Prefix
16 from pre_commit.util import clean_path_on_failure
17 from pre_commit.util import cmd_output
18 from pre_commit.util import cmd_output_b
19 from pre_commit.util import rmtree
20
21 ENVIRONMENT_DIR = 'golangenv'
22 get_default_version = helpers.basic_get_default_version
23 healthy = helpers.basic_healthy
24
25
26 def get_env_patch(venv: str) -> PatchesT:
27 return (
28 ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),
29 )
30
31
32 @contextlib.contextmanager
33 def in_env(prefix: Prefix) -> Generator[None, None, None]:
34 envdir = prefix.path(
35 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
36 )
37 with envcontext(get_env_patch(envdir)):
38 yield
39
40
41 def guess_go_dir(remote_url: str) -> str:
42 if remote_url.endswith('.git'):
43 remote_url = remote_url[:-1 * len('.git')]
44 looks_like_url = (
45 not remote_url.startswith('file://') and
46 ('//' in remote_url or '@' in remote_url)
47 )
48 remote_url = remote_url.replace(':', '/')
49 if looks_like_url:
50 _, _, remote_url = remote_url.rpartition('//')
51 _, _, remote_url = remote_url.rpartition('@')
52 return remote_url
53 else:
54 return 'unknown_src_dir'
55
56
57 def install_environment(
58 prefix: Prefix,
59 version: str,
60 additional_dependencies: Sequence[str],
61 ) -> None:
62 helpers.assert_version_default('golang', version)
63 directory = prefix.path(
64 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
65 )
66
67 with clean_path_on_failure(directory):
68 remote = git.get_remote_url(prefix.prefix_dir)
69 repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))
70
71 # Clone into the goenv we'll create
72 helpers.run_setup_cmd(prefix, ('git', 'clone', '.', repo_src_dir))
73
74 if sys.platform == 'cygwin': # pragma: no cover
75 _, gopath, _ = cmd_output('cygpath', '-w', directory)
76 gopath = gopath.strip()
77 else:
78 gopath = directory
79 env = dict(os.environ, GOPATH=gopath)
80 env.pop('GOBIN', None)
81 cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)
82 for dependency in additional_dependencies:
83 cmd_output_b('go', 'get', dependency, cwd=repo_src_dir, env=env)
84 # Same some disk space, we don't need these after installation
85 rmtree(prefix.path(directory, 'src'))
86 pkgdir = prefix.path(directory, 'pkg')
87 if os.path.exists(pkgdir): # pragma: no cover (go<1.10)
88 rmtree(pkgdir)
89
90
91 def run_hook(
92 hook: Hook,
93 file_args: Sequence[str],
94 color: bool,
95 ) -> Tuple[int, bytes]:
96 with in_env(hook.prefix):
97 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/golang.py b/pre_commit/languages/golang.py
--- a/pre_commit/languages/golang.py
+++ b/pre_commit/languages/golang.py
@@ -69,7 +69,8 @@
repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))
# Clone into the goenv we'll create
- helpers.run_setup_cmd(prefix, ('git', 'clone', '.', repo_src_dir))
+ cmd = ('git', 'clone', '--recursive', '.', repo_src_dir)
+ helpers.run_setup_cmd(prefix, cmd)
if sys.platform == 'cygwin': # pragma: no cover
_, gopath, _ = cmd_output('cygpath', '-w', directory)
| {"golden_diff": "diff --git a/pre_commit/languages/golang.py b/pre_commit/languages/golang.py\n--- a/pre_commit/languages/golang.py\n+++ b/pre_commit/languages/golang.py\n@@ -69,7 +69,8 @@\n repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))\n \n # Clone into the goenv we'll create\n- helpers.run_setup_cmd(prefix, ('git', 'clone', '.', repo_src_dir))\n+ cmd = ('git', 'clone', '--recursive', '.', repo_src_dir)\n+ helpers.run_setup_cmd(prefix, cmd)\n \n if sys.platform == 'cygwin': # pragma: no cover\n _, gopath, _ = cmd_output('cygpath', '-w', directory)\n", "issue": "recursive submodule support for `language: golang`\nI added this hook in `pre-commit-hooks.yaml` in `https://github.com/google/go-jsonnet`, and then when I try to use this hook as:\r\n```\r\nrepos:\r\n - repo: https://github.com/google/go-jsonnet\r\n rev: 4a3144a417b7eb9b1f7e56741a9e72f3155de3fa\r\n hooks:\r\n - id: jsonnet-format\r\n```\r\nthen I see following error.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/error_handler.py\", line 65, in error_handler\r\n yield\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/main.py\", line 378, in main\r\n return run(args.config, store, args)\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/commands/run.py\", line 403, in run\r\n install_hook_envs(hooks, store)\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py\", line 224, in install_hook_envs\r\n _hook_install(hook)\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py\", line 82, in _hook_install\r\n lang.install_environment(\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/languages/golang.py\", line 81, in install_environment\r\n cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/util.py\", line 154, in cmd_output_b\r\n raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)\r\npre_commit.util.CalledProcessError: command: ('/usr/local/bin/go', 'get', './...')\r\nreturn code: 2\r\nexpected return code: 0\r\nstdout: (none)\r\nstderr:\r\n go: downloading github.com/sergi/go-diff v1.1.0\r\n go: downloading github.com/fatih/color v1.9.0\r\n go: downloading github.com/mattn/go-colorable v0.1.4\r\n go: downloading github.com/mattn/go-isatty v0.0.11\r\n go: downloading golang.org/x/sys v0.0.0-20191026070338-33540a1f6037\r\n # github.com/google/go-jsonnet/c-bindings\r\n libjsonnet.cpp:5:14: fatal error: 'libjsonnet.h' file not found\r\n\r\n```\r\nAny idea? 
Thanks.\r\n\r\n_Originally posted by @gaurav517 in https://github.com/pre-commit/pre-commit/issues/1785#issuecomment-774486062_\n", "before_files": [{"content": "import contextlib\nimport os.path\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import rmtree\n\nENVIRONMENT_DIR = 'golangenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix: Prefix) -> Generator[None, None, None]:\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef guess_go_dir(remote_url: str) -> str:\n if remote_url.endswith('.git'):\n remote_url = remote_url[:-1 * len('.git')]\n looks_like_url = (\n not remote_url.startswith('file://') and\n ('//' in remote_url or '@' in remote_url)\n )\n remote_url = remote_url.replace(':', '/')\n if looks_like_url:\n _, _, remote_url = remote_url.rpartition('//')\n _, _, remote_url = remote_url.rpartition('@')\n return remote_url\n else:\n return 'unknown_src_dir'\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n helpers.assert_version_default('golang', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n with clean_path_on_failure(directory):\n remote = git.get_remote_url(prefix.prefix_dir)\n repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))\n\n # Clone into the goenv we'll create\n helpers.run_setup_cmd(prefix, ('git', 'clone', '.', repo_src_dir))\n\n if sys.platform == 'cygwin': # pragma: no cover\n _, gopath, _ = cmd_output('cygpath', '-w', directory)\n gopath = gopath.strip()\n else:\n gopath = directory\n env = dict(os.environ, GOPATH=gopath)\n env.pop('GOBIN', None)\n cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)\n for dependency in additional_dependencies:\n cmd_output_b('go', 'get', dependency, cwd=repo_src_dir, env=env)\n # Same some disk space, we don't need these after installation\n rmtree(prefix.path(directory, 'src'))\n pkgdir = prefix.path(directory, 'pkg')\n if os.path.exists(pkgdir): # pragma: no cover (go<1.10)\n rmtree(pkgdir)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/golang.py"}], "after_files": [{"content": "import contextlib\nimport os.path\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import 
Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import rmtree\n\nENVIRONMENT_DIR = 'golangenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix: Prefix) -> Generator[None, None, None]:\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef guess_go_dir(remote_url: str) -> str:\n if remote_url.endswith('.git'):\n remote_url = remote_url[:-1 * len('.git')]\n looks_like_url = (\n not remote_url.startswith('file://') and\n ('//' in remote_url or '@' in remote_url)\n )\n remote_url = remote_url.replace(':', '/')\n if looks_like_url:\n _, _, remote_url = remote_url.rpartition('//')\n _, _, remote_url = remote_url.rpartition('@')\n return remote_url\n else:\n return 'unknown_src_dir'\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n helpers.assert_version_default('golang', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n with clean_path_on_failure(directory):\n remote = git.get_remote_url(prefix.prefix_dir)\n repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))\n\n # Clone into the goenv we'll create\n cmd = ('git', 'clone', '--recursive', '.', repo_src_dir)\n helpers.run_setup_cmd(prefix, cmd)\n\n if sys.platform == 'cygwin': # pragma: no cover\n _, gopath, _ = cmd_output('cygpath', '-w', directory)\n gopath = gopath.strip()\n else:\n gopath = directory\n env = dict(os.environ, GOPATH=gopath)\n env.pop('GOBIN', None)\n cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)\n for dependency in additional_dependencies:\n cmd_output_b('go', 'get', dependency, cwd=repo_src_dir, env=env)\n # Same some disk space, we don't need these after installation\n rmtree(prefix.path(directory, 'src'))\n pkgdir = prefix.path(directory, 'pkg')\n if os.path.exists(pkgdir): # pragma: no cover (go<1.10)\n rmtree(pkgdir)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/golang.py"}]} | 1,938 | 166 |
gh_patches_debug_4737 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-1035 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rename main storage/datastore/etc. docs since captions now have the name
We end up just repeating the names. See http://googlecloudplatform.github.io/gcloud-python/latest/

and http://gcloud-python.readthedocs.org/en/latest/

And notice that "Datastore" is both the heading name and the name of the first document in the heading.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # gcloud documentation build configuration file, created by
4 # sphinx-quickstart on Tue Jan 21 22:24:47 2014.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # Note that not all possible configuration values are present in this
9 # autogenerated file.
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 from email import message_from_string
15 import os
16 from pkg_resources import get_distribution
17 import sys
18 import urllib
19
20 import sphinx_rtd_theme
21
22
23 ON_READ_THE_DOCS = os.environ.get('READTHEDOCS', None) == 'True'
24 LOCAL_READ_THE_DOCS = os.environ.get('LOCAL_RTD', None) == 'True'
25
26 # If extensions (or modules to document with autodoc) are in another directory,
27 # add these directories to sys.path here. If the directory is relative to the
28 # documentation root, use os.path.abspath to make it absolute, like shown here.
29 sys.path.insert(0, os.path.abspath('..'))
30
31 # -- General configuration -----------------------------------------------------
32
33 # If your documentation needs a minimal Sphinx version, state it here.
34 #needs_sphinx = '1.0'
35
36 # Add any Sphinx extension module names here, as strings. They can be extensions
37 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
38 extensions = [
39 'sphinx.ext.autodoc',
40 'sphinx.ext.autosummary',
41 'sphinx.ext.doctest',
42 'sphinx.ext.todo',
43 'sphinx.ext.viewcode',
44 ]
45
46 # Add any paths that contain templates here, relative to this directory.
47 if ON_READ_THE_DOCS or LOCAL_READ_THE_DOCS:
48 templates_path = []
49 else:
50 templates_path = ['_templates']
51
52 # The suffix of source filenames.
53 source_suffix = '.rst'
54
55 # The encoding of source files.
56 #source_encoding = 'utf-8-sig'
57
58 # The master toctree document.
59 master_doc = 'index'
60
61 # General information about the project.
62 project = u'gcloud'
63 copyright = u'2014, Google'
64
65 # The version info for the project you're documenting, acts as replacement for
66 # |version| and |release|, also used in various other places throughout the
67 # built documents.
68 #
69 # The short X.Y version.
70 distro = get_distribution('gcloud')
71 release = os.getenv('SPHINX_RELEASE', distro.version)
72
73 # The language for content autogenerated by Sphinx. Refer to documentation
74 # for a list of supported languages.
75 #language = None
76
77 # There are two options for replacing |today|: either, you set today to some
78 # non-false value, then it is used:
79 #today = ''
80 # Else, today_fmt is used as the format for a strftime call.
81 #today_fmt = '%B %d, %Y'
82
83 # List of patterns, relative to source directory, that match files and
84 # directories to ignore when looking for source files.
85 exclude_patterns = ['_build', '_components/*']
86
87 # The reST default role (used for this markup: `text`) to use for all documents.
88 #default_role = None
89
90 # If true, '()' will be appended to :func: etc. cross-reference text.
91 #add_function_parentheses = True
92
93 # If true, the current module name will be prepended to all description
94 # unit titles (such as .. function::).
95 #add_module_names = True
96
97 # If true, sectionauthor and moduleauthor directives will be shown in the
98 # output. They are ignored by default.
99 #show_authors = False
100
101 # The name of the Pygments (syntax highlighting) style to use.
102 pygments_style = 'sphinx'
103
104 # A list of ignored prefixes for module index sorting.
105 #modindex_common_prefix = []
106
107
108 # -- Options for HTML output ---------------------------------------------------
109
110 # The theme to use for HTML and HTML Help pages. See the documentation for
111 # a list of builtin themes.
112
113 if LOCAL_READ_THE_DOCS:
114 html_theme = 'sphinx_rtd_theme'
115 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
116 elif not ON_READ_THE_DOCS:
117 html_style = 'css/main.css'
118
119 # Theme options are theme-specific and customize the look and feel of a theme
120 # further. For a list of options available for each theme, see the
121 # documentation.
122 #html_theme_options = {}
123
124 # Add any paths that contain custom themes here, relative to this directory.
125 #html_theme_path = []
126
127 # The name for this set of Sphinx documents. If None, it defaults to
128 # "<project> v<release> documentation".
129 #html_title = None
130
131 # A shorter title for the navigation bar. Default is the same as html_title.
132 #html_short_title = None
133
134 # The name of an image file (relative to this directory) to place at the top
135 # of the sidebar.
136 #html_logo = None
137
138 # The name of an image file (within the static path) to use as favicon of the
139 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
140 # pixels large.
141 html_favicon = '_static/images/favicon.ico'
142
143 # Add any paths that contain custom static files (such as style sheets) here,
144 # relative to this directory. They are copied after the builtin static files,
145 # so a file named "default.css" will overwrite the builtin "default.css".
146 html_static_path = ['_static']
147
148 html_add_permalinks = '#'
149
150 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
151 # using the given strftime format.
152 #html_last_updated_fmt = '%b %d, %Y'
153
154 # If true, SmartyPants will be used to convert quotes and dashes to
155 # typographically correct entities.
156 #html_use_smartypants = True
157
158 # Custom sidebar templates, maps document names to template names.
159 #html_sidebars = {}
160
161 # Additional templates that should be rendered to pages, maps page names to
162 # template names.
163 #html_additional_pages = {}
164
165 # If false, no module index is generated.
166 #html_domain_indices = True
167
168 # If false, no index is generated.
169 #html_use_index = True
170
171 # If true, the index is split into individual pages for each letter.
172 #html_split_index = False
173
174 # If true, links to the reST sources are added to the pages.
175 #html_show_sourcelink = True
176
177 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
178 #html_show_sphinx = True
179
180 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
181 #html_show_copyright = True
182
183 # If true, an OpenSearch description file will be output, and all pages will
184 # contain a <link> tag referring to it. The value of this option must be the
185 # base URL from which the finished HTML is served.
186 #html_use_opensearch = ''
187
188 # This is the file name suffix for HTML files (e.g. ".xhtml").
189 #html_file_suffix = None
190
191 # Output file base name for HTML help builder.
192 htmlhelp_basename = 'gclouddoc'
193
194 html_context = {}
195
196
197 # -- Options for LaTeX output --------------------------------------------------
198
199 latex_elements = {
200 # The paper size ('letterpaper' or 'a4paper').
201 #'papersize': 'letterpaper',
202
203 # The font size ('10pt', '11pt' or '12pt').
204 #'pointsize': '10pt',
205
206 # Additional stuff for the LaTeX preamble.
207 #'preamble': '',
208 }
209
210 metadata = distro.get_metadata(distro.PKG_INFO)
211 author = message_from_string(metadata).get('Author')
212 # Grouping the document tree into LaTeX files. List of tuples
213 # (source start file, target name, title, author, documentclass [howto/manual]).
214 latex_documents = [
215 ('index', 'gcloud.tex', u'gCloud Documentation',
216 author, 'manual'),
217 ]
218
219 # The name of an image file (relative to this directory) to place at the top of
220 # the title page.
221 #latex_logo = None
222
223 # For "manual" documents, if this is true, then toplevel headings are parts,
224 # not chapters.
225 #latex_use_parts = False
226
227 # If true, show page references after internal links.
228 #latex_show_pagerefs = False
229
230 # If true, show URL addresses after external links.
231 #latex_show_urls = False
232
233 # Documents to append as an appendix to all manuals.
234 #latex_appendices = []
235
236 # If false, no module index is generated.
237 #latex_domain_indices = True
238
239
240 # -- Options for manual page output --------------------------------------------
241
242 # One entry per manual page. List of tuples
243 # (source start file, name, description, authors, manual section).
244 man_pages = [
245 ('index', 'gcloud', u'gCloud Documentation',
246 [author], 1)
247 ]
248
249 # If true, show URL addresses after external links.
250 #man_show_urls = False
251
252
253 # -- Options for Texinfo output ------------------------------------------------
254
255 # Grouping the document tree into Texinfo files. List of tuples
256 # (source start file, target name, title, author,
257 # dir menu entry, description, category)
258 texinfo_documents = [
259 ('index', 'gcloud', u'gCloud Documentation',
260 author, 'gcloud', 'Python API for Google Cloud.',
261 'Miscellaneous'),
262 ]
263
264 # Documents to append as an appendix to all manuals.
265 #texinfo_appendices = []
266
267 # If false, no module index is generated.
268 #texinfo_domain_indices = True
269
270 # How to display URL addresses: 'footnote', 'no', or 'inline'.
271 #texinfo_show_urls = 'footnote'
272
273 # This pulls class descriptions from the class docstring,
274 # and parameter definitions from the __init__ docstring.
275 autoclass_content = 'both'
276
277 issue_uri = ('https://github.com/GoogleCloudPlatform/gcloud-python/issues/'
278 'new?' + urllib.urlencode({'title': '[Documentation Issue] '}))
279 issue_uri_template = (
280 issue_uri + '&' + urllib.urlencode({'body': 'Page Name: '}) + '{0}' +
281 urllib.quote('\nRelease: ') + '{1}')
282
283 html_context.update(
284 issue_uri=issue_uri,
285 issue_uri_template=issue_uri_template,
286 )
287
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -82,7 +82,7 @@
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
-exclude_patterns = ['_build', '_components/*']
+exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -82,7 +82,7 @@\n \n # List of patterns, relative to source directory, that match files and\n # directories to ignore when looking for source files.\n-exclude_patterns = ['_build', '_components/*']\n+exclude_patterns = ['_build']\n \n # The reST default role (used for this markup: `text`) to use for all documents.\n #default_role = None\n", "issue": "Rename main storage/datastore/etc. docs since captions now have the name\nWe end up just repeating the names. See http://googlecloudplatform.github.io/gcloud-python/latest/\n\n\n\nand http://gcloud-python.readthedocs.org/en/latest/\n\n\n\nAnd notice that \"Datastore\" is both the heading name and the name of the first document in the heading.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# gcloud documentation build configuration file, created by\n# sphinx-quickstart on Tue Jan 21 22:24:47 2014.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nfrom email import message_from_string\nimport os\nfrom pkg_resources import get_distribution\nimport sys\nimport urllib\n\nimport sphinx_rtd_theme\n\n\nON_READ_THE_DOCS = os.environ.get('READTHEDOCS', None) == 'True'\nLOCAL_READ_THE_DOCS = os.environ.get('LOCAL_RTD', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\nif ON_READ_THE_DOCS or LOCAL_READ_THE_DOCS:\n templates_path = []\nelse:\n templates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'gcloud'\ncopyright = u'2014, Google'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ndistro = get_distribution('gcloud')\nrelease = os.getenv('SPHINX_RELEASE', distro.version)\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', '_components/*']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\nif LOCAL_READ_THE_DOCS:\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nelif not ON_READ_THE_DOCS:\n html_style = 'css/main.css'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/images/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_add_permalinks = '#'\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'gclouddoc'\n\nhtml_context = {}\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\nmetadata = distro.get_metadata(distro.PKG_INFO)\nauthor = message_from_string(metadata).get('Author')\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'gcloud.tex', u'gCloud Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'gcloud', u'gCloud Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'gcloud', u'gCloud Documentation',\n author, 'gcloud', 'Python API for Google Cloud.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# This pulls class descriptions from the class docstring,\n# and parameter definitions from the __init__ docstring.\nautoclass_content = 'both'\n\nissue_uri = ('https://github.com/GoogleCloudPlatform/gcloud-python/issues/'\n 'new?' + urllib.urlencode({'title': '[Documentation Issue] '}))\nissue_uri_template = (\n issue_uri + '&' + urllib.urlencode({'body': 'Page Name: '}) + '{0}' +\n urllib.quote('\\nRelease: ') + '{1}')\n\nhtml_context.update(\n issue_uri=issue_uri,\n issue_uri_template=issue_uri_template,\n)\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# gcloud documentation build configuration file, created by\n# sphinx-quickstart on Tue Jan 21 22:24:47 2014.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nfrom email import message_from_string\nimport os\nfrom pkg_resources import get_distribution\nimport sys\nimport urllib\n\nimport sphinx_rtd_theme\n\n\nON_READ_THE_DOCS = os.environ.get('READTHEDOCS', None) == 'True'\nLOCAL_READ_THE_DOCS = os.environ.get('LOCAL_RTD', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\nif ON_READ_THE_DOCS or LOCAL_READ_THE_DOCS:\n templates_path = []\nelse:\n templates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'gcloud'\ncopyright = u'2014, Google'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ndistro = get_distribution('gcloud')\nrelease = os.getenv('SPHINX_RELEASE', distro.version)\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\nif LOCAL_READ_THE_DOCS:\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nelif not ON_READ_THE_DOCS:\n html_style = 'css/main.css'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/images/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_add_permalinks = '#'\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'gclouddoc'\n\nhtml_context = {}\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\nmetadata = distro.get_metadata(distro.PKG_INFO)\nauthor = message_from_string(metadata).get('Author')\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'gcloud.tex', u'gCloud Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'gcloud', u'gCloud Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'gcloud', u'gCloud Documentation',\n author, 'gcloud', 'Python API for Google Cloud.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# This pulls class descriptions from the class docstring,\n# and parameter definitions from the __init__ docstring.\nautoclass_content = 'both'\n\nissue_uri = ('https://github.com/GoogleCloudPlatform/gcloud-python/issues/'\n 'new?' + urllib.urlencode({'title': '[Documentation Issue] '}))\nissue_uri_template = (\n issue_uri + '&' + urllib.urlencode({'body': 'Page Name: '}) + '{0}' +\n urllib.quote('\\nRelease: ') + '{1}')\n\nhtml_context.update(\n issue_uri=issue_uri,\n issue_uri_template=issue_uri_template,\n)\n", "path": "docs/conf.py"}]} | 3,473 | 110 |
gh_patches_debug_31338 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-347 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
json files created by provision are not human readable
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nvflare/lighter/impl/static_file.py`
Content:
```
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import copy
16 import json
17 import os
18
19 import yaml
20
21 from nvflare.lighter.spec import Builder
22 from nvflare.lighter.utils import sh_replace
23
24
25 class StaticFileBuilder(Builder):
26 def __init__(
27 self,
28 enable_byoc=False,
29 config_folder="",
30 app_validator="",
31 docker_image="",
32 snapshot_persistor="",
33 overseer_agent="",
34 ):
35 """Build all static files from template.
36
37 Uses the information from project.yml through study to go through the participants and write the contents of
38 each file with the template, and replacing with the appropriate values from project.yml.
39
40 Usually, two main categories of files are created in all FL participants, static and dynamic. Static files
41 have similar contents among different participants, with small differences. For example, the differences in
42 sub_start.sh are client name and python module. Those are basically static files. This builder uses template
43 file and string replacement to generate those static files for each participant.
44
45 Args:
46 enable_byoc: for each participant, true to enable loading of code in the custom folder of applications
47 config_folder: usually "config"
48 app_validator: optional path to an app validator to verify that uploaded app has the expected structure
49 docker_image: when docker_image is set to a docker image name, docker.sh will be generated on server/client/admin
50 """
51 self.enable_byoc = enable_byoc
52 self.config_folder = config_folder
53 self.docker_image = docker_image
54 self.app_validator = app_validator
55 self.overseer_agent = overseer_agent
56 self.snapshot_persistor = snapshot_persistor
57
58 def _write(self, file_full_path, content, mode, exe=False):
59 mode = mode + "w"
60 with open(file_full_path, mode) as f:
61 f.write(content)
62 if exe:
63 os.chmod(file_full_path, 0o755)
64
65 def _build_overseer(self, overseer, ctx):
66 dest_dir = self.get_kit_dir(overseer, ctx)
67 self._write(
68 os.path.join(dest_dir, "start.sh"),
69 self.template["start_svr_sh"],
70 "t",
71 exe=True,
72 )
73 protocol = overseer.props.get("protocol", "http")
74 api_root = overseer.props.get("api_root", "/api/v1/")
75 default_port = "443" if protocol == "https" else "80"
76 port = overseer.props.get("port", default_port)
77 replacement_dict = {"port": port}
78 admins = self.study.get_participants_by_type("admin", first_only=False)
79 privilege_dict = dict()
80 for admin in admins:
81 for role in admin.props.get("roles", {}):
82 if role in privilege_dict:
83 privilege_dict[role].append(admin.subject)
84 else:
85 privilege_dict[role] = [admin.subject]
86 self._write(
87 os.path.join(dest_dir, "privilege.yml"),
88 yaml.dump(privilege_dict, Dumper=yaml.Dumper),
89 "t",
90 exe=False,
91 )
92
93 if self.docker_image:
94 self._write(
95 os.path.join(dest_dir, "docker.sh"),
96 sh_replace(self.template["docker_svr_sh"], replacement_dict),
97 "t",
98 exe=True,
99 )
100 self._write(
101 os.path.join(dest_dir, "gunicorn.conf.py"),
102 sh_replace(self.template["gunicorn_conf_py"], replacement_dict),
103 "t",
104 exe=False,
105 )
106 self._write(
107 os.path.join(dest_dir, "start.sh"),
108 self.template["start_ovsr_sh"],
109 "t",
110 exe=True,
111 )
112 if port:
113 ctx["overseer_end_point"] = f"{protocol}://{overseer.name}:{port}{api_root}"
114 else:
115 ctx["overseer_end_point"] = f"{protocol}://{overseer.name}{api_root}"
116
117 def _build_server(self, server, ctx):
118 config = json.loads(self.template["fed_server"])
119 dest_dir = self.get_kit_dir(server, ctx)
120 server_0 = config["servers"][0]
121 server_0["name"] = self.study_name
122 admin_port = server.props.get("admin_port", 8003)
123 ctx["admin_port"] = admin_port
124 fed_learn_port = server.props.get("fed_learn_port", 8002)
125 ctx["fed_learn_port"] = fed_learn_port
126 ctx["server_name"] = server.name
127 server_0["service"]["target"] = f"{server.name}:{fed_learn_port}"
128 server_0["admin_host"] = server.name
129 server_0["admin_port"] = admin_port
130 config["enable_byoc"] = server.enable_byoc
131 if self.app_validator:
132 config["app_validator"] = {"path": self.app_validator}
133 if self.overseer_agent:
134 overseer_agent = copy.deepcopy(self.overseer_agent)
135 if overseer_agent.get("overseer_exists", True):
136 overseer_agent["args"] = {
137 "role": "server",
138 "overseer_end_point": ctx.get("overseer_end_point", ""),
139 "project": self.study_name,
140 "name": server.name,
141 "fl_port": str(fed_learn_port),
142 "admin_port": str(admin_port),
143 }
144 overseer_agent.pop("overseer_exists", None)
145 config["overseer_agent"] = overseer_agent
146 if self.snapshot_persistor:
147 config["snapshot_persistor"] = self.snapshot_persistor
148 self._write(os.path.join(dest_dir, "fed_server.json"), json.dumps(config), "t")
149 replacement_dict = {
150 "admin_port": admin_port,
151 "fed_learn_port": fed_learn_port,
152 "config_folder": self.config_folder,
153 "docker_image": self.docker_image,
154 }
155 if self.docker_image:
156 self._write(
157 os.path.join(dest_dir, "docker.sh"),
158 sh_replace(self.template["docker_svr_sh"], replacement_dict),
159 "t",
160 exe=True,
161 )
162 self._write(
163 os.path.join(dest_dir, "start.sh"),
164 self.template["start_svr_sh"],
165 "t",
166 exe=True,
167 )
168 self._write(
169 os.path.join(dest_dir, "sub_start.sh"),
170 sh_replace(self.template["sub_start_svr_sh"], replacement_dict),
171 "t",
172 exe=True,
173 )
174 self._write(
175 os.path.join(dest_dir, "log.config"),
176 self.template["log_config"],
177 "t",
178 )
179 self._write(
180 os.path.join(dest_dir, "readme.txt"),
181 self.template["readme_fs"],
182 "t",
183 )
184 self._write(
185 os.path.join(dest_dir, "stop_fl.sh"),
186 self.template["stop_fl_sh"],
187 "t",
188 exe=True,
189 )
190
191 def _build_client(self, client, ctx):
192 config = json.loads(self.template["fed_client"])
193 dest_dir = self.get_kit_dir(client, ctx)
194 fed_learn_port = ctx.get("fed_learn_port")
195 server_name = ctx.get("server_name")
196 # config["servers"][0]["service"]["target"] = f"{server_name}:{fed_learn_port}"
197 config["servers"][0]["name"] = self.study_name
198 config["enable_byoc"] = client.enable_byoc
199 replacement_dict = {
200 "client_name": f"{client.subject}",
201 "config_folder": self.config_folder,
202 "docker_image": self.docker_image,
203 }
204 if self.overseer_agent:
205 overseer_agent = copy.deepcopy(self.overseer_agent)
206 if overseer_agent.get("overseer_exists", True):
207 overseer_agent["args"] = {
208 "role": "client",
209 "overseer_end_point": ctx.get("overseer_end_point", ""),
210 "project": self.study_name,
211 "name": client.subject,
212 }
213 overseer_agent.pop("overseer_exists", None)
214 config["overseer_agent"] = overseer_agent
215
216 self._write(os.path.join(dest_dir, "fed_client.json"), json.dumps(config), "t")
217 if self.docker_image:
218 self._write(
219 os.path.join(dest_dir, "docker.sh"),
220 sh_replace(self.template["docker_cln_sh"], replacement_dict),
221 "t",
222 exe=True,
223 )
224 self._write(
225 os.path.join(dest_dir, "start.sh"),
226 self.template["start_cln_sh"],
227 "t",
228 exe=True,
229 )
230 self._write(
231 os.path.join(dest_dir, "sub_start.sh"),
232 sh_replace(self.template["sub_start_cln_sh"], replacement_dict),
233 "t",
234 exe=True,
235 )
236 self._write(
237 os.path.join(dest_dir, "log.config"),
238 self.template["log_config"],
239 "t",
240 )
241 self._write(
242 os.path.join(dest_dir, "readme.txt"),
243 self.template["readme_fc"],
244 "t",
245 )
246 self._write(
247 os.path.join(dest_dir, "stop_fl.sh"),
248 self.template["stop_fl_sh"],
249 "t",
250 exe=True,
251 )
252
253 def _build_admin(self, admin, ctx):
254 config = json.loads(self.template["fed_admin"])
255 dest_dir = self.get_kit_dir(admin, ctx)
256 admin_port = ctx.get("admin_port")
257 server_name = ctx.get("server_name")
258
259 replacement_dict = {
260 "cn": f"{server_name}",
261 "admin_port": f"{admin_port}",
262 "docker_image": self.docker_image,
263 }
264 agent_config = dict()
265 if self.overseer_agent:
266 overseer_agent = copy.deepcopy(self.overseer_agent)
267 if overseer_agent.get("overseer_exists", True):
268 overseer_agent["args"] = {
269 "role": "admin",
270 "overseer_end_point": ctx.get("overseer_end_point", ""),
271 "project": self.study_name,
272 "name": admin.subject,
273 }
274 overseer_agent.pop("overseer_exists", None)
275 agent_config["overseer_agent"] = overseer_agent
276 config["admin"].update(agent_config)
277 self._write(os.path.join(dest_dir, "fed_admin.json"), json.dumps(config), "t")
278 if self.docker_image:
279 self._write(
280 os.path.join(dest_dir, "docker.sh"),
281 sh_replace(self.template["docker_adm_sh"], replacement_dict),
282 "t",
283 exe=True,
284 )
285 self._write(
286 os.path.join(dest_dir, "fl_admin.sh"),
287 sh_replace(self.template["fl_admin_sh"], replacement_dict),
288 "t",
289 exe=True,
290 )
291 self._write(
292 os.path.join(dest_dir, "readme.txt"),
293 self.template["readme_am"],
294 "t",
295 )
296
297 def build(self, study, ctx):
298 self.template = ctx.get("template")
299 self.study_name = study.name
300 self.study = study
301 overseer = study.get_participants_by_type("overseer")
302 self._build_overseer(overseer, ctx)
303 servers = study.get_participants_by_type("server", first_only=False)
304 for server in servers:
305 self._build_server(server, ctx)
306
307 for client in study.get_participants_by_type("client", first_only=False):
308 self._build_client(client, ctx)
309
310 for admin in study.get_participants_by_type("admin", first_only=False):
311 self._build_admin(admin, ctx)
312
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nvflare/lighter/impl/static_file.py b/nvflare/lighter/impl/static_file.py
--- a/nvflare/lighter/impl/static_file.py
+++ b/nvflare/lighter/impl/static_file.py
@@ -145,7 +145,7 @@
config["overseer_agent"] = overseer_agent
if self.snapshot_persistor:
config["snapshot_persistor"] = self.snapshot_persistor
- self._write(os.path.join(dest_dir, "fed_server.json"), json.dumps(config), "t")
+ self._write(os.path.join(dest_dir, "fed_server.json"), json.dumps(config, sort_keys=True, indent=2), "t")
replacement_dict = {
"admin_port": admin_port,
"fed_learn_port": fed_learn_port,
@@ -213,7 +213,7 @@
overseer_agent.pop("overseer_exists", None)
config["overseer_agent"] = overseer_agent
- self._write(os.path.join(dest_dir, "fed_client.json"), json.dumps(config), "t")
+ self._write(os.path.join(dest_dir, "fed_client.json"), json.dumps(config, sort_keys=True, indent=2), "t")
if self.docker_image:
self._write(
os.path.join(dest_dir, "docker.sh"),
@@ -274,7 +274,7 @@
overseer_agent.pop("overseer_exists", None)
agent_config["overseer_agent"] = overseer_agent
config["admin"].update(agent_config)
- self._write(os.path.join(dest_dir, "fed_admin.json"), json.dumps(config), "t")
+ self._write(os.path.join(dest_dir, "fed_admin.json"), json.dumps(config, sort_keys=True, indent=2), "t")
if self.docker_image:
self._write(
os.path.join(dest_dir, "docker.sh"),
| {"golden_diff": "diff --git a/nvflare/lighter/impl/static_file.py b/nvflare/lighter/impl/static_file.py\n--- a/nvflare/lighter/impl/static_file.py\n+++ b/nvflare/lighter/impl/static_file.py\n@@ -145,7 +145,7 @@\n config[\"overseer_agent\"] = overseer_agent\n if self.snapshot_persistor:\n config[\"snapshot_persistor\"] = self.snapshot_persistor\n- self._write(os.path.join(dest_dir, \"fed_server.json\"), json.dumps(config), \"t\")\n+ self._write(os.path.join(dest_dir, \"fed_server.json\"), json.dumps(config, sort_keys=True, indent=2), \"t\")\n replacement_dict = {\n \"admin_port\": admin_port,\n \"fed_learn_port\": fed_learn_port,\n@@ -213,7 +213,7 @@\n overseer_agent.pop(\"overseer_exists\", None)\n config[\"overseer_agent\"] = overseer_agent\n \n- self._write(os.path.join(dest_dir, \"fed_client.json\"), json.dumps(config), \"t\")\n+ self._write(os.path.join(dest_dir, \"fed_client.json\"), json.dumps(config, sort_keys=True, indent=2), \"t\")\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n@@ -274,7 +274,7 @@\n overseer_agent.pop(\"overseer_exists\", None)\n agent_config[\"overseer_agent\"] = overseer_agent\n config[\"admin\"].update(agent_config)\n- self._write(os.path.join(dest_dir, \"fed_admin.json\"), json.dumps(config), \"t\")\n+ self._write(os.path.join(dest_dir, \"fed_admin.json\"), json.dumps(config, sort_keys=True, indent=2), \"t\")\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n", "issue": "json files created by provision are not human readable\n\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport json\nimport os\n\nimport yaml\n\nfrom nvflare.lighter.spec import Builder\nfrom nvflare.lighter.utils import sh_replace\n\n\nclass StaticFileBuilder(Builder):\n def __init__(\n self,\n enable_byoc=False,\n config_folder=\"\",\n app_validator=\"\",\n docker_image=\"\",\n snapshot_persistor=\"\",\n overseer_agent=\"\",\n ):\n \"\"\"Build all static files from template.\n\n Uses the information from project.yml through study to go through the participants and write the contents of\n each file with the template, and replacing with the appropriate values from project.yml.\n\n Usually, two main categories of files are created in all FL participants, static and dynamic. Static files\n have similar contents among different participants, with small differences. For example, the differences in\n sub_start.sh are client name and python module. Those are basically static files. 
This builder uses template\n file and string replacement to generate those static files for each participant.\n\n Args:\n enable_byoc: for each participant, true to enable loading of code in the custom folder of applications\n config_folder: usually \"config\"\n app_validator: optional path to an app validator to verify that uploaded app has the expected structure\n docker_image: when docker_image is set to a docker image name, docker.sh will be generated on server/client/admin\n \"\"\"\n self.enable_byoc = enable_byoc\n self.config_folder = config_folder\n self.docker_image = docker_image\n self.app_validator = app_validator\n self.overseer_agent = overseer_agent\n self.snapshot_persistor = snapshot_persistor\n\n def _write(self, file_full_path, content, mode, exe=False):\n mode = mode + \"w\"\n with open(file_full_path, mode) as f:\n f.write(content)\n if exe:\n os.chmod(file_full_path, 0o755)\n\n def _build_overseer(self, overseer, ctx):\n dest_dir = self.get_kit_dir(overseer, ctx)\n self._write(\n os.path.join(dest_dir, \"start.sh\"),\n self.template[\"start_svr_sh\"],\n \"t\",\n exe=True,\n )\n protocol = overseer.props.get(\"protocol\", \"http\")\n api_root = overseer.props.get(\"api_root\", \"/api/v1/\")\n default_port = \"443\" if protocol == \"https\" else \"80\"\n port = overseer.props.get(\"port\", default_port)\n replacement_dict = {\"port\": port}\n admins = self.study.get_participants_by_type(\"admin\", first_only=False)\n privilege_dict = dict()\n for admin in admins:\n for role in admin.props.get(\"roles\", {}):\n if role in privilege_dict:\n privilege_dict[role].append(admin.subject)\n else:\n privilege_dict[role] = [admin.subject]\n self._write(\n os.path.join(dest_dir, \"privilege.yml\"),\n yaml.dump(privilege_dict, Dumper=yaml.Dumper),\n \"t\",\n exe=False,\n )\n\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n sh_replace(self.template[\"docker_svr_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"gunicorn.conf.py\"),\n sh_replace(self.template[\"gunicorn_conf_py\"], replacement_dict),\n \"t\",\n exe=False,\n )\n self._write(\n os.path.join(dest_dir, \"start.sh\"),\n self.template[\"start_ovsr_sh\"],\n \"t\",\n exe=True,\n )\n if port:\n ctx[\"overseer_end_point\"] = f\"{protocol}://{overseer.name}:{port}{api_root}\"\n else:\n ctx[\"overseer_end_point\"] = f\"{protocol}://{overseer.name}{api_root}\"\n\n def _build_server(self, server, ctx):\n config = json.loads(self.template[\"fed_server\"])\n dest_dir = self.get_kit_dir(server, ctx)\n server_0 = config[\"servers\"][0]\n server_0[\"name\"] = self.study_name\n admin_port = server.props.get(\"admin_port\", 8003)\n ctx[\"admin_port\"] = admin_port\n fed_learn_port = server.props.get(\"fed_learn_port\", 8002)\n ctx[\"fed_learn_port\"] = fed_learn_port\n ctx[\"server_name\"] = server.name\n server_0[\"service\"][\"target\"] = f\"{server.name}:{fed_learn_port}\"\n server_0[\"admin_host\"] = server.name\n server_0[\"admin_port\"] = admin_port\n config[\"enable_byoc\"] = server.enable_byoc\n if self.app_validator:\n config[\"app_validator\"] = {\"path\": self.app_validator}\n if self.overseer_agent:\n overseer_agent = copy.deepcopy(self.overseer_agent)\n if overseer_agent.get(\"overseer_exists\", True):\n overseer_agent[\"args\"] = {\n \"role\": \"server\",\n \"overseer_end_point\": ctx.get(\"overseer_end_point\", \"\"),\n \"project\": self.study_name,\n \"name\": server.name,\n \"fl_port\": str(fed_learn_port),\n \"admin_port\": str(admin_port),\n }\n 
overseer_agent.pop(\"overseer_exists\", None)\n config[\"overseer_agent\"] = overseer_agent\n if self.snapshot_persistor:\n config[\"snapshot_persistor\"] = self.snapshot_persistor\n self._write(os.path.join(dest_dir, \"fed_server.json\"), json.dumps(config), \"t\")\n replacement_dict = {\n \"admin_port\": admin_port,\n \"fed_learn_port\": fed_learn_port,\n \"config_folder\": self.config_folder,\n \"docker_image\": self.docker_image,\n }\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n sh_replace(self.template[\"docker_svr_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"start.sh\"),\n self.template[\"start_svr_sh\"],\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"sub_start.sh\"),\n sh_replace(self.template[\"sub_start_svr_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"log.config\"),\n self.template[\"log_config\"],\n \"t\",\n )\n self._write(\n os.path.join(dest_dir, \"readme.txt\"),\n self.template[\"readme_fs\"],\n \"t\",\n )\n self._write(\n os.path.join(dest_dir, \"stop_fl.sh\"),\n self.template[\"stop_fl_sh\"],\n \"t\",\n exe=True,\n )\n\n def _build_client(self, client, ctx):\n config = json.loads(self.template[\"fed_client\"])\n dest_dir = self.get_kit_dir(client, ctx)\n fed_learn_port = ctx.get(\"fed_learn_port\")\n server_name = ctx.get(\"server_name\")\n # config[\"servers\"][0][\"service\"][\"target\"] = f\"{server_name}:{fed_learn_port}\"\n config[\"servers\"][0][\"name\"] = self.study_name\n config[\"enable_byoc\"] = client.enable_byoc\n replacement_dict = {\n \"client_name\": f\"{client.subject}\",\n \"config_folder\": self.config_folder,\n \"docker_image\": self.docker_image,\n }\n if self.overseer_agent:\n overseer_agent = copy.deepcopy(self.overseer_agent)\n if overseer_agent.get(\"overseer_exists\", True):\n overseer_agent[\"args\"] = {\n \"role\": \"client\",\n \"overseer_end_point\": ctx.get(\"overseer_end_point\", \"\"),\n \"project\": self.study_name,\n \"name\": client.subject,\n }\n overseer_agent.pop(\"overseer_exists\", None)\n config[\"overseer_agent\"] = overseer_agent\n\n self._write(os.path.join(dest_dir, \"fed_client.json\"), json.dumps(config), \"t\")\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n sh_replace(self.template[\"docker_cln_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"start.sh\"),\n self.template[\"start_cln_sh\"],\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"sub_start.sh\"),\n sh_replace(self.template[\"sub_start_cln_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"log.config\"),\n self.template[\"log_config\"],\n \"t\",\n )\n self._write(\n os.path.join(dest_dir, \"readme.txt\"),\n self.template[\"readme_fc\"],\n \"t\",\n )\n self._write(\n os.path.join(dest_dir, \"stop_fl.sh\"),\n self.template[\"stop_fl_sh\"],\n \"t\",\n exe=True,\n )\n\n def _build_admin(self, admin, ctx):\n config = json.loads(self.template[\"fed_admin\"])\n dest_dir = self.get_kit_dir(admin, ctx)\n admin_port = ctx.get(\"admin_port\")\n server_name = ctx.get(\"server_name\")\n\n replacement_dict = {\n \"cn\": f\"{server_name}\",\n \"admin_port\": f\"{admin_port}\",\n \"docker_image\": self.docker_image,\n }\n agent_config = dict()\n if self.overseer_agent:\n overseer_agent = copy.deepcopy(self.overseer_agent)\n if overseer_agent.get(\"overseer_exists\", True):\n overseer_agent[\"args\"] 
= {\n \"role\": \"admin\",\n \"overseer_end_point\": ctx.get(\"overseer_end_point\", \"\"),\n \"project\": self.study_name,\n \"name\": admin.subject,\n }\n overseer_agent.pop(\"overseer_exists\", None)\n agent_config[\"overseer_agent\"] = overseer_agent\n config[\"admin\"].update(agent_config)\n self._write(os.path.join(dest_dir, \"fed_admin.json\"), json.dumps(config), \"t\")\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n sh_replace(self.template[\"docker_adm_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"fl_admin.sh\"),\n sh_replace(self.template[\"fl_admin_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"readme.txt\"),\n self.template[\"readme_am\"],\n \"t\",\n )\n\n def build(self, study, ctx):\n self.template = ctx.get(\"template\")\n self.study_name = study.name\n self.study = study\n overseer = study.get_participants_by_type(\"overseer\")\n self._build_overseer(overseer, ctx)\n servers = study.get_participants_by_type(\"server\", first_only=False)\n for server in servers:\n self._build_server(server, ctx)\n\n for client in study.get_participants_by_type(\"client\", first_only=False):\n self._build_client(client, ctx)\n\n for admin in study.get_participants_by_type(\"admin\", first_only=False):\n self._build_admin(admin, ctx)\n", "path": "nvflare/lighter/impl/static_file.py"}], "after_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport json\nimport os\n\nimport yaml\n\nfrom nvflare.lighter.spec import Builder\nfrom nvflare.lighter.utils import sh_replace\n\n\nclass StaticFileBuilder(Builder):\n def __init__(\n self,\n enable_byoc=False,\n config_folder=\"\",\n app_validator=\"\",\n docker_image=\"\",\n snapshot_persistor=\"\",\n overseer_agent=\"\",\n ):\n \"\"\"Build all static files from template.\n\n Uses the information from project.yml through study to go through the participants and write the contents of\n each file with the template, and replacing with the appropriate values from project.yml.\n\n Usually, two main categories of files are created in all FL participants, static and dynamic. Static files\n have similar contents among different participants, with small differences. For example, the differences in\n sub_start.sh are client name and python module. Those are basically static files. 
This builder uses template\n file and string replacement to generate those static files for each participant.\n\n Args:\n enable_byoc: for each participant, true to enable loading of code in the custom folder of applications\n config_folder: usually \"config\"\n app_validator: optional path to an app validator to verify that uploaded app has the expected structure\n docker_image: when docker_image is set to a docker image name, docker.sh will be generated on server/client/admin\n \"\"\"\n self.enable_byoc = enable_byoc\n self.config_folder = config_folder\n self.docker_image = docker_image\n self.app_validator = app_validator\n self.overseer_agent = overseer_agent\n self.snapshot_persistor = snapshot_persistor\n\n def _write(self, file_full_path, content, mode, exe=False):\n mode = mode + \"w\"\n with open(file_full_path, mode) as f:\n f.write(content)\n if exe:\n os.chmod(file_full_path, 0o755)\n\n def _build_overseer(self, overseer, ctx):\n dest_dir = self.get_kit_dir(overseer, ctx)\n self._write(\n os.path.join(dest_dir, \"start.sh\"),\n self.template[\"start_svr_sh\"],\n \"t\",\n exe=True,\n )\n protocol = overseer.props.get(\"protocol\", \"http\")\n api_root = overseer.props.get(\"api_root\", \"/api/v1/\")\n default_port = \"443\" if protocol == \"https\" else \"80\"\n port = overseer.props.get(\"port\", default_port)\n replacement_dict = {\"port\": port}\n admins = self.study.get_participants_by_type(\"admin\", first_only=False)\n privilege_dict = dict()\n for admin in admins:\n for role in admin.props.get(\"roles\", {}):\n if role in privilege_dict:\n privilege_dict[role].append(admin.subject)\n else:\n privilege_dict[role] = [admin.subject]\n self._write(\n os.path.join(dest_dir, \"privilege.yml\"),\n yaml.dump(privilege_dict, Dumper=yaml.Dumper),\n \"t\",\n exe=False,\n )\n\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n sh_replace(self.template[\"docker_svr_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"gunicorn.conf.py\"),\n sh_replace(self.template[\"gunicorn_conf_py\"], replacement_dict),\n \"t\",\n exe=False,\n )\n self._write(\n os.path.join(dest_dir, \"start.sh\"),\n self.template[\"start_ovsr_sh\"],\n \"t\",\n exe=True,\n )\n if port:\n ctx[\"overseer_end_point\"] = f\"{protocol}://{overseer.name}:{port}{api_root}\"\n else:\n ctx[\"overseer_end_point\"] = f\"{protocol}://{overseer.name}{api_root}\"\n\n def _build_server(self, server, ctx):\n config = json.loads(self.template[\"fed_server\"])\n dest_dir = self.get_kit_dir(server, ctx)\n server_0 = config[\"servers\"][0]\n server_0[\"name\"] = self.study_name\n admin_port = server.props.get(\"admin_port\", 8003)\n ctx[\"admin_port\"] = admin_port\n fed_learn_port = server.props.get(\"fed_learn_port\", 8002)\n ctx[\"fed_learn_port\"] = fed_learn_port\n ctx[\"server_name\"] = server.name\n server_0[\"service\"][\"target\"] = f\"{server.name}:{fed_learn_port}\"\n server_0[\"admin_host\"] = server.name\n server_0[\"admin_port\"] = admin_port\n config[\"enable_byoc\"] = server.enable_byoc\n if self.app_validator:\n config[\"app_validator\"] = {\"path\": self.app_validator}\n if self.overseer_agent:\n overseer_agent = copy.deepcopy(self.overseer_agent)\n if overseer_agent.get(\"overseer_exists\", True):\n overseer_agent[\"args\"] = {\n \"role\": \"server\",\n \"overseer_end_point\": ctx.get(\"overseer_end_point\", \"\"),\n \"project\": self.study_name,\n \"name\": server.name,\n \"fl_port\": str(fed_learn_port),\n \"admin_port\": str(admin_port),\n }\n 
overseer_agent.pop(\"overseer_exists\", None)\n config[\"overseer_agent\"] = overseer_agent\n if self.snapshot_persistor:\n config[\"snapshot_persistor\"] = self.snapshot_persistor\n self._write(os.path.join(dest_dir, \"fed_server.json\"), json.dumps(config, sort_keys=True, indent=2), \"t\")\n replacement_dict = {\n \"admin_port\": admin_port,\n \"fed_learn_port\": fed_learn_port,\n \"config_folder\": self.config_folder,\n \"docker_image\": self.docker_image,\n }\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n sh_replace(self.template[\"docker_svr_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"start.sh\"),\n self.template[\"start_svr_sh\"],\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"sub_start.sh\"),\n sh_replace(self.template[\"sub_start_svr_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"log.config\"),\n self.template[\"log_config\"],\n \"t\",\n )\n self._write(\n os.path.join(dest_dir, \"readme.txt\"),\n self.template[\"readme_fs\"],\n \"t\",\n )\n self._write(\n os.path.join(dest_dir, \"stop_fl.sh\"),\n self.template[\"stop_fl_sh\"],\n \"t\",\n exe=True,\n )\n\n def _build_client(self, client, ctx):\n config = json.loads(self.template[\"fed_client\"])\n dest_dir = self.get_kit_dir(client, ctx)\n fed_learn_port = ctx.get(\"fed_learn_port\")\n server_name = ctx.get(\"server_name\")\n # config[\"servers\"][0][\"service\"][\"target\"] = f\"{server_name}:{fed_learn_port}\"\n config[\"servers\"][0][\"name\"] = self.study_name\n config[\"enable_byoc\"] = client.enable_byoc\n replacement_dict = {\n \"client_name\": f\"{client.subject}\",\n \"config_folder\": self.config_folder,\n \"docker_image\": self.docker_image,\n }\n if self.overseer_agent:\n overseer_agent = copy.deepcopy(self.overseer_agent)\n if overseer_agent.get(\"overseer_exists\", True):\n overseer_agent[\"args\"] = {\n \"role\": \"client\",\n \"overseer_end_point\": ctx.get(\"overseer_end_point\", \"\"),\n \"project\": self.study_name,\n \"name\": client.subject,\n }\n overseer_agent.pop(\"overseer_exists\", None)\n config[\"overseer_agent\"] = overseer_agent\n\n self._write(os.path.join(dest_dir, \"fed_client.json\"), json.dumps(config, sort_keys=True, indent=2), \"t\")\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n sh_replace(self.template[\"docker_cln_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"start.sh\"),\n self.template[\"start_cln_sh\"],\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"sub_start.sh\"),\n sh_replace(self.template[\"sub_start_cln_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"log.config\"),\n self.template[\"log_config\"],\n \"t\",\n )\n self._write(\n os.path.join(dest_dir, \"readme.txt\"),\n self.template[\"readme_fc\"],\n \"t\",\n )\n self._write(\n os.path.join(dest_dir, \"stop_fl.sh\"),\n self.template[\"stop_fl_sh\"],\n \"t\",\n exe=True,\n )\n\n def _build_admin(self, admin, ctx):\n config = json.loads(self.template[\"fed_admin\"])\n dest_dir = self.get_kit_dir(admin, ctx)\n admin_port = ctx.get(\"admin_port\")\n server_name = ctx.get(\"server_name\")\n\n replacement_dict = {\n \"cn\": f\"{server_name}\",\n \"admin_port\": f\"{admin_port}\",\n \"docker_image\": self.docker_image,\n }\n agent_config = dict()\n if self.overseer_agent:\n overseer_agent = copy.deepcopy(self.overseer_agent)\n if 
overseer_agent.get(\"overseer_exists\", True):\n overseer_agent[\"args\"] = {\n \"role\": \"admin\",\n \"overseer_end_point\": ctx.get(\"overseer_end_point\", \"\"),\n \"project\": self.study_name,\n \"name\": admin.subject,\n }\n overseer_agent.pop(\"overseer_exists\", None)\n agent_config[\"overseer_agent\"] = overseer_agent\n config[\"admin\"].update(agent_config)\n self._write(os.path.join(dest_dir, \"fed_admin.json\"), json.dumps(config, sort_keys=True, indent=2), \"t\")\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n sh_replace(self.template[\"docker_adm_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"fl_admin.sh\"),\n sh_replace(self.template[\"fl_admin_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"readme.txt\"),\n self.template[\"readme_am\"],\n \"t\",\n )\n\n def build(self, study, ctx):\n self.template = ctx.get(\"template\")\n self.study_name = study.name\n self.study = study\n overseer = study.get_participants_by_type(\"overseer\")\n self._build_overseer(overseer, ctx)\n servers = study.get_participants_by_type(\"server\", first_only=False)\n for server in servers:\n self._build_server(server, ctx)\n\n for client in study.get_participants_by_type(\"client\", first_only=False):\n self._build_client(client, ctx)\n\n for admin in study.get_participants_by_type(\"admin\", first_only=False):\n self._build_admin(admin, ctx)\n", "path": "nvflare/lighter/impl/static_file.py"}]} | 3,732 | 421 |
gh_patches_debug_23810 | rasdani/github-patches | git_diff | translate__pootle-5915 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dates not getting localised in browse tables
seems like the dates are not getting localised as they should
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_app/panels.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import re
10
11 from django.utils.safestring import mark_safe
12
13 from pootle.core.browser import get_table_headings
14 from pootle.core.decorators import persistent_property
15 from pootle.core.views.panels import TablePanel
16
17 from pootle.i18n.dates import timesince
18
19
20 class ChildrenPanel(TablePanel):
21 panel_name = "children"
22 _table_fields = (
23 'name', 'progress', 'activity',
24 'total', 'need-translation',
25 'suggestions', 'critical')
26
27 @property
28 def table_fields(self):
29 fields = (
30 ("name", "total")
31 if self.view.is_templates_context
32 else self._table_fields)
33 if self.view.has_admin_access:
34 fields += ('last-updated', )
35 return fields
36
37 @property
38 def children(self):
39 return self.view.object_children
40
41 @property
42 def table(self):
43 if self.view.object_children:
44 return {
45 'id': self.view.view_name,
46 'fields': self.table_fields,
47 'headings': get_table_headings(self.table_fields),
48 'rows': self.view.object_children}
49
50 @persistent_property
51 def _content(self):
52 return self.render()
53
54 @property
55 def child_update_times(self):
56 _times = {}
57 for child in self.children:
58 if not child.get("stats"):
59 continue
60 last_created_unit = (
61 timesince(child["stats"]["last_created_unit"]["creation_time"])
62 if child["stats"].get("last_created_unit")
63 else None)
64 last_submission = (
65 timesince(child["stats"]["last_submission"]["mtime"])
66 if child["stats"].get("last_submission")
67 else None)
68 _times[child["code"]] = (last_submission, last_created_unit)
69 return _times
70
71 @property
72 def content(self):
73 return self.update_times(self._content)
74
75 def get_context_data(self):
76 return dict(
77 table=self.table,
78 can_translate=self.view.can_translate)
79
80 def update_times(self, content):
81 times = {}
82 update_times = self.child_update_times.items()
83 for name, (last_submission, last_created_unit) in update_times:
84 if last_submission:
85 times[
86 "_XXX_LAST_SUBMISSION_%s_LAST_SUBMISSION_XXX_"
87 % name] = last_submission
88 if last_created_unit:
89 times[
90 "_XXX_LAST_CREATED_%s_LAST_CREATED_XXX_"
91 % name] = last_created_unit
92 if times:
93 regex = re.compile("(%s)" % "|".join(map(re.escape, times.keys())))
94 return mark_safe(
95 regex.sub(
96 lambda match: times[match.string[match.start():match.end()]],
97 content))
98 return content
99
```
Path: `pootle/i18n/dates.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import locale as system_locale
10 import os
11 from datetime import datetime
12
13 from babel.dates import format_timedelta
14
15 from django.utils import translation
16
17
18 class LocalDate(object):
19
20 def __init__(self):
21 if not self.locale_code and not os.name == "nt":
22 self.set_locale()
23
24 @property
25 def default_locale(self):
26 return translation.to_locale(translation.get_language())
27
28 def set_locale(self):
29 system_locale.setlocale(
30 system_locale.LC_ALL,
31 (self.default_locale, 'UTF-8'))
32
33 @property
34 def locale_code(self):
35 return system_locale.getlocale()[0]
36
37 def format_timesince(self, timestamp, locale=None):
38 return format_timedelta(
39 datetime.now()
40 - datetime.fromtimestamp(
41 timestamp),
42 locale=(
43 locale
44 or self.locale_code
45 or self.default_locale))
46
47
48 localdate = LocalDate()
49
50
51 def timesince(timestamp, locale=None):
52 return localdate.format_timesince(timestamp, locale=locale)
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_app/panels.py b/pootle/apps/pootle_app/panels.py
--- a/pootle/apps/pootle_app/panels.py
+++ b/pootle/apps/pootle_app/panels.py
@@ -58,11 +58,15 @@
if not child.get("stats"):
continue
last_created_unit = (
- timesince(child["stats"]["last_created_unit"]["creation_time"])
+ timesince(
+ child["stats"]["last_created_unit"]["creation_time"],
+ locale=self.view.request_lang)
if child["stats"].get("last_created_unit")
else None)
last_submission = (
- timesince(child["stats"]["last_submission"]["mtime"])
+ timesince(
+ child["stats"]["last_submission"]["mtime"],
+ locale=self.view.request_lang)
if child["stats"].get("last_submission")
else None)
_times[child["code"]] = (last_submission, last_created_unit)
diff --git a/pootle/i18n/dates.py b/pootle/i18n/dates.py
--- a/pootle/i18n/dates.py
+++ b/pootle/i18n/dates.py
@@ -49,4 +49,6 @@
def timesince(timestamp, locale=None):
+ if locale:
+ locale = translation.to_locale(locale)
return localdate.format_timesince(timestamp, locale=locale)
| {"golden_diff": "diff --git a/pootle/apps/pootle_app/panels.py b/pootle/apps/pootle_app/panels.py\n--- a/pootle/apps/pootle_app/panels.py\n+++ b/pootle/apps/pootle_app/panels.py\n@@ -58,11 +58,15 @@\n if not child.get(\"stats\"):\n continue\n last_created_unit = (\n- timesince(child[\"stats\"][\"last_created_unit\"][\"creation_time\"])\n+ timesince(\n+ child[\"stats\"][\"last_created_unit\"][\"creation_time\"],\n+ locale=self.view.request_lang)\n if child[\"stats\"].get(\"last_created_unit\")\n else None)\n last_submission = (\n- timesince(child[\"stats\"][\"last_submission\"][\"mtime\"])\n+ timesince(\n+ child[\"stats\"][\"last_submission\"][\"mtime\"],\n+ locale=self.view.request_lang)\n if child[\"stats\"].get(\"last_submission\")\n else None)\n _times[child[\"code\"]] = (last_submission, last_created_unit)\ndiff --git a/pootle/i18n/dates.py b/pootle/i18n/dates.py\n--- a/pootle/i18n/dates.py\n+++ b/pootle/i18n/dates.py\n@@ -49,4 +49,6 @@\n \n \n def timesince(timestamp, locale=None):\n+ if locale:\n+ locale = translation.to_locale(locale)\n return localdate.format_timesince(timestamp, locale=locale)\n", "issue": "Dates not getting localised in browse tables\nseems like the dates are not getting localised as they should\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport re\n\nfrom django.utils.safestring import mark_safe\n\nfrom pootle.core.browser import get_table_headings\nfrom pootle.core.decorators import persistent_property\nfrom pootle.core.views.panels import TablePanel\n\nfrom pootle.i18n.dates import timesince\n\n\nclass ChildrenPanel(TablePanel):\n panel_name = \"children\"\n _table_fields = (\n 'name', 'progress', 'activity',\n 'total', 'need-translation',\n 'suggestions', 'critical')\n\n @property\n def table_fields(self):\n fields = (\n (\"name\", \"total\")\n if self.view.is_templates_context\n else self._table_fields)\n if self.view.has_admin_access:\n fields += ('last-updated', )\n return fields\n\n @property\n def children(self):\n return self.view.object_children\n\n @property\n def table(self):\n if self.view.object_children:\n return {\n 'id': self.view.view_name,\n 'fields': self.table_fields,\n 'headings': get_table_headings(self.table_fields),\n 'rows': self.view.object_children}\n\n @persistent_property\n def _content(self):\n return self.render()\n\n @property\n def child_update_times(self):\n _times = {}\n for child in self.children:\n if not child.get(\"stats\"):\n continue\n last_created_unit = (\n timesince(child[\"stats\"][\"last_created_unit\"][\"creation_time\"])\n if child[\"stats\"].get(\"last_created_unit\")\n else None)\n last_submission = (\n timesince(child[\"stats\"][\"last_submission\"][\"mtime\"])\n if child[\"stats\"].get(\"last_submission\")\n else None)\n _times[child[\"code\"]] = (last_submission, last_created_unit)\n return _times\n\n @property\n def content(self):\n return self.update_times(self._content)\n\n def get_context_data(self):\n return dict(\n table=self.table,\n can_translate=self.view.can_translate)\n\n def update_times(self, content):\n times = {}\n update_times = self.child_update_times.items()\n for name, (last_submission, last_created_unit) in update_times:\n if last_submission:\n times[\n \"_XXX_LAST_SUBMISSION_%s_LAST_SUBMISSION_XXX_\"\n % name] = 
last_submission\n if last_created_unit:\n times[\n \"_XXX_LAST_CREATED_%s_LAST_CREATED_XXX_\"\n % name] = last_created_unit\n if times:\n regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape, times.keys())))\n return mark_safe(\n regex.sub(\n lambda match: times[match.string[match.start():match.end()]],\n content))\n return content\n", "path": "pootle/apps/pootle_app/panels.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport locale as system_locale\nimport os\nfrom datetime import datetime\n\nfrom babel.dates import format_timedelta\n\nfrom django.utils import translation\n\n\nclass LocalDate(object):\n\n def __init__(self):\n if not self.locale_code and not os.name == \"nt\":\n self.set_locale()\n\n @property\n def default_locale(self):\n return translation.to_locale(translation.get_language())\n\n def set_locale(self):\n system_locale.setlocale(\n system_locale.LC_ALL,\n (self.default_locale, 'UTF-8'))\n\n @property\n def locale_code(self):\n return system_locale.getlocale()[0]\n\n def format_timesince(self, timestamp, locale=None):\n return format_timedelta(\n datetime.now()\n - datetime.fromtimestamp(\n timestamp),\n locale=(\n locale\n or self.locale_code\n or self.default_locale))\n\n\nlocaldate = LocalDate()\n\n\ndef timesince(timestamp, locale=None):\n return localdate.format_timesince(timestamp, locale=locale)\n", "path": "pootle/i18n/dates.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport re\n\nfrom django.utils.safestring import mark_safe\n\nfrom pootle.core.browser import get_table_headings\nfrom pootle.core.decorators import persistent_property\nfrom pootle.core.views.panels import TablePanel\n\nfrom pootle.i18n.dates import timesince\n\n\nclass ChildrenPanel(TablePanel):\n panel_name = \"children\"\n _table_fields = (\n 'name', 'progress', 'activity',\n 'total', 'need-translation',\n 'suggestions', 'critical')\n\n @property\n def table_fields(self):\n fields = (\n (\"name\", \"total\")\n if self.view.is_templates_context\n else self._table_fields)\n if self.view.has_admin_access:\n fields += ('last-updated', )\n return fields\n\n @property\n def children(self):\n return self.view.object_children\n\n @property\n def table(self):\n if self.view.object_children:\n return {\n 'id': self.view.view_name,\n 'fields': self.table_fields,\n 'headings': get_table_headings(self.table_fields),\n 'rows': self.view.object_children}\n\n @persistent_property\n def _content(self):\n return self.render()\n\n @property\n def child_update_times(self):\n _times = {}\n for child in self.children:\n if not child.get(\"stats\"):\n continue\n last_created_unit = (\n timesince(\n child[\"stats\"][\"last_created_unit\"][\"creation_time\"],\n locale=self.view.request_lang)\n if child[\"stats\"].get(\"last_created_unit\")\n else None)\n last_submission = (\n timesince(\n child[\"stats\"][\"last_submission\"][\"mtime\"],\n locale=self.view.request_lang)\n if child[\"stats\"].get(\"last_submission\")\n else None)\n _times[child[\"code\"]] = (last_submission, last_created_unit)\n return _times\n\n @property\n def content(self):\n return self.update_times(self._content)\n\n def get_context_data(self):\n return dict(\n table=self.table,\n can_translate=self.view.can_translate)\n\n def update_times(self, content):\n times = {}\n update_times = self.child_update_times.items()\n for name, (last_submission, last_created_unit) in update_times:\n if last_submission:\n times[\n \"_XXX_LAST_SUBMISSION_%s_LAST_SUBMISSION_XXX_\"\n % name] = last_submission\n if last_created_unit:\n times[\n \"_XXX_LAST_CREATED_%s_LAST_CREATED_XXX_\"\n % name] = last_created_unit\n if times:\n regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape, times.keys())))\n return mark_safe(\n regex.sub(\n lambda match: times[match.string[match.start():match.end()]],\n content))\n return content\n", "path": "pootle/apps/pootle_app/panels.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport locale as system_locale\nimport os\nfrom datetime import datetime\n\nfrom babel.dates import format_timedelta\n\nfrom django.utils import translation\n\n\nclass LocalDate(object):\n\n def __init__(self):\n if not self.locale_code and not os.name == \"nt\":\n self.set_locale()\n\n @property\n def default_locale(self):\n return translation.to_locale(translation.get_language())\n\n def set_locale(self):\n system_locale.setlocale(\n system_locale.LC_ALL,\n (self.default_locale, 'UTF-8'))\n\n @property\n def locale_code(self):\n return system_locale.getlocale()[0]\n\n def format_timesince(self, timestamp, locale=None):\n return format_timedelta(\n datetime.now()\n - datetime.fromtimestamp(\n timestamp),\n locale=(\n locale\n or self.locale_code\n or self.default_locale))\n\n\nlocaldate = LocalDate()\n\n\ndef timesince(timestamp, locale=None):\n if locale:\n locale = translation.to_locale(locale)\n return localdate.format_timesince(timestamp, locale=locale)\n", "path": "pootle/i18n/dates.py"}]} | 1,538 | 317 |
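The two-part fix above threads the request language down into `timesince()` and converts it with Django's `translation.to_locale()` before handing it to Babel, because `babel.dates.format_timedelta` expects locale identifiers of the `pt_BR` form rather than Django language codes like `pt-br`. A small standalone illustration of the Babel side only (the printed strings are indicative, not verified output):

```python
from datetime import timedelta

from babel.dates import format_timedelta

delta = timedelta(hours=3)

# Babel accepts locale identifiers such as "en_US" or "pt_BR"; a Django
# language code like "pt-br" must be converted first, which is what the
# added translation.to_locale(locale) call in the patch takes care of.
print(format_timedelta(delta, locale="en_US"))  # e.g. "3 hours"
print(format_timedelta(delta, locale="pt_BR"))  # e.g. "3 horas"
```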
gh_patches_debug_3513 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-578 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cookiecutter needs to always use utf-8 for writing files
I get this on Windows (pitty me):
```
File "c:\program files\python 3.5\lib\site-packages\cookiecutter\generate.py", line 318, in generate_files
run_hook('post_gen_project', project_dir, context)
File "c:\program files\python 3.5\lib\site-packages\cookiecutter\hooks.py", line 107, in run_hook
return run_script_with_context(script, project_dir, context)
File "c:\program files\python 3.5\lib\site-packages\cookiecutter\hooks.py", line 90, in run_script_with_context
temp.write(Template(contents).render(**context))
File "c:\program files\python 3.5\lib\tempfile.py", line 482, in func_wrapper
return func(*args, **kwargs)
File "c:\program files\python 3.5\lib\encodings\cp1252.py", line 19, in encode
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode character '\u0103' in position 1626: character maps to <undefined>
```
cookiecutter should pass `encoding='utf-8'` to `NamedTemporaryFile`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/hooks.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.hooks
6 ------------------
7
8 Functions for discovering and executing various cookiecutter hooks.
9 """
10
11 import io
12 import logging
13 import os
14 import subprocess
15 import sys
16 import tempfile
17
18 from jinja2 import Template
19
20 from cookiecutter import utils
21 from .exceptions import FailedHookException
22
23
24 _HOOKS = [
25 'pre_gen_project',
26 'post_gen_project',
27 # TODO: other hooks should be listed here
28 ]
29 EXIT_SUCCESS = 0
30
31
32 def find_hooks():
33 """
34 Must be called with the project template as the current working directory.
35 Returns a dict of all hook scripts provided.
36 Dict's key will be the hook/script's name, without extension, while
37 values will be the absolute path to the script.
38 Missing scripts will not be included in the returned dict.
39 """
40 hooks_dir = 'hooks'
41 r = {}
42 logging.debug('hooks_dir is {0}'.format(hooks_dir))
43 if not os.path.isdir(hooks_dir):
44 logging.debug('No hooks/ dir in template_dir')
45 return r
46 for f in os.listdir(hooks_dir):
47 basename = os.path.splitext(os.path.basename(f))[0]
48 if basename in _HOOKS:
49 r[basename] = os.path.abspath(os.path.join(hooks_dir, f))
50 return r
51
52
53 def run_script(script_path, cwd='.'):
54 """
55 Executes a script from a working directory.
56
57 :param script_path: Absolute path to the script to run.
58 :param cwd: The directory to run the script from.
59 """
60 run_thru_shell = sys.platform.startswith('win')
61 if script_path.endswith('.py'):
62 script_command = [sys.executable, script_path]
63 else:
64 script_command = [script_path]
65
66 utils.make_executable(script_path)
67
68 proc = subprocess.Popen(
69 script_command,
70 shell=run_thru_shell,
71 cwd=cwd
72 )
73 exit_status = proc.wait()
74 if exit_status != EXIT_SUCCESS:
75 raise FailedHookException(
76 "Hook script failed (exit status: %d)" % exit_status)
77
78
79 def run_script_with_context(script_path, cwd, context):
80 """
81 Executes a script after rendering with it Jinja.
82
83 :param script_path: Absolute path to the script to run.
84 :param cwd: The directory to run the script from.
85 :param context: Cookiecutter project template context.
86 """
87 _, extension = os.path.splitext(script_path)
88
89 contents = io.open(script_path, 'r', encoding='utf-8').read()
90
91 with tempfile.NamedTemporaryFile(
92 delete=False,
93 mode='w',
94 suffix=extension
95 ) as temp:
96 temp.write(Template(contents).render(**context))
97
98 run_script(temp.name, cwd)
99
100
101 def run_hook(hook_name, project_dir, context):
102 """
103 Try to find and execute a hook from the specified project directory.
104
105 :param hook_name: The hook to execute.
106 :param project_dir: The directory to execute the script from.
107 :param context: Cookiecutter project context.
108 """
109 script = find_hooks().get(hook_name)
110 if script is None:
111 logging.debug('No hooks found')
112 return
113 run_script_with_context(script, project_dir, context)
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py
--- a/cookiecutter/hooks.py
+++ b/cookiecutter/hooks.py
@@ -90,10 +90,11 @@
with tempfile.NamedTemporaryFile(
delete=False,
- mode='w',
+ mode='wb',
suffix=extension
) as temp:
- temp.write(Template(contents).render(**context))
+ output = Template(contents).render(**context)
+ temp.write(output.encode('utf-8'))
run_script(temp.name, cwd)
| {"golden_diff": "diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py\n--- a/cookiecutter/hooks.py\n+++ b/cookiecutter/hooks.py\n@@ -90,10 +90,11 @@\n \n with tempfile.NamedTemporaryFile(\n delete=False,\n- mode='w',\n+ mode='wb',\n suffix=extension\n ) as temp:\n- temp.write(Template(contents).render(**context))\n+ output = Template(contents).render(**context)\n+ temp.write(output.encode('utf-8'))\n \n run_script(temp.name, cwd)\n", "issue": "Cookiecutter needs to always use utf-8 for writing files\nI get this on Windows (pitty me):\n\n```\n File \"c:\\program files\\python 3.5\\lib\\site-packages\\cookiecutter\\generate.py\", line 318, in generate_files\n run_hook('post_gen_project', project_dir, context)\n File \"c:\\program files\\python 3.5\\lib\\site-packages\\cookiecutter\\hooks.py\", line 107, in run_hook\n return run_script_with_context(script, project_dir, context)\n File \"c:\\program files\\python 3.5\\lib\\site-packages\\cookiecutter\\hooks.py\", line 90, in run_script_with_context\n temp.write(Template(contents).render(**context))\n File \"c:\\program files\\python 3.5\\lib\\tempfile.py\", line 482, in func_wrapper\n return func(*args, **kwargs)\n File \"c:\\program files\\python 3.5\\lib\\encodings\\cp1252.py\", line 19, in encode\n return codecs.charmap_encode(input,self.errors,encoding_table)[0]\nUnicodeEncodeError: 'charmap' codec can't encode character '\\u0103' in position 1626: character maps to <undefined>\n```\n\ncookiecutter should pass `encoding='utf-8'` to `NamedTemporaryFile`\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.hooks\n------------------\n\nFunctions for discovering and executing various cookiecutter hooks.\n\"\"\"\n\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom jinja2 import Template\n\nfrom cookiecutter import utils\nfrom .exceptions import FailedHookException\n\n\n_HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n # TODO: other hooks should be listed here\n]\nEXIT_SUCCESS = 0\n\n\ndef find_hooks():\n \"\"\"\n Must be called with the project template as the current working directory.\n Returns a dict of all hook scripts provided.\n Dict's key will be the hook/script's name, without extension, while\n values will be the absolute path to the script.\n Missing scripts will not be included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n r = {}\n logging.debug('hooks_dir is {0}'.format(hooks_dir))\n if not os.path.isdir(hooks_dir):\n logging.debug('No hooks/ dir in template_dir')\n return r\n for f in os.listdir(hooks_dir):\n basename = os.path.splitext(os.path.basename(f))[0]\n if basename in _HOOKS:\n r[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n return r\n\n\ndef run_script(script_path, cwd='.'):\n \"\"\"\n Executes a script from a working directory.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n \"\"\"\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n proc = subprocess.Popen(\n script_command,\n shell=run_thru_shell,\n cwd=cwd\n )\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n \"Hook script failed (exit status: %d)\" % exit_status)\n\n\ndef run_script_with_context(script_path, cwd, context):\n \"\"\"\n Executes a script after rendering 
with it Jinja.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n :param context: Cookiecutter project template context.\n \"\"\"\n _, extension = os.path.splitext(script_path)\n\n contents = io.open(script_path, 'r', encoding='utf-8').read()\n\n with tempfile.NamedTemporaryFile(\n delete=False,\n mode='w',\n suffix=extension\n ) as temp:\n temp.write(Template(contents).render(**context))\n\n run_script(temp.name, cwd)\n\n\ndef run_hook(hook_name, project_dir, context):\n \"\"\"\n Try to find and execute a hook from the specified project directory.\n\n :param hook_name: The hook to execute.\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n script = find_hooks().get(hook_name)\n if script is None:\n logging.debug('No hooks found')\n return\n run_script_with_context(script, project_dir, context)\n", "path": "cookiecutter/hooks.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.hooks\n------------------\n\nFunctions for discovering and executing various cookiecutter hooks.\n\"\"\"\n\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom jinja2 import Template\n\nfrom cookiecutter import utils\nfrom .exceptions import FailedHookException\n\n\n_HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n # TODO: other hooks should be listed here\n]\nEXIT_SUCCESS = 0\n\n\ndef find_hooks():\n \"\"\"\n Must be called with the project template as the current working directory.\n Returns a dict of all hook scripts provided.\n Dict's key will be the hook/script's name, without extension, while\n values will be the absolute path to the script.\n Missing scripts will not be included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n r = {}\n logging.debug('hooks_dir is {0}'.format(hooks_dir))\n if not os.path.isdir(hooks_dir):\n logging.debug('No hooks/ dir in template_dir')\n return r\n for f in os.listdir(hooks_dir):\n basename = os.path.splitext(os.path.basename(f))[0]\n if basename in _HOOKS:\n r[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n return r\n\n\ndef run_script(script_path, cwd='.'):\n \"\"\"\n Executes a script from a working directory.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n \"\"\"\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n proc = subprocess.Popen(\n script_command,\n shell=run_thru_shell,\n cwd=cwd\n )\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n \"Hook script failed (exit status: %d)\" % exit_status)\n\n\ndef run_script_with_context(script_path, cwd, context):\n \"\"\"\n Executes a script after rendering with it Jinja.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n :param context: Cookiecutter project template context.\n \"\"\"\n _, extension = os.path.splitext(script_path)\n\n contents = io.open(script_path, 'r', encoding='utf-8').read()\n\n with tempfile.NamedTemporaryFile(\n delete=False,\n mode='wb',\n suffix=extension\n ) as temp:\n output = Template(contents).render(**context)\n temp.write(output.encode('utf-8'))\n\n run_script(temp.name, cwd)\n\n\ndef run_hook(hook_name, project_dir, 
context):\n \"\"\"\n Try to find and execute a hook from the specified project directory.\n\n :param hook_name: The hook to execute.\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n script = find_hooks().get(hook_name)\n if script is None:\n logging.debug('No hooks found')\n return\n run_script_with_context(script, project_dir, context)\n", "path": "cookiecutter/hooks.py"}]} | 1,528 | 127 |
gh_patches_debug_27938 | rasdani/github-patches | git_diff | sanic-org__sanic-2627 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Uvicorn ignores listener errors
**Describe the bug**
When an exception happens in a listener, the error is not reported and the server doesn't crash when using Uvicorn.
**Code snippet**
```python
from sanic import Sanic
from sanic.response import text
app = Sanic("MyHelloWorldApp")
@app.listener("before_server_start")
async def raises(app, loop):
print("Trying to run faster")
raise Exception("oh noes")
@app.get("/")
async def hello_world(request):
return text("Hello, world.")
```
```
~ uvicorn app:app
INFO: Started server process [49837]
INFO: Waiting for application startup.
[2022-01-25 14:57:34 +0100] [49837] [INFO]
┌────────────────────────────────────────────────────────────────┐
│ Sanic v21.12.1 │
│ │
├───────────────────────┬────────────────────────────────────────┤
│ │ mode: production, ASGI │
│ ▄███ █████ ██ │ server: sanic │
│ ██ │ python: 3.9.10 │
│ ▀███████ ███▄ │ platform: macOS-12.1-x86_64-i386-64bit │
│ ██ │ packages: sanic-routing==0.7.2 │
│ ████ ████████▀ │ │
│ │ │
│ Build Fast. Run Fast. │ │
└───────────────────────┴────────────────────────────────────────┘
/Users/andre.ericson/projects/sanic-uvicorn/.venv/lib/python3.9/site-packages/sanic/asgi.py:27: UserWarning: You have set a listener for "before_server_start" in ASGI mode. It will be executed as early as possible, but not before the ASGI server is started.
warnings.warn(
Trying to run faster
INFO: ASGI 'lifespan' protocol appears unsupported.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
```
**Expected behavior**
The server should crash, or at least log an error.
**Environment (please complete the following information):**
- MacOS
- python 3.9.10
```
pip list
Package Version
------------- ------------
aiofiles 0.8.0
asgiref 3.5.0
click 8.0.3
h11 0.13.0
httptools 0.3.0
multidict 5.2.0
pip 21.3.1
sanic 21.12.1
sanic-routing 0.7.2
setuptools 58.1.0
ujson 5.1.0
uvicorn 0.17.0.post1
uvloop 0.16.0
websockets 10.1
```
**Additional context**
Works as expected with `sanic app.app` and also with GunicornWorker.
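
For reference, here is a minimal sketch of how an ASGI application can signal a failed startup to the server under the lifespan protocol (the message types come from the ASGI spec; `run_startup_listeners` is a placeholder, and this is an illustration rather than Sanic's or Uvicorn's actual code):

```python
# Hypothetical ASGI callable; only the lifespan handshake is shown.
async def app(scope, receive, send):
    if scope["type"] == "lifespan":
        while True:
            message = await receive()
            if message["type"] == "lifespan.startup":
                try:
                    await run_startup_listeners()  # placeholder for the app's listeners
                except Exception as exc:
                    # Reporting the failure lets the server log it and abort startup
                    await send({"type": "lifespan.startup.failed", "message": str(exc)})
                else:
                    await send({"type": "lifespan.startup.complete"})
            elif message["type"] == "lifespan.shutdown":
                await send({"type": "lifespan.shutdown.complete"})
                return
```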
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/asgi.py`
Content:
```
1 from __future__ import annotations
2
3 import warnings
4
5 from typing import TYPE_CHECKING, Optional
6 from urllib.parse import quote
7
8 from sanic.compat import Header
9 from sanic.exceptions import ServerError
10 from sanic.helpers import Default
11 from sanic.http import Stage
12 from sanic.log import logger
13 from sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport
14 from sanic.request import Request
15 from sanic.response import BaseHTTPResponse
16 from sanic.server import ConnInfo
17 from sanic.server.websockets.connection import WebSocketConnection
18
19
20 if TYPE_CHECKING:
21 from sanic import Sanic
22
23
24 class Lifespan:
25 def __init__(self, asgi_app: ASGIApp) -> None:
26 self.asgi_app = asgi_app
27
28 if (
29 "server.init.before"
30 in self.asgi_app.sanic_app.signal_router.name_index
31 ):
32 logger.debug(
33 'You have set a listener for "before_server_start" '
34 "in ASGI mode. "
35 "It will be executed as early as possible, but not before "
36 "the ASGI server is started.",
37 extra={"verbosity": 1},
38 )
39 if (
40 "server.shutdown.after"
41 in self.asgi_app.sanic_app.signal_router.name_index
42 ):
43 logger.debug(
44 'You have set a listener for "after_server_stop" '
45 "in ASGI mode. "
46 "It will be executed as late as possible, but not after "
47 "the ASGI server is stopped.",
48 extra={"verbosity": 1},
49 )
50
51 async def startup(self) -> None:
52 """
53 Gather the listeners to fire on server start.
54 Because we are using a third-party server and not Sanic server, we do
55 not have access to fire anything BEFORE the server starts.
56 Therefore, we fire before_server_start and after_server_start
57 in sequence since the ASGI lifespan protocol only supports a single
58 startup event.
59 """
60 await self.asgi_app.sanic_app._startup()
61 await self.asgi_app.sanic_app._server_event("init", "before")
62 await self.asgi_app.sanic_app._server_event("init", "after")
63
64 if not isinstance(self.asgi_app.sanic_app.config.USE_UVLOOP, Default):
65 warnings.warn(
66 "You have set the USE_UVLOOP configuration option, but Sanic "
67 "cannot control the event loop when running in ASGI mode."
68 "This option will be ignored."
69 )
70
71 async def shutdown(self) -> None:
72 """
73 Gather the listeners to fire on server stop.
74 Because we are using a third-party server and not Sanic server, we do
75 not have access to fire anything AFTER the server stops.
76 Therefore, we fire before_server_stop and after_server_stop
77 in sequence since the ASGI lifespan protocol only supports a single
78 shutdown event.
79 """
80 await self.asgi_app.sanic_app._server_event("shutdown", "before")
81 await self.asgi_app.sanic_app._server_event("shutdown", "after")
82
83 async def __call__(
84 self, scope: ASGIScope, receive: ASGIReceive, send: ASGISend
85 ) -> None:
86 message = await receive()
87 if message["type"] == "lifespan.startup":
88 await self.startup()
89 await send({"type": "lifespan.startup.complete"})
90
91 message = await receive()
92 if message["type"] == "lifespan.shutdown":
93 await self.shutdown()
94 await send({"type": "lifespan.shutdown.complete"})
95
96
97 class ASGIApp:
98 sanic_app: Sanic
99 request: Request
100 transport: MockTransport
101 lifespan: Lifespan
102 ws: Optional[WebSocketConnection]
103 stage: Stage
104 response: Optional[BaseHTTPResponse]
105
106 def __init__(self) -> None:
107 self.ws = None
108
109 @classmethod
110 async def create(
111 cls, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend
112 ) -> "ASGIApp":
113 instance = cls()
114 instance.sanic_app = sanic_app
115 instance.transport = MockTransport(scope, receive, send)
116 instance.transport.loop = sanic_app.loop
117 instance.stage = Stage.IDLE
118 instance.response = None
119 setattr(instance.transport, "add_task", sanic_app.loop.create_task)
120
121 headers = Header(
122 [
123 (key.decode("latin-1"), value.decode("latin-1"))
124 for key, value in scope.get("headers", [])
125 ]
126 )
127 instance.lifespan = Lifespan(instance)
128
129 if scope["type"] == "lifespan":
130 await instance.lifespan(scope, receive, send)
131 else:
132 path = (
133 scope["path"][1:]
134 if scope["path"].startswith("/")
135 else scope["path"]
136 )
137 url = "/".join([scope.get("root_path", ""), quote(path)])
138 url_bytes = url.encode("latin-1")
139 url_bytes += b"?" + scope["query_string"]
140
141 if scope["type"] == "http":
142 version = scope["http_version"]
143 method = scope["method"]
144 elif scope["type"] == "websocket":
145 version = "1.1"
146 method = "GET"
147
148 instance.ws = instance.transport.create_websocket_connection(
149 send, receive
150 )
151 else:
152 raise ServerError("Received unknown ASGI scope")
153
154 request_class = sanic_app.request_class or Request
155 instance.request = request_class(
156 url_bytes,
157 headers,
158 version,
159 method,
160 instance.transport,
161 sanic_app,
162 )
163 instance.request.stream = instance
164 instance.request_body = True
165 instance.request.conn_info = ConnInfo(instance.transport)
166
167 await sanic_app.dispatch(
168 "http.lifecycle.request",
169 inline=True,
170 context={"request": instance.request},
171 fail_not_found=False,
172 )
173
174 return instance
175
176 async def read(self) -> Optional[bytes]:
177 """
178 Read and stream the body in chunks from an incoming ASGI message.
179 """
180 if self.stage is Stage.IDLE:
181 self.stage = Stage.REQUEST
182 message = await self.transport.receive()
183 body = message.get("body", b"")
184 if not message.get("more_body", False):
185 self.request_body = False
186 if not body:
187 return None
188 return body
189
190 async def __aiter__(self):
191 while self.request_body:
192 data = await self.read()
193 if data:
194 yield data
195
196 def respond(self, response: BaseHTTPResponse):
197 if self.stage is not Stage.HANDLER:
198 self.stage = Stage.FAILED
199 raise RuntimeError("Response already started")
200 if self.response is not None:
201 self.response.stream = None
202 response.stream, self.response = self, response
203 return response
204
205 async def send(self, data, end_stream):
206 self.stage = Stage.IDLE if end_stream else Stage.RESPONSE
207 if self.response:
208 response, self.response = self.response, None
209 await self.transport.send(
210 {
211 "type": "http.response.start",
212 "status": response.status,
213 "headers": response.processed_headers,
214 }
215 )
216 response_body = getattr(response, "body", None)
217 if response_body:
218 data = response_body + data if data else response_body
219 await self.transport.send(
220 {
221 "type": "http.response.body",
222 "body": data.encode() if hasattr(data, "encode") else data,
223 "more_body": not end_stream,
224 }
225 )
226
227 _asgi_single_callable = True # We conform to ASGI 3.0 single-callable
228
229 async def __call__(self) -> None:
230 """
231 Handle the incoming request.
232 """
233 try:
234 self.stage = Stage.HANDLER
235 await self.sanic_app.handle_request(self.request)
236 except Exception as e:
237 try:
238 await self.sanic_app.handle_exception(self.request, e)
239 except Exception as exc:
240 await self.sanic_app.handle_exception(self.request, exc, False)
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sanic/asgi.py b/sanic/asgi.py
--- a/sanic/asgi.py
+++ b/sanic/asgi.py
@@ -9,7 +9,7 @@
from sanic.exceptions import ServerError
from sanic.helpers import Default
from sanic.http import Stage
-from sanic.log import logger
+from sanic.log import error_logger, logger
from sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport
from sanic.request import Request
from sanic.response import BaseHTTPResponse
@@ -85,13 +85,27 @@
) -> None:
message = await receive()
if message["type"] == "lifespan.startup":
- await self.startup()
- await send({"type": "lifespan.startup.complete"})
+ try:
+ await self.startup()
+ except Exception as e:
+ error_logger.exception(e)
+ await send(
+ {"type": "lifespan.startup.failed", "message": str(e)}
+ )
+ else:
+ await send({"type": "lifespan.startup.complete"})
message = await receive()
if message["type"] == "lifespan.shutdown":
- await self.shutdown()
- await send({"type": "lifespan.shutdown.complete"})
+ try:
+ await self.shutdown()
+ except Exception as e:
+ error_logger.exception(e)
+ await send(
+ {"type": "lifespan.shutdown.failed", "message": str(e)}
+ )
+ else:
+ await send({"type": "lifespan.shutdown.complete"})
class ASGIApp:
| {"golden_diff": "diff --git a/sanic/asgi.py b/sanic/asgi.py\n--- a/sanic/asgi.py\n+++ b/sanic/asgi.py\n@@ -9,7 +9,7 @@\n from sanic.exceptions import ServerError\n from sanic.helpers import Default\n from sanic.http import Stage\n-from sanic.log import logger\n+from sanic.log import error_logger, logger\n from sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport\n from sanic.request import Request\n from sanic.response import BaseHTTPResponse\n@@ -85,13 +85,27 @@\n ) -> None:\n message = await receive()\n if message[\"type\"] == \"lifespan.startup\":\n- await self.startup()\n- await send({\"type\": \"lifespan.startup.complete\"})\n+ try:\n+ await self.startup()\n+ except Exception as e:\n+ error_logger.exception(e)\n+ await send(\n+ {\"type\": \"lifespan.startup.failed\", \"message\": str(e)}\n+ )\n+ else:\n+ await send({\"type\": \"lifespan.startup.complete\"})\n \n message = await receive()\n if message[\"type\"] == \"lifespan.shutdown\":\n- await self.shutdown()\n- await send({\"type\": \"lifespan.shutdown.complete\"})\n+ try:\n+ await self.shutdown()\n+ except Exception as e:\n+ error_logger.exception(e)\n+ await send(\n+ {\"type\": \"lifespan.shutdown.failed\", \"message\": str(e)}\n+ )\n+ else:\n+ await send({\"type\": \"lifespan.shutdown.complete\"})\n \n \n class ASGIApp:\n", "issue": "Uvicorn ignores listeners errors\n**Describe the bug**\r\nWhen an exception happens on a listener the error is not reported and the server doesn't crash when using Uvicorn.\r\n\r\n\r\n**Code snippet**\r\n```python\r\nfrom sanic import Sanic\r\nfrom sanic.response import text\r\n\r\napp = Sanic(\"MyHelloWorldApp\")\r\n\r\[email protected](\"before_server_start\")\r\nasync def raises(app, loop):\r\n print(\"Trying to run faster\")\r\n raise Exception(\"oh noes\")\r\n\r\[email protected](\"/\")\r\nasync def hello_world(request):\r\n return text(\"Hello, world.\")\r\n```\r\n\r\n```\r\n~ uvicorn app:app\r\nINFO: Started server process [49837]\r\nINFO: Waiting for application startup.\r\n[2022-01-25 14:57:34 +0100] [49837] [INFO]\r\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\r\n \u2502 Sanic v21.12.1 \u2502\r\n \u2502 \u2502\r\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\r\n \u2502 \u2502 mode: production, ASGI \u2502\r\n \u2502 \u2584\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588 \u2588\u2588 \u2502 server: sanic \u2502\r\n \u2502 \u2588\u2588 \u2502 python: 3.9.10 \u2502\r\n \u2502 \u2580\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2584 \u2502 platform: macOS-12.1-x86_64-i386-64bit \u2502\r\n \u2502 \u2588\u2588 \u2502 packages: sanic-routing==0.7.2 \u2502\r\n \u2502 \u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2580 \u2502 \u2502\r\n \u2502 \u2502 \u2502\r\n \u2502 Build Fast. Run Fast. 
\u2502 \u2502\r\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\r\n\r\n/Users/andre.ericson/projects/sanic-uvicorn/.venv/lib/python3.9/site-packages/sanic/asgi.py:27: UserWarning: You have set a listener for \"before_server_start\" in ASGI mode. It will be executed as early as possible, but not before the ASGI server is started.\r\n warnings.warn(\r\nTrying to run faster\r\nINFO: ASGI 'lifespan' protocol appears unsupported.\r\nINFO: Application startup complete.\r\nINFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)\r\n``` \r\n**Expected behavior**\r\nThe server should crash, or at least log an error.\r\n\r\n\r\n**Environment (please complete the following information):**\r\n - MacOS\r\n - python 3.9.10\r\n ```\r\n pip list\r\nPackage Version\r\n------------- ------------\r\naiofiles 0.8.0\r\nasgiref 3.5.0\r\nclick 8.0.3\r\nh11 0.13.0\r\nhttptools 0.3.0\r\nmultidict 5.2.0\r\npip 21.3.1\r\nsanic 21.12.1\r\nsanic-routing 0.7.2\r\nsetuptools 58.1.0\r\nujson 5.1.0\r\nuvicorn 0.17.0.post1\r\nuvloop 0.16.0\r\nwebsockets 10.1\r\n ```\r\n\r\n\r\n**Additional context**\r\nWorks as expected with `sanic app.app` and also with GunicornWorker.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport warnings\n\nfrom typing import TYPE_CHECKING, Optional\nfrom urllib.parse import quote\n\nfrom sanic.compat import Header\nfrom sanic.exceptions import ServerError\nfrom sanic.helpers import Default\nfrom sanic.http import Stage\nfrom sanic.log import logger\nfrom sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport\nfrom sanic.request import Request\nfrom sanic.response import BaseHTTPResponse\nfrom sanic.server import ConnInfo\nfrom sanic.server.websockets.connection import WebSocketConnection\n\n\nif TYPE_CHECKING:\n from sanic import Sanic\n\n\nclass Lifespan:\n def __init__(self, asgi_app: ASGIApp) -> None:\n self.asgi_app = asgi_app\n\n if (\n \"server.init.before\"\n in self.asgi_app.sanic_app.signal_router.name_index\n ):\n logger.debug(\n 'You have set a listener for \"before_server_start\" '\n \"in ASGI mode. \"\n \"It will be executed as early as possible, but not before \"\n \"the ASGI server is started.\",\n extra={\"verbosity\": 1},\n )\n if (\n \"server.shutdown.after\"\n in self.asgi_app.sanic_app.signal_router.name_index\n ):\n logger.debug(\n 'You have set a listener for \"after_server_stop\" '\n \"in ASGI mode. 
\"\n \"It will be executed as late as possible, but not after \"\n \"the ASGI server is stopped.\",\n extra={\"verbosity\": 1},\n )\n\n async def startup(self) -> None:\n \"\"\"\n Gather the listeners to fire on server start.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything BEFORE the server starts.\n Therefore, we fire before_server_start and after_server_start\n in sequence since the ASGI lifespan protocol only supports a single\n startup event.\n \"\"\"\n await self.asgi_app.sanic_app._startup()\n await self.asgi_app.sanic_app._server_event(\"init\", \"before\")\n await self.asgi_app.sanic_app._server_event(\"init\", \"after\")\n\n if not isinstance(self.asgi_app.sanic_app.config.USE_UVLOOP, Default):\n warnings.warn(\n \"You have set the USE_UVLOOP configuration option, but Sanic \"\n \"cannot control the event loop when running in ASGI mode.\"\n \"This option will be ignored.\"\n )\n\n async def shutdown(self) -> None:\n \"\"\"\n Gather the listeners to fire on server stop.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything AFTER the server stops.\n Therefore, we fire before_server_stop and after_server_stop\n in sequence since the ASGI lifespan protocol only supports a single\n shutdown event.\n \"\"\"\n await self.asgi_app.sanic_app._server_event(\"shutdown\", \"before\")\n await self.asgi_app.sanic_app._server_event(\"shutdown\", \"after\")\n\n async def __call__(\n self, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> None:\n message = await receive()\n if message[\"type\"] == \"lifespan.startup\":\n await self.startup()\n await send({\"type\": \"lifespan.startup.complete\"})\n\n message = await receive()\n if message[\"type\"] == \"lifespan.shutdown\":\n await self.shutdown()\n await send({\"type\": \"lifespan.shutdown.complete\"})\n\n\nclass ASGIApp:\n sanic_app: Sanic\n request: Request\n transport: MockTransport\n lifespan: Lifespan\n ws: Optional[WebSocketConnection]\n stage: Stage\n response: Optional[BaseHTTPResponse]\n\n def __init__(self) -> None:\n self.ws = None\n\n @classmethod\n async def create(\n cls, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> \"ASGIApp\":\n instance = cls()\n instance.sanic_app = sanic_app\n instance.transport = MockTransport(scope, receive, send)\n instance.transport.loop = sanic_app.loop\n instance.stage = Stage.IDLE\n instance.response = None\n setattr(instance.transport, \"add_task\", sanic_app.loop.create_task)\n\n headers = Header(\n [\n (key.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for key, value in scope.get(\"headers\", [])\n ]\n )\n instance.lifespan = Lifespan(instance)\n\n if scope[\"type\"] == \"lifespan\":\n await instance.lifespan(scope, receive, send)\n else:\n path = (\n scope[\"path\"][1:]\n if scope[\"path\"].startswith(\"/\")\n else scope[\"path\"]\n )\n url = \"/\".join([scope.get(\"root_path\", \"\"), quote(path)])\n url_bytes = url.encode(\"latin-1\")\n url_bytes += b\"?\" + scope[\"query_string\"]\n\n if scope[\"type\"] == \"http\":\n version = scope[\"http_version\"]\n method = scope[\"method\"]\n elif scope[\"type\"] == \"websocket\":\n version = \"1.1\"\n method = \"GET\"\n\n instance.ws = instance.transport.create_websocket_connection(\n send, receive\n )\n else:\n raise ServerError(\"Received unknown ASGI scope\")\n\n request_class = sanic_app.request_class or Request\n instance.request = request_class(\n url_bytes,\n headers,\n version,\n method,\n 
instance.transport,\n sanic_app,\n )\n instance.request.stream = instance\n instance.request_body = True\n instance.request.conn_info = ConnInfo(instance.transport)\n\n await sanic_app.dispatch(\n \"http.lifecycle.request\",\n inline=True,\n context={\"request\": instance.request},\n fail_not_found=False,\n )\n\n return instance\n\n async def read(self) -> Optional[bytes]:\n \"\"\"\n Read and stream the body in chunks from an incoming ASGI message.\n \"\"\"\n if self.stage is Stage.IDLE:\n self.stage = Stage.REQUEST\n message = await self.transport.receive()\n body = message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n self.request_body = False\n if not body:\n return None\n return body\n\n async def __aiter__(self):\n while self.request_body:\n data = await self.read()\n if data:\n yield data\n\n def respond(self, response: BaseHTTPResponse):\n if self.stage is not Stage.HANDLER:\n self.stage = Stage.FAILED\n raise RuntimeError(\"Response already started\")\n if self.response is not None:\n self.response.stream = None\n response.stream, self.response = self, response\n return response\n\n async def send(self, data, end_stream):\n self.stage = Stage.IDLE if end_stream else Stage.RESPONSE\n if self.response:\n response, self.response = self.response, None\n await self.transport.send(\n {\n \"type\": \"http.response.start\",\n \"status\": response.status,\n \"headers\": response.processed_headers,\n }\n )\n response_body = getattr(response, \"body\", None)\n if response_body:\n data = response_body + data if data else response_body\n await self.transport.send(\n {\n \"type\": \"http.response.body\",\n \"body\": data.encode() if hasattr(data, \"encode\") else data,\n \"more_body\": not end_stream,\n }\n )\n\n _asgi_single_callable = True # We conform to ASGI 3.0 single-callable\n\n async def __call__(self) -> None:\n \"\"\"\n Handle the incoming request.\n \"\"\"\n try:\n self.stage = Stage.HANDLER\n await self.sanic_app.handle_request(self.request)\n except Exception as e:\n try:\n await self.sanic_app.handle_exception(self.request, e)\n except Exception as exc:\n await self.sanic_app.handle_exception(self.request, exc, False)\n", "path": "sanic/asgi.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport warnings\n\nfrom typing import TYPE_CHECKING, Optional\nfrom urllib.parse import quote\n\nfrom sanic.compat import Header\nfrom sanic.exceptions import ServerError\nfrom sanic.helpers import Default\nfrom sanic.http import Stage\nfrom sanic.log import error_logger, logger\nfrom sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport\nfrom sanic.request import Request\nfrom sanic.response import BaseHTTPResponse\nfrom sanic.server import ConnInfo\nfrom sanic.server.websockets.connection import WebSocketConnection\n\n\nif TYPE_CHECKING:\n from sanic import Sanic\n\n\nclass Lifespan:\n def __init__(self, asgi_app: ASGIApp) -> None:\n self.asgi_app = asgi_app\n\n if (\n \"server.init.before\"\n in self.asgi_app.sanic_app.signal_router.name_index\n ):\n logger.debug(\n 'You have set a listener for \"before_server_start\" '\n \"in ASGI mode. \"\n \"It will be executed as early as possible, but not before \"\n \"the ASGI server is started.\",\n extra={\"verbosity\": 1},\n )\n if (\n \"server.shutdown.after\"\n in self.asgi_app.sanic_app.signal_router.name_index\n ):\n logger.debug(\n 'You have set a listener for \"after_server_stop\" '\n \"in ASGI mode. 
\"\n \"It will be executed as late as possible, but not after \"\n \"the ASGI server is stopped.\",\n extra={\"verbosity\": 1},\n )\n\n async def startup(self) -> None:\n \"\"\"\n Gather the listeners to fire on server start.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything BEFORE the server starts.\n Therefore, we fire before_server_start and after_server_start\n in sequence since the ASGI lifespan protocol only supports a single\n startup event.\n \"\"\"\n await self.asgi_app.sanic_app._startup()\n await self.asgi_app.sanic_app._server_event(\"init\", \"before\")\n await self.asgi_app.sanic_app._server_event(\"init\", \"after\")\n\n if not isinstance(self.asgi_app.sanic_app.config.USE_UVLOOP, Default):\n warnings.warn(\n \"You have set the USE_UVLOOP configuration option, but Sanic \"\n \"cannot control the event loop when running in ASGI mode.\"\n \"This option will be ignored.\"\n )\n\n async def shutdown(self) -> None:\n \"\"\"\n Gather the listeners to fire on server stop.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything AFTER the server stops.\n Therefore, we fire before_server_stop and after_server_stop\n in sequence since the ASGI lifespan protocol only supports a single\n shutdown event.\n \"\"\"\n await self.asgi_app.sanic_app._server_event(\"shutdown\", \"before\")\n await self.asgi_app.sanic_app._server_event(\"shutdown\", \"after\")\n\n async def __call__(\n self, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> None:\n message = await receive()\n if message[\"type\"] == \"lifespan.startup\":\n try:\n await self.startup()\n except Exception as e:\n error_logger.exception(e)\n await send(\n {\"type\": \"lifespan.startup.failed\", \"message\": str(e)}\n )\n else:\n await send({\"type\": \"lifespan.startup.complete\"})\n\n message = await receive()\n if message[\"type\"] == \"lifespan.shutdown\":\n try:\n await self.shutdown()\n except Exception as e:\n error_logger.exception(e)\n await send(\n {\"type\": \"lifespan.shutdown.failed\", \"message\": str(e)}\n )\n else:\n await send({\"type\": \"lifespan.shutdown.complete\"})\n\n\nclass ASGIApp:\n sanic_app: Sanic\n request: Request\n transport: MockTransport\n lifespan: Lifespan\n ws: Optional[WebSocketConnection]\n stage: Stage\n response: Optional[BaseHTTPResponse]\n\n def __init__(self) -> None:\n self.ws = None\n\n @classmethod\n async def create(\n cls, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> \"ASGIApp\":\n instance = cls()\n instance.sanic_app = sanic_app\n instance.transport = MockTransport(scope, receive, send)\n instance.transport.loop = sanic_app.loop\n instance.stage = Stage.IDLE\n instance.response = None\n setattr(instance.transport, \"add_task\", sanic_app.loop.create_task)\n\n headers = Header(\n [\n (key.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for key, value in scope.get(\"headers\", [])\n ]\n )\n instance.lifespan = Lifespan(instance)\n\n if scope[\"type\"] == \"lifespan\":\n await instance.lifespan(scope, receive, send)\n else:\n path = (\n scope[\"path\"][1:]\n if scope[\"path\"].startswith(\"/\")\n else scope[\"path\"]\n )\n url = \"/\".join([scope.get(\"root_path\", \"\"), quote(path)])\n url_bytes = url.encode(\"latin-1\")\n url_bytes += b\"?\" + scope[\"query_string\"]\n\n if scope[\"type\"] == \"http\":\n version = scope[\"http_version\"]\n method = scope[\"method\"]\n elif scope[\"type\"] == \"websocket\":\n version = \"1.1\"\n method = 
\"GET\"\n\n instance.ws = instance.transport.create_websocket_connection(\n send, receive\n )\n else:\n raise ServerError(\"Received unknown ASGI scope\")\n\n request_class = sanic_app.request_class or Request\n instance.request = request_class(\n url_bytes,\n headers,\n version,\n method,\n instance.transport,\n sanic_app,\n )\n instance.request.stream = instance\n instance.request_body = True\n instance.request.conn_info = ConnInfo(instance.transport)\n\n await sanic_app.dispatch(\n \"http.lifecycle.request\",\n inline=True,\n context={\"request\": instance.request},\n fail_not_found=False,\n )\n\n return instance\n\n async def read(self) -> Optional[bytes]:\n \"\"\"\n Read and stream the body in chunks from an incoming ASGI message.\n \"\"\"\n if self.stage is Stage.IDLE:\n self.stage = Stage.REQUEST\n message = await self.transport.receive()\n body = message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n self.request_body = False\n if not body:\n return None\n return body\n\n async def __aiter__(self):\n while self.request_body:\n data = await self.read()\n if data:\n yield data\n\n def respond(self, response: BaseHTTPResponse):\n if self.stage is not Stage.HANDLER:\n self.stage = Stage.FAILED\n raise RuntimeError(\"Response already started\")\n if self.response is not None:\n self.response.stream = None\n response.stream, self.response = self, response\n return response\n\n async def send(self, data, end_stream):\n self.stage = Stage.IDLE if end_stream else Stage.RESPONSE\n if self.response:\n response, self.response = self.response, None\n await self.transport.send(\n {\n \"type\": \"http.response.start\",\n \"status\": response.status,\n \"headers\": response.processed_headers,\n }\n )\n response_body = getattr(response, \"body\", None)\n if response_body:\n data = response_body + data if data else response_body\n await self.transport.send(\n {\n \"type\": \"http.response.body\",\n \"body\": data.encode() if hasattr(data, \"encode\") else data,\n \"more_body\": not end_stream,\n }\n )\n\n _asgi_single_callable = True # We conform to ASGI 3.0 single-callable\n\n async def __call__(self) -> None:\n \"\"\"\n Handle the incoming request.\n \"\"\"\n try:\n self.stage = Stage.HANDLER\n await self.sanic_app.handle_request(self.request)\n except Exception as e:\n try:\n await self.sanic_app.handle_exception(self.request, e)\n except Exception as exc:\n await self.sanic_app.handle_exception(self.request, exc, False)\n", "path": "sanic/asgi.py"}]} | 3,386 | 366 |
gh_patches_debug_13769 | rasdani/github-patches | git_diff | spack__spack-12203 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
spack module (tcl|lmod|dotkit) loads: does not work with upstream packages
`spack module * loads` does not work with upstream packages.
### Steps to reproduce the issue
```console
$ spack module <module-type> loads <package_in_upstream>
```
### Error Message
```console
==> Error: 'module' object has no attribute 'upstream_module'
```
### Information on your system
I configured `upstreams.yaml` like this
```yaml
upstreams:
myupstream:
install_tree: <UPSTREAM_SPACK_ROOT>/opt/spack
modules:
tcl: <UPSTREAM_SPACK_ROOT>/share/spack/modules
```
fixes spack module loads for upstream packages
Fixes #12062
It seems that a nonexistent function was used. I replaced it with the one also used by the `find` sub-command.
@scheibelp, it seems the last change to this section was from you. I tried to track down whether this function ever existed, but with no luck. I mentioned you just because you may be able to check quickly whether this is the right fix.
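
Roughly, the change replaces the per-spec branching in `loads` with the same helper that `find` already calls (a sketch only; the exact patch appears below in this record):

```python
# Sketch of the replacement body inside loads(); mirrors find()'s use of
# spack.modules.common.get_module. The third argument corresponds to find's
# full_path flag, so False yields module use names. Not the verbatim patch.
modules = list(
    (spec, spack.modules.common.get_module(module_type, spec, False))
    for spec in specs)
```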
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/spack/spack/cmd/modules/__init__.py`
Content:
```
1 # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 """Implementation details of the ``spack module`` command."""
7
8 import collections
9 import os.path
10 import shutil
11 import sys
12
13 from llnl.util import filesystem, tty
14
15 import spack.cmd
16 import spack.modules
17 import spack.repo
18 import spack.modules.common
19
20 import spack.cmd.common.arguments as arguments
21
22 description = "manipulate module files"
23 section = "environment"
24 level = "short"
25
26
27 def setup_parser(subparser):
28 sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='subparser_name')
29
30 refresh_parser = sp.add_parser('refresh', help='regenerate module files')
31 refresh_parser.add_argument(
32 '--delete-tree',
33 help='delete the module file tree before refresh',
34 action='store_true'
35 )
36 refresh_parser.add_argument(
37 '--upstream-modules',
38 help='generate modules for packages installed upstream',
39 action='store_true'
40 )
41 arguments.add_common_arguments(
42 refresh_parser, ['constraint', 'yes_to_all']
43 )
44
45 find_parser = sp.add_parser('find', help='find module files for packages')
46 find_parser.add_argument(
47 '--full-path',
48 help='display full path to module file',
49 action='store_true'
50 )
51 arguments.add_common_arguments(
52 find_parser, ['constraint', 'recurse_dependencies']
53 )
54
55 rm_parser = sp.add_parser('rm', help='remove module files')
56 arguments.add_common_arguments(
57 rm_parser, ['constraint', 'yes_to_all']
58 )
59
60 loads_parser = sp.add_parser(
61 'loads',
62 help='prompt the list of modules associated with a constraint'
63 )
64 add_loads_arguments(loads_parser)
65 arguments.add_common_arguments(loads_parser, ['constraint'])
66
67 return sp
68
69
70 def add_loads_arguments(subparser):
71 subparser.add_argument(
72 '--input-only', action='store_false', dest='shell',
73 help='generate input for module command (instead of a shell script)'
74 )
75 subparser.add_argument(
76 '-p', '--prefix', dest='prefix', default='',
77 help='prepend to module names when issuing module load commands'
78 )
79 subparser.add_argument(
80 '-x', '--exclude', dest='exclude', action='append', default=[],
81 help="exclude package from output; may be specified multiple times"
82 )
83 arguments.add_common_arguments(
84 subparser, ['recurse_dependencies']
85 )
86
87
88 class MultipleSpecsMatch(Exception):
89 """Raised when multiple specs match a constraint, in a context where
90 this is not allowed.
91 """
92
93
94 class NoSpecMatches(Exception):
95 """Raised when no spec matches a constraint, in a context where
96 this is not allowed.
97 """
98
99
100 def one_spec_or_raise(specs):
101 """Ensures exactly one spec has been selected, or raises the appropriate
102 exception.
103 """
104 # Ensure a single spec matches the constraint
105 if len(specs) == 0:
106 raise NoSpecMatches()
107 if len(specs) > 1:
108 raise MultipleSpecsMatch()
109
110 # Get the spec and module type
111 return specs[0]
112
113
114 def loads(module_type, specs, args, out=sys.stdout):
115 """Prompt the list of modules associated with a list of specs"""
116
117 # Get a comprehensive list of specs
118 if args.recurse_dependencies:
119 specs_from_user_constraint = specs[:]
120 specs = []
121 # FIXME : during module file creation nodes seem to be visited
122 # FIXME : multiple times even if cover='nodes' is given. This
123 # FIXME : work around permits to get a unique list of spec anyhow.
124 # FIXME : (same problem as in spack/modules.py)
125 seen = set()
126 seen_add = seen.add
127 for spec in specs_from_user_constraint:
128 specs.extend(
129 [item for item in spec.traverse(order='post', cover='nodes')
130 if not (item in seen or seen_add(item))]
131 )
132
133 module_cls = spack.modules.module_types[module_type]
134 modules = list()
135 for spec in specs:
136 if os.path.exists(module_cls(spec).layout.filename):
137 modules.append((spec, module_cls(spec).layout.use_name))
138 elif spec.package.installed_upstream:
139 tty.debug("Using upstream module for {0}".format(spec))
140 module = spack.modules.common.upstream_module(spec, module_type)
141 modules.append((spec, module.use_name))
142
143 module_commands = {
144 'tcl': 'module load ',
145 'lmod': 'module load ',
146 'dotkit': 'use '
147 }
148
149 d = {
150 'command': '' if not args.shell else module_commands[module_type],
151 'prefix': args.prefix
152 }
153
154 exclude_set = set(args.exclude)
155 prompt_template = '{comment}{exclude}{command}{prefix}{name}'
156 for spec, mod in modules:
157 d['exclude'] = '## ' if spec.name in exclude_set else ''
158 d['comment'] = '' if not args.shell else '# {0}\n'.format(
159 spec.format())
160 d['name'] = mod
161 out.write(prompt_template.format(**d))
162 out.write('\n')
163
164
165 def find(module_type, specs, args):
166 """Retrieve paths or use names of module files"""
167
168 single_spec = one_spec_or_raise(specs)
169
170 if args.recurse_dependencies:
171 specs_to_retrieve = list(
172 single_spec.traverse(order='post', cover='nodes',
173 deptype=('link', 'run')))
174 else:
175 specs_to_retrieve = [single_spec]
176
177 try:
178 modules = [spack.modules.common.get_module(module_type, spec,
179 args.full_path)
180 for spec in specs_to_retrieve]
181 except spack.modules.common.ModuleNotFoundError as e:
182 tty.die(e.message)
183 print(' '.join(modules))
184
185
186 def rm(module_type, specs, args):
187 """Deletes the module files associated with every spec in specs, for every
188 module type in module types.
189 """
190
191 module_cls = spack.modules.module_types[module_type]
192 module_exist = lambda x: os.path.exists(module_cls(x).layout.filename)
193
194 specs_with_modules = [spec for spec in specs if module_exist(spec)]
195
196 modules = [module_cls(spec) for spec in specs_with_modules]
197
198 if not modules:
199 tty.die('No module file matches your query')
200
201 # Ask for confirmation
202 if not args.yes_to_all:
203 msg = 'You are about to remove {0} module files for:\n'
204 tty.msg(msg.format(module_type))
205 spack.cmd.display_specs(specs_with_modules, long=True)
206 print('')
207 answer = tty.get_yes_or_no('Do you want to proceed?')
208 if not answer:
209 tty.die('Will not remove any module files')
210
211 # Remove the module files
212 for s in modules:
213 s.remove()
214
215
216 def refresh(module_type, specs, args):
217 """Regenerates the module files for every spec in specs and every module
218 type in module types.
219 """
220
221 # Prompt a message to the user about what is going to change
222 if not specs:
223 tty.msg('No package matches your query')
224 return
225
226 if not args.upstream_modules:
227 specs = list(s for s in specs if not s.package.installed_upstream)
228
229 if not args.yes_to_all:
230 msg = 'You are about to regenerate {types} module files for:\n'
231 tty.msg(msg.format(types=module_type))
232 spack.cmd.display_specs(specs, long=True)
233 print('')
234 answer = tty.get_yes_or_no('Do you want to proceed?')
235 if not answer:
236 tty.die('Module file regeneration aborted.')
237
238 # Cycle over the module types and regenerate module files
239
240 cls = spack.modules.module_types[module_type]
241
242 # Skip unknown packages.
243 writers = [
244 cls(spec) for spec in specs
245 if spack.repo.path.exists(spec.name)]
246
247 # Filter blacklisted packages early
248 writers = [x for x in writers if not x.conf.blacklisted]
249
250 # Detect name clashes in module files
251 file2writer = collections.defaultdict(list)
252 for item in writers:
253 file2writer[item.layout.filename].append(item)
254
255 if len(file2writer) != len(writers):
256 message = 'Name clashes detected in module files:\n'
257 for filename, writer_list in file2writer.items():
258 if len(writer_list) > 1:
259 message += '\nfile: {0}\n'.format(filename)
260 for x in writer_list:
261 message += 'spec: {0}\n'.format(x.spec.format())
262 tty.error(message)
263 tty.error('Operation aborted')
264 raise SystemExit(1)
265
266 if len(writers) == 0:
267 msg = 'Nothing to be done for {0} module files.'
268 tty.msg(msg.format(module_type))
269 return
270
271 # If we arrived here we have at least one writer
272 module_type_root = writers[0].layout.dirname()
273 spack.modules.common.generate_module_index(module_type_root, writers)
274 # Proceed regenerating module files
275 tty.msg('Regenerating {name} module files'.format(name=module_type))
276 if os.path.isdir(module_type_root) and args.delete_tree:
277 shutil.rmtree(module_type_root, ignore_errors=False)
278 filesystem.mkdirp(module_type_root)
279 for x in writers:
280 try:
281 x.write(overwrite=True)
282 except Exception as e:
283 tty.debug(e)
284 msg = 'Could not write module file [{0}]'
285 tty.warn(msg.format(x.layout.filename))
286 tty.warn('\t--> {0} <--'.format(str(e)))
287
288
289 #: Dictionary populated with the list of sub-commands.
290 #: Each sub-command must be callable and accept 3 arguments:
291 #:
292 #: - module_type: the type of module it refers to
293 #: - specs : the list of specs to be processed
294 #: - args : namespace containing the parsed command line arguments
295 callbacks = {
296 'refresh': refresh,
297 'rm': rm,
298 'find': find,
299 'loads': loads
300 }
301
302
303 def modules_cmd(parser, args, module_type, callbacks=callbacks):
304
305 # Qualifiers to be used when querying the db for specs
306 constraint_qualifiers = {
307 'refresh': {
308 'installed': True,
309 'known': True
310 },
311 }
312 query_args = constraint_qualifiers.get(args.subparser_name, {})
313
314 # Get the specs that match the query from the DB
315 specs = args.specs(**query_args)
316
317 try:
318
319 callbacks[args.subparser_name](module_type, specs, args)
320
321 except MultipleSpecsMatch:
322 msg = "the constraint '{query}' matches multiple packages:\n"
323 for s in specs:
324 spec_fmt = '{hash:7} {name}{@version}{%compiler}'
325 spec_fmt += '{compiler_flags}{variants}{arch=architecture}'
326 msg += '\t' + s.cformat(spec_fmt) + '\n'
327 tty.error(msg.format(query=args.constraint))
328 tty.die('In this context exactly **one** match is needed: please specify your constraints better.') # NOQA: ignore=E501
329
330 except NoSpecMatches:
331 msg = "the constraint '{query}' matches no package."
332 tty.error(msg.format(query=args.constraint))
333 tty.die('In this context exactly **one** match is needed: please specify your constraints better.') # NOQA: ignore=E501
334
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/spack/spack/cmd/modules/__init__.py b/lib/spack/spack/cmd/modules/__init__.py
--- a/lib/spack/spack/cmd/modules/__init__.py
+++ b/lib/spack/spack/cmd/modules/__init__.py
@@ -130,15 +130,9 @@
if not (item in seen or seen_add(item))]
)
- module_cls = spack.modules.module_types[module_type]
- modules = list()
- for spec in specs:
- if os.path.exists(module_cls(spec).layout.filename):
- modules.append((spec, module_cls(spec).layout.use_name))
- elif spec.package.installed_upstream:
- tty.debug("Using upstream module for {0}".format(spec))
- module = spack.modules.common.upstream_module(spec, module_type)
- modules.append((spec, module.use_name))
+ modules = list(
+ (spec, spack.modules.common.get_module(module_type, spec, False))
+ for spec in specs)
module_commands = {
'tcl': 'module load ',
| {"golden_diff": "diff --git a/lib/spack/spack/cmd/modules/__init__.py b/lib/spack/spack/cmd/modules/__init__.py\n--- a/lib/spack/spack/cmd/modules/__init__.py\n+++ b/lib/spack/spack/cmd/modules/__init__.py\n@@ -130,15 +130,9 @@\n if not (item in seen or seen_add(item))]\n )\n \n- module_cls = spack.modules.module_types[module_type]\n- modules = list()\n- for spec in specs:\n- if os.path.exists(module_cls(spec).layout.filename):\n- modules.append((spec, module_cls(spec).layout.use_name))\n- elif spec.package.installed_upstream:\n- tty.debug(\"Using upstream module for {0}\".format(spec))\n- module = spack.modules.common.upstream_module(spec, module_type)\n- modules.append((spec, module.use_name))\n+ modules = list(\n+ (spec, spack.modules.common.get_module(module_type, spec, False))\n+ for spec in specs)\n \n module_commands = {\n 'tcl': 'module load ',\n", "issue": "spack module (tcl|lcov|dotkit) loads: does not work with upstream packages\n`spack module * loads` does not work with upstream packages.\r\n\r\n### Steps to reproduce the issue\r\n\r\n```console\r\n$ spack module <module-type> loads <package_in_upstream>\r\n```\r\n\r\n### Error Message\r\n\r\n```console\r\n==> Error: 'module' object has no attribute 'upstream_module'\r\n```\r\n\r\n### Information on your system\r\n\r\nI configured `upstreams.yaml` like this\r\n\r\n```yaml\r\nupstreams:\r\n myupstream:\r\n install_tree: <UPSTREAM_SPACK_ROOT>/opt/spack\r\n modules:\r\n tcl: <UPSTREAM_SPACK_ROOT>/share/spack/modules\r\n```\nfixes spack module loads for upstream packages\nFixes #12062\r\n\r\nIt seems that a non-existing function was used. I replaced it with the one used also by the `find` sub-command.\r\n\r\n@scheibelp, it seems that last change to this section was from you, I tried tracking if this function was existing before, but with no luck. I mentioned you just because you may be able to check quickly if this is the right fix.\n", "before_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\"\"\"Implementation details of the ``spack module`` command.\"\"\"\n\nimport collections\nimport os.path\nimport shutil\nimport sys\n\nfrom llnl.util import filesystem, tty\n\nimport spack.cmd\nimport spack.modules\nimport spack.repo\nimport spack.modules.common\n\nimport spack.cmd.common.arguments as arguments\n\ndescription = \"manipulate module files\"\nsection = \"environment\"\nlevel = \"short\"\n\n\ndef setup_parser(subparser):\n sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='subparser_name')\n\n refresh_parser = sp.add_parser('refresh', help='regenerate module files')\n refresh_parser.add_argument(\n '--delete-tree',\n help='delete the module file tree before refresh',\n action='store_true'\n )\n refresh_parser.add_argument(\n '--upstream-modules',\n help='generate modules for packages installed upstream',\n action='store_true'\n )\n arguments.add_common_arguments(\n refresh_parser, ['constraint', 'yes_to_all']\n )\n\n find_parser = sp.add_parser('find', help='find module files for packages')\n find_parser.add_argument(\n '--full-path',\n help='display full path to module file',\n action='store_true'\n )\n arguments.add_common_arguments(\n find_parser, ['constraint', 'recurse_dependencies']\n )\n\n rm_parser = sp.add_parser('rm', help='remove module files')\n arguments.add_common_arguments(\n rm_parser, ['constraint', 'yes_to_all']\n )\n\n loads_parser = sp.add_parser(\n 'loads',\n help='prompt the list of modules associated with a constraint'\n )\n add_loads_arguments(loads_parser)\n arguments.add_common_arguments(loads_parser, ['constraint'])\n\n return sp\n\n\ndef add_loads_arguments(subparser):\n subparser.add_argument(\n '--input-only', action='store_false', dest='shell',\n help='generate input for module command (instead of a shell script)'\n )\n subparser.add_argument(\n '-p', '--prefix', dest='prefix', default='',\n help='prepend to module names when issuing module load commands'\n )\n subparser.add_argument(\n '-x', '--exclude', dest='exclude', action='append', default=[],\n help=\"exclude package from output; may be specified multiple times\"\n )\n arguments.add_common_arguments(\n subparser, ['recurse_dependencies']\n )\n\n\nclass MultipleSpecsMatch(Exception):\n \"\"\"Raised when multiple specs match a constraint, in a context where\n this is not allowed.\n \"\"\"\n\n\nclass NoSpecMatches(Exception):\n \"\"\"Raised when no spec matches a constraint, in a context where\n this is not allowed.\n \"\"\"\n\n\ndef one_spec_or_raise(specs):\n \"\"\"Ensures exactly one spec has been selected, or raises the appropriate\n exception.\n \"\"\"\n # Ensure a single spec matches the constraint\n if len(specs) == 0:\n raise NoSpecMatches()\n if len(specs) > 1:\n raise MultipleSpecsMatch()\n\n # Get the spec and module type\n return specs[0]\n\n\ndef loads(module_type, specs, args, out=sys.stdout):\n \"\"\"Prompt the list of modules associated with a list of specs\"\"\"\n\n # Get a comprehensive list of specs\n if args.recurse_dependencies:\n specs_from_user_constraint = specs[:]\n specs = []\n # FIXME : during module file creation nodes seem to be visited\n # FIXME : multiple times even if cover='nodes' is given. 
This\n # FIXME : work around permits to get a unique list of spec anyhow.\n # FIXME : (same problem as in spack/modules.py)\n seen = set()\n seen_add = seen.add\n for spec in specs_from_user_constraint:\n specs.extend(\n [item for item in spec.traverse(order='post', cover='nodes')\n if not (item in seen or seen_add(item))]\n )\n\n module_cls = spack.modules.module_types[module_type]\n modules = list()\n for spec in specs:\n if os.path.exists(module_cls(spec).layout.filename):\n modules.append((spec, module_cls(spec).layout.use_name))\n elif spec.package.installed_upstream:\n tty.debug(\"Using upstream module for {0}\".format(spec))\n module = spack.modules.common.upstream_module(spec, module_type)\n modules.append((spec, module.use_name))\n\n module_commands = {\n 'tcl': 'module load ',\n 'lmod': 'module load ',\n 'dotkit': 'use '\n }\n\n d = {\n 'command': '' if not args.shell else module_commands[module_type],\n 'prefix': args.prefix\n }\n\n exclude_set = set(args.exclude)\n prompt_template = '{comment}{exclude}{command}{prefix}{name}'\n for spec, mod in modules:\n d['exclude'] = '## ' if spec.name in exclude_set else ''\n d['comment'] = '' if not args.shell else '# {0}\\n'.format(\n spec.format())\n d['name'] = mod\n out.write(prompt_template.format(**d))\n out.write('\\n')\n\n\ndef find(module_type, specs, args):\n \"\"\"Retrieve paths or use names of module files\"\"\"\n\n single_spec = one_spec_or_raise(specs)\n\n if args.recurse_dependencies:\n specs_to_retrieve = list(\n single_spec.traverse(order='post', cover='nodes',\n deptype=('link', 'run')))\n else:\n specs_to_retrieve = [single_spec]\n\n try:\n modules = [spack.modules.common.get_module(module_type, spec,\n args.full_path)\n for spec in specs_to_retrieve]\n except spack.modules.common.ModuleNotFoundError as e:\n tty.die(e.message)\n print(' '.join(modules))\n\n\ndef rm(module_type, specs, args):\n \"\"\"Deletes the module files associated with every spec in specs, for every\n module type in module types.\n \"\"\"\n\n module_cls = spack.modules.module_types[module_type]\n module_exist = lambda x: os.path.exists(module_cls(x).layout.filename)\n\n specs_with_modules = [spec for spec in specs if module_exist(spec)]\n\n modules = [module_cls(spec) for spec in specs_with_modules]\n\n if not modules:\n tty.die('No module file matches your query')\n\n # Ask for confirmation\n if not args.yes_to_all:\n msg = 'You are about to remove {0} module files for:\\n'\n tty.msg(msg.format(module_type))\n spack.cmd.display_specs(specs_with_modules, long=True)\n print('')\n answer = tty.get_yes_or_no('Do you want to proceed?')\n if not answer:\n tty.die('Will not remove any module files')\n\n # Remove the module files\n for s in modules:\n s.remove()\n\n\ndef refresh(module_type, specs, args):\n \"\"\"Regenerates the module files for every spec in specs and every module\n type in module types.\n \"\"\"\n\n # Prompt a message to the user about what is going to change\n if not specs:\n tty.msg('No package matches your query')\n return\n\n if not args.upstream_modules:\n specs = list(s for s in specs if not s.package.installed_upstream)\n\n if not args.yes_to_all:\n msg = 'You are about to regenerate {types} module files for:\\n'\n tty.msg(msg.format(types=module_type))\n spack.cmd.display_specs(specs, long=True)\n print('')\n answer = tty.get_yes_or_no('Do you want to proceed?')\n if not answer:\n tty.die('Module file regeneration aborted.')\n\n # Cycle over the module types and regenerate module files\n\n cls = 
spack.modules.module_types[module_type]\n\n # Skip unknown packages.\n writers = [\n cls(spec) for spec in specs\n if spack.repo.path.exists(spec.name)]\n\n # Filter blacklisted packages early\n writers = [x for x in writers if not x.conf.blacklisted]\n\n # Detect name clashes in module files\n file2writer = collections.defaultdict(list)\n for item in writers:\n file2writer[item.layout.filename].append(item)\n\n if len(file2writer) != len(writers):\n message = 'Name clashes detected in module files:\\n'\n for filename, writer_list in file2writer.items():\n if len(writer_list) > 1:\n message += '\\nfile: {0}\\n'.format(filename)\n for x in writer_list:\n message += 'spec: {0}\\n'.format(x.spec.format())\n tty.error(message)\n tty.error('Operation aborted')\n raise SystemExit(1)\n\n if len(writers) == 0:\n msg = 'Nothing to be done for {0} module files.'\n tty.msg(msg.format(module_type))\n return\n\n # If we arrived here we have at least one writer\n module_type_root = writers[0].layout.dirname()\n spack.modules.common.generate_module_index(module_type_root, writers)\n # Proceed regenerating module files\n tty.msg('Regenerating {name} module files'.format(name=module_type))\n if os.path.isdir(module_type_root) and args.delete_tree:\n shutil.rmtree(module_type_root, ignore_errors=False)\n filesystem.mkdirp(module_type_root)\n for x in writers:\n try:\n x.write(overwrite=True)\n except Exception as e:\n tty.debug(e)\n msg = 'Could not write module file [{0}]'\n tty.warn(msg.format(x.layout.filename))\n tty.warn('\\t--> {0} <--'.format(str(e)))\n\n\n#: Dictionary populated with the list of sub-commands.\n#: Each sub-command must be callable and accept 3 arguments:\n#:\n#: - module_type: the type of module it refers to\n#: - specs : the list of specs to be processed\n#: - args : namespace containing the parsed command line arguments\ncallbacks = {\n 'refresh': refresh,\n 'rm': rm,\n 'find': find,\n 'loads': loads\n}\n\n\ndef modules_cmd(parser, args, module_type, callbacks=callbacks):\n\n # Qualifiers to be used when querying the db for specs\n constraint_qualifiers = {\n 'refresh': {\n 'installed': True,\n 'known': True\n },\n }\n query_args = constraint_qualifiers.get(args.subparser_name, {})\n\n # Get the specs that match the query from the DB\n specs = args.specs(**query_args)\n\n try:\n\n callbacks[args.subparser_name](module_type, specs, args)\n\n except MultipleSpecsMatch:\n msg = \"the constraint '{query}' matches multiple packages:\\n\"\n for s in specs:\n spec_fmt = '{hash:7} {name}{@version}{%compiler}'\n spec_fmt += '{compiler_flags}{variants}{arch=architecture}'\n msg += '\\t' + s.cformat(spec_fmt) + '\\n'\n tty.error(msg.format(query=args.constraint))\n tty.die('In this context exactly **one** match is needed: please specify your constraints better.') # NOQA: ignore=E501\n\n except NoSpecMatches:\n msg = \"the constraint '{query}' matches no package.\"\n tty.error(msg.format(query=args.constraint))\n tty.die('In this context exactly **one** match is needed: please specify your constraints better.') # NOQA: ignore=E501\n", "path": "lib/spack/spack/cmd/modules/__init__.py"}], "after_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\"\"\"Implementation details of the ``spack module`` command.\"\"\"\n\nimport collections\nimport os.path\nimport shutil\nimport sys\n\nfrom llnl.util import filesystem, tty\n\nimport spack.cmd\nimport spack.modules\nimport spack.repo\nimport spack.modules.common\n\nimport spack.cmd.common.arguments as arguments\n\ndescription = \"manipulate module files\"\nsection = \"environment\"\nlevel = \"short\"\n\n\ndef setup_parser(subparser):\n sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='subparser_name')\n\n refresh_parser = sp.add_parser('refresh', help='regenerate module files')\n refresh_parser.add_argument(\n '--delete-tree',\n help='delete the module file tree before refresh',\n action='store_true'\n )\n refresh_parser.add_argument(\n '--upstream-modules',\n help='generate modules for packages installed upstream',\n action='store_true'\n )\n arguments.add_common_arguments(\n refresh_parser, ['constraint', 'yes_to_all']\n )\n\n find_parser = sp.add_parser('find', help='find module files for packages')\n find_parser.add_argument(\n '--full-path',\n help='display full path to module file',\n action='store_true'\n )\n arguments.add_common_arguments(\n find_parser, ['constraint', 'recurse_dependencies']\n )\n\n rm_parser = sp.add_parser('rm', help='remove module files')\n arguments.add_common_arguments(\n rm_parser, ['constraint', 'yes_to_all']\n )\n\n loads_parser = sp.add_parser(\n 'loads',\n help='prompt the list of modules associated with a constraint'\n )\n add_loads_arguments(loads_parser)\n arguments.add_common_arguments(loads_parser, ['constraint'])\n\n return sp\n\n\ndef add_loads_arguments(subparser):\n subparser.add_argument(\n '--input-only', action='store_false', dest='shell',\n help='generate input for module command (instead of a shell script)'\n )\n subparser.add_argument(\n '-p', '--prefix', dest='prefix', default='',\n help='prepend to module names when issuing module load commands'\n )\n subparser.add_argument(\n '-x', '--exclude', dest='exclude', action='append', default=[],\n help=\"exclude package from output; may be specified multiple times\"\n )\n arguments.add_common_arguments(\n subparser, ['recurse_dependencies']\n )\n\n\nclass MultipleSpecsMatch(Exception):\n \"\"\"Raised when multiple specs match a constraint, in a context where\n this is not allowed.\n \"\"\"\n\n\nclass NoSpecMatches(Exception):\n \"\"\"Raised when no spec matches a constraint, in a context where\n this is not allowed.\n \"\"\"\n\n\ndef one_spec_or_raise(specs):\n \"\"\"Ensures exactly one spec has been selected, or raises the appropriate\n exception.\n \"\"\"\n # Ensure a single spec matches the constraint\n if len(specs) == 0:\n raise NoSpecMatches()\n if len(specs) > 1:\n raise MultipleSpecsMatch()\n\n # Get the spec and module type\n return specs[0]\n\n\ndef loads(module_type, specs, args, out=sys.stdout):\n \"\"\"Prompt the list of modules associated with a list of specs\"\"\"\n\n # Get a comprehensive list of specs\n if args.recurse_dependencies:\n specs_from_user_constraint = specs[:]\n specs = []\n # FIXME : during module file creation nodes seem to be visited\n # FIXME : multiple times even if cover='nodes' is given. 
This\n # FIXME : work around permits to get a unique list of spec anyhow.\n # FIXME : (same problem as in spack/modules.py)\n seen = set()\n seen_add = seen.add\n for spec in specs_from_user_constraint:\n specs.extend(\n [item for item in spec.traverse(order='post', cover='nodes')\n if not (item in seen or seen_add(item))]\n )\n\n modules = list(\n (spec, spack.modules.common.get_module(module_type, spec, False))\n for spec in specs)\n\n module_commands = {\n 'tcl': 'module load ',\n 'lmod': 'module load ',\n 'dotkit': 'use '\n }\n\n d = {\n 'command': '' if not args.shell else module_commands[module_type],\n 'prefix': args.prefix\n }\n\n exclude_set = set(args.exclude)\n prompt_template = '{comment}{exclude}{command}{prefix}{name}'\n for spec, mod in modules:\n d['exclude'] = '## ' if spec.name in exclude_set else ''\n d['comment'] = '' if not args.shell else '# {0}\\n'.format(\n spec.format())\n d['name'] = mod\n out.write(prompt_template.format(**d))\n out.write('\\n')\n\n\ndef find(module_type, specs, args):\n \"\"\"Retrieve paths or use names of module files\"\"\"\n\n single_spec = one_spec_or_raise(specs)\n\n if args.recurse_dependencies:\n specs_to_retrieve = list(\n single_spec.traverse(order='post', cover='nodes',\n deptype=('link', 'run')))\n else:\n specs_to_retrieve = [single_spec]\n\n try:\n modules = [spack.modules.common.get_module(module_type, spec,\n args.full_path)\n for spec in specs_to_retrieve]\n except spack.modules.common.ModuleNotFoundError as e:\n tty.die(e.message)\n print(' '.join(modules))\n\n\ndef rm(module_type, specs, args):\n \"\"\"Deletes the module files associated with every spec in specs, for every\n module type in module types.\n \"\"\"\n\n module_cls = spack.modules.module_types[module_type]\n module_exist = lambda x: os.path.exists(module_cls(x).layout.filename)\n\n specs_with_modules = [spec for spec in specs if module_exist(spec)]\n\n modules = [module_cls(spec) for spec in specs_with_modules]\n\n if not modules:\n tty.die('No module file matches your query')\n\n # Ask for confirmation\n if not args.yes_to_all:\n msg = 'You are about to remove {0} module files for:\\n'\n tty.msg(msg.format(module_type))\n spack.cmd.display_specs(specs_with_modules, long=True)\n print('')\n answer = tty.get_yes_or_no('Do you want to proceed?')\n if not answer:\n tty.die('Will not remove any module files')\n\n # Remove the module files\n for s in modules:\n s.remove()\n\n\ndef refresh(module_type, specs, args):\n \"\"\"Regenerates the module files for every spec in specs and every module\n type in module types.\n \"\"\"\n\n # Prompt a message to the user about what is going to change\n if not specs:\n tty.msg('No package matches your query')\n return\n\n if not args.upstream_modules:\n specs = list(s for s in specs if not s.package.installed_upstream)\n\n if not args.yes_to_all:\n msg = 'You are about to regenerate {types} module files for:\\n'\n tty.msg(msg.format(types=module_type))\n spack.cmd.display_specs(specs, long=True)\n print('')\n answer = tty.get_yes_or_no('Do you want to proceed?')\n if not answer:\n tty.die('Module file regeneration aborted.')\n\n # Cycle over the module types and regenerate module files\n\n cls = spack.modules.module_types[module_type]\n\n # Skip unknown packages.\n writers = [\n cls(spec) for spec in specs\n if spack.repo.path.exists(spec.name)]\n\n # Filter blacklisted packages early\n writers = [x for x in writers if not x.conf.blacklisted]\n\n # Detect name clashes in module files\n file2writer = collections.defaultdict(list)\n for 
item in writers:\n file2writer[item.layout.filename].append(item)\n\n if len(file2writer) != len(writers):\n message = 'Name clashes detected in module files:\\n'\n for filename, writer_list in file2writer.items():\n if len(writer_list) > 1:\n message += '\\nfile: {0}\\n'.format(filename)\n for x in writer_list:\n message += 'spec: {0}\\n'.format(x.spec.format())\n tty.error(message)\n tty.error('Operation aborted')\n raise SystemExit(1)\n\n if len(writers) == 0:\n msg = 'Nothing to be done for {0} module files.'\n tty.msg(msg.format(module_type))\n return\n\n # If we arrived here we have at least one writer\n module_type_root = writers[0].layout.dirname()\n spack.modules.common.generate_module_index(module_type_root, writers)\n # Proceed regenerating module files\n tty.msg('Regenerating {name} module files'.format(name=module_type))\n if os.path.isdir(module_type_root) and args.delete_tree:\n shutil.rmtree(module_type_root, ignore_errors=False)\n filesystem.mkdirp(module_type_root)\n for x in writers:\n try:\n x.write(overwrite=True)\n except Exception as e:\n tty.debug(e)\n msg = 'Could not write module file [{0}]'\n tty.warn(msg.format(x.layout.filename))\n tty.warn('\\t--> {0} <--'.format(str(e)))\n\n\n#: Dictionary populated with the list of sub-commands.\n#: Each sub-command must be callable and accept 3 arguments:\n#:\n#: - module_type: the type of module it refers to\n#: - specs : the list of specs to be processed\n#: - args : namespace containing the parsed command line arguments\ncallbacks = {\n 'refresh': refresh,\n 'rm': rm,\n 'find': find,\n 'loads': loads\n}\n\n\ndef modules_cmd(parser, args, module_type, callbacks=callbacks):\n\n # Qualifiers to be used when querying the db for specs\n constraint_qualifiers = {\n 'refresh': {\n 'installed': True,\n 'known': True\n },\n }\n query_args = constraint_qualifiers.get(args.subparser_name, {})\n\n # Get the specs that match the query from the DB\n specs = args.specs(**query_args)\n\n try:\n\n callbacks[args.subparser_name](module_type, specs, args)\n\n except MultipleSpecsMatch:\n msg = \"the constraint '{query}' matches multiple packages:\\n\"\n for s in specs:\n spec_fmt = '{hash:7} {name}{@version}{%compiler}'\n spec_fmt += '{compiler_flags}{variants}{arch=architecture}'\n msg += '\\t' + s.cformat(spec_fmt) + '\\n'\n tty.error(msg.format(query=args.constraint))\n tty.die('In this context exactly **one** match is needed: please specify your constraints better.') # NOQA: ignore=E501\n\n except NoSpecMatches:\n msg = \"the constraint '{query}' matches no package.\"\n tty.error(msg.format(query=args.constraint))\n tty.die('In this context exactly **one** match is needed: please specify your constraints better.') # NOQA: ignore=E501\n", "path": "lib/spack/spack/cmd/modules/__init__.py"}]} | 3,929 | 239 |
gh_patches_debug_27382 | rasdani/github-patches | git_diff | ultralytics__yolov5-5141 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DDP wandb utils not running check_file() on --data
## 🐛 Bug
@AyushExel I found a bit of a bug today on starting a DDP training with `--data coco.yaml`. All our main python files (train, test, detect) are set up so that files passed to the argparser are searched locally inside the yolov5 repo using `file = check_file(file)`. This allows for more concise commands like --cfg yolov5s.yaml rather than --cfg models/yolov5s.yaml, but it looks like opt.data got passed to the wandb utils before it had a chance to pass through check_file() first. Should be an easy fix, I think I can do this tomorrow.
<img width="1214" alt="Screenshot 2021-05-05 at 01 46 55" src="https://user-images.githubusercontent.com/26833433/117083255-94c12780-ad44-11eb-85c2-48bef8943717.png">
## To Reproduce (REQUIRED)
Start DDP training with `--data coco.yaml`
```
python -m torch.distributed.launch --nproc_per_node 2 --master_port 1 train.py --batch 64 --cfg yolov5x.yaml --weights '' --data coco.yaml
```
Question about calculating mAP at test time
## ❔Question
Thank you for your work; I really enjoyed running the code.
I was trying to understand the way we calculate mAP by reading the [test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)
As I understand it, at line [197](https://github.com/ultralytics/yolov5/blob/master/test.py#L197) the IoU values (`ious`) aren't sorted before the subsequent processing (lines [199-211](https://github.com/ultralytics/yolov5/blob/master/test.py#L199-L211)).
Therefore, I don't think it is guaranteed that we find the best IoU (i.e. the best-matching detected box) for each ground-truth box.
For example, a detected box is considered correct if its IoU with a ground-truth box is >= 0.5. However, multiple detected boxes may overlap the same ground-truth box with different IoU values. In this case, I think the box with the highest IoU should be assigned as the correctly detected box.
Will the code affect the result of calculating mAP?
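To make the concern concrete, here is a small standalone sketch (not the code in test.py) of choosing, for each ground-truth box, the prediction with the highest IoU above the threshold; handling a prediction that ends up matched to several ground-truth boxes is left out for brevity:

```python
import torch

def best_iou_matches(ious, iou_thres=0.5):
    # ious: (num_predictions, num_targets) IoU matrix.
    best_iou, best_pred = ious.max(dim=0)  # best prediction per ground-truth box
    return [(int(p), int(t))               # (prediction_index, target_index)
            for t, (p, iou) in enumerate(zip(best_pred, best_iou))
            if iou >= iou_thres]
```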
## Additional context
The comments in lines [191-192](https://github.com/ultralytics/yolov5/blob/master/test.py#L191-L192) (`# prediction indices`, `# target indices`) should be swapped, shouldn't they?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hubconf.py`
Content:
```
1 # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 """
3 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/
4
5 Usage:
6 import torch
7 model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
8 """
9
10 import torch
11
12
13 def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
14 """Creates a specified YOLOv5 model
15
16 Arguments:
17 name (str): name of model, i.e. 'yolov5s'
18 pretrained (bool): load pretrained weights into the model
19 channels (int): number of input channels
20 classes (int): number of model classes
21 autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
22 verbose (bool): print all information to screen
23 device (str, torch.device, None): device to use for model parameters
24
25 Returns:
26 YOLOv5 pytorch model
27 """
28 from pathlib import Path
29
30 from models.yolo import Model
31 from models.experimental import attempt_load
32 from utils.general import check_requirements, set_logging
33 from utils.downloads import attempt_download
34 from utils.torch_utils import select_device
35
36 file = Path(__file__).resolve()
37 check_requirements(exclude=('tensorboard', 'thop', 'opencv-python'))
38 set_logging(verbose=verbose)
39
40 save_dir = Path('') if str(name).endswith('.pt') else file.parent
41 path = (save_dir / name).with_suffix('.pt') # checkpoint path
42 try:
43 device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device)
44
45 if pretrained and channels == 3 and classes == 80:
46 model = attempt_load(path, map_location=device) # download/load FP32 model
47 else:
48 cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path
49 model = Model(cfg, channels, classes) # create model
50 if pretrained:
51 ckpt = torch.load(attempt_download(path), map_location=device) # load
52 msd = model.state_dict() # model state_dict
53 csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
54 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter
55 model.load_state_dict(csd, strict=False) # load
56 if len(ckpt['model'].names) == classes:
57 model.names = ckpt['model'].names # set class names attribute
58 if autoshape:
59 model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
60 return model.to(device)
61
62 except Exception as e:
63 help_url = 'https://github.com/ultralytics/yolov5/issues/36'
64 s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' % help_url
65 raise Exception(s) from e
66
67
68 def custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):
69 # YOLOv5 custom or local model
70 return _create(path, autoshape=autoshape, verbose=verbose, device=device)
71
72
73 def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
74 # YOLOv5-small model https://github.com/ultralytics/yolov5
75 return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)
76
77
78 def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
79 # YOLOv5-medium model https://github.com/ultralytics/yolov5
80 return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)
81
82
83 def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
84 # YOLOv5-large model https://github.com/ultralytics/yolov5
85 return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)
86
87
88 def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
89 # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
90 return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)
91
92
93 def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
94 # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
95 return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)
96
97
98 def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
99 # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
100 return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)
101
102
103 def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
104 # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
105 return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)
106
107
108 def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
109 # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
110 return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)
111
112
113 if __name__ == '__main__':
114 model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained
115 # model = custom(path='path/to/model.pt') # custom
116
117 # Verify inference
118 import cv2
119 import numpy as np
120 from PIL import Image
121 from pathlib import Path
122
123 imgs = ['data/images/zidane.jpg', # filename
124 Path('data/images/zidane.jpg'), # Path
125 'https://ultralytics.com/images/zidane.jpg', # URI
126 cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV
127 Image.open('data/images/bus.jpg'), # PIL
128 np.zeros((320, 640, 3))] # numpy
129
130 results = model(imgs) # batched inference
131 results.print()
132 results.save()
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hubconf.py b/hubconf.py
--- a/hubconf.py
+++ b/hubconf.py
@@ -70,6 +70,11 @@
return _create(path, autoshape=autoshape, verbose=verbose, device=device)
+def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ # YOLOv5-nano model https://github.com/ultralytics/yolov5
+ return _create('yolov5n', pretrained, channels, classes, autoshape, verbose, device)
+
+
def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-small model https://github.com/ultralytics/yolov5
return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)
@@ -90,6 +95,11 @@
return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)
+def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5
+ return _create('yolov5n6', pretrained, channels, classes, autoshape, verbose, device)
+
+
def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)
| {"golden_diff": "diff --git a/hubconf.py b/hubconf.py\n--- a/hubconf.py\n+++ b/hubconf.py\n@@ -70,6 +70,11 @@\n return _create(path, autoshape=autoshape, verbose=verbose, device=device)\n \n \n+def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n+ # YOLOv5-nano model https://github.com/ultralytics/yolov5\n+ return _create('yolov5n', pretrained, channels, classes, autoshape, verbose, device)\n+\n+\n def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-small model https://github.com/ultralytics/yolov5\n return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)\n@@ -90,6 +95,11 @@\n return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)\n \n \n+def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n+ # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5\n+ return _create('yolov5n6', pretrained, channels, classes, autoshape, verbose, device)\n+\n+\n def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)\n", "issue": "DDP wandb utils not running check_file() on --data\n## \ud83d\udc1b Bug\r\n\r\n@AyushExel I found a bit of a bug today on starting a DDP training with `--data coco.yaml`. All our main python files (train, test, detect) are set up so that files passed to the argparser are searched locally inside the yolov5 repo using `file = check_file(file)`. This allows for more concise commands like --cfg yolov5s.yaml rather than --cfg models/yolov5s.yaml, but it looks like opt.data got passed to the wandb utils before it had a chance to pass through check_file() first. Should be an easy fix, I think I can do this tomorrow.\r\n\r\n<img width=\"1214\" alt=\"Screenshot 2021-05-05 at 01 46 55\" src=\"https://user-images.githubusercontent.com/26833433/117083255-94c12780-ad44-11eb-85c2-48bef8943717.png\">\r\n\r\n## To Reproduce (REQUIRED)\r\n\r\nStart DDP training with `--data coco.yaml`\r\n```\r\n python -m torch.distributed.launch --nproc_per_node 2 --master_port 1 train.py --batch 64 --cfg yolov5x.yaml --weights '' --data coco.yaml\r\n```\r\n\nQuestion about calculating mAP at test time\n## \u2754Question\r\nThank you for your work, I really enjoyed running the codes.\r\n\r\nI was trying to understand the way we calculate mAP by reading the [test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)\r\nAt line [197](https://github.com/ultralytics/yolov5/blob/master/test.py#L197), as I understood, the IoU values (`ious`) wasn't sorted before the further process (lines [199-211](https://github.com/ultralytics/yolov5/blob/master/test.py#L199-L211)).\r\nTherefore, I think it doesn't guarantee that we find the best IoU (or a detected box) for a ground-truth box.\r\nFor example, a detected box is considered correct if the IoU with a ground-truth box is >= 0.5. However, there are possibilities that we detected multiple boxes with different IoU values. 
In this case, I think we should assign the box with the highest IoU as the correctly detected box.\r\n\r\nWill the code affect the result of calculating mAP?\r\n\r\n## Additional context\r\nThe comments in lines [191-192](https://github.com/ultralytics/yolov5/blob/master/test.py#L191-L192) (`# prediction indices`, `# target indices`) should be swapped, shouldn't they?\n", "before_files": [{"content": "# YOLOv5 \ud83d\ude80 by Ultralytics, GPL-3.0 license\n\"\"\"\nPyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/\n\nUsage:\n import torch\n model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n\"\"\"\n\nimport torch\n\n\ndef _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n \"\"\"Creates a specified YOLOv5 model\n\n Arguments:\n name (str): name of model, i.e. 'yolov5s'\n pretrained (bool): load pretrained weights into the model\n channels (int): number of input channels\n classes (int): number of model classes\n autoshape (bool): apply YOLOv5 .autoshape() wrapper to model\n verbose (bool): print all information to screen\n device (str, torch.device, None): device to use for model parameters\n\n Returns:\n YOLOv5 pytorch model\n \"\"\"\n from pathlib import Path\n\n from models.yolo import Model\n from models.experimental import attempt_load\n from utils.general import check_requirements, set_logging\n from utils.downloads import attempt_download\n from utils.torch_utils import select_device\n\n file = Path(__file__).resolve()\n check_requirements(exclude=('tensorboard', 'thop', 'opencv-python'))\n set_logging(verbose=verbose)\n\n save_dir = Path('') if str(name).endswith('.pt') else file.parent\n path = (save_dir / name).with_suffix('.pt') # checkpoint path\n try:\n device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device)\n\n if pretrained and channels == 3 and classes == 80:\n model = attempt_load(path, map_location=device) # download/load FP32 model\n else:\n cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path\n model = Model(cfg, channels, classes) # create model\n if pretrained:\n ckpt = torch.load(attempt_download(path), map_location=device) # load\n msd = model.state_dict() # model state_dict\n csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32\n csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter\n model.load_state_dict(csd, strict=False) # load\n if len(ckpt['model'].names) == classes:\n model.names = ckpt['model'].names # set class names attribute\n if autoshape:\n model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS\n return model.to(device)\n\n except Exception as e:\n help_url = 'https://github.com/ultralytics/yolov5/issues/36'\n s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' 
% help_url\n raise Exception(s) from e\n\n\ndef custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):\n # YOLOv5 custom or local model\n return _create(path, autoshape=autoshape, verbose=verbose, device=device)\n\n\ndef yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-small model https://github.com/ultralytics/yolov5\n return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-medium model https://github.com/ultralytics/yolov5\n return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-large model https://github.com/ultralytics/yolov5\n return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-xlarge model https://github.com/ultralytics/yolov5\n return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)\n\n\nif __name__ == '__main__':\n model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained\n # model = custom(path='path/to/model.pt') # custom\n\n # Verify inference\n import cv2\n import numpy as np\n from PIL import Image\n from pathlib import Path\n\n imgs = ['data/images/zidane.jpg', # filename\n Path('data/images/zidane.jpg'), # Path\n 'https://ultralytics.com/images/zidane.jpg', # URI\n cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV\n Image.open('data/images/bus.jpg'), # PIL\n np.zeros((320, 640, 3))] # numpy\n\n results = model(imgs) # batched inference\n results.print()\n results.save()\n", "path": "hubconf.py"}], "after_files": [{"content": "# YOLOv5 \ud83d\ude80 by Ultralytics, GPL-3.0 license\n\"\"\"\nPyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/\n\nUsage:\n import torch\n model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n\"\"\"\n\nimport torch\n\n\ndef _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n \"\"\"Creates a specified YOLOv5 model\n\n Arguments:\n name (str): name of model, i.e. 
'yolov5s'\n pretrained (bool): load pretrained weights into the model\n channels (int): number of input channels\n classes (int): number of model classes\n autoshape (bool): apply YOLOv5 .autoshape() wrapper to model\n verbose (bool): print all information to screen\n device (str, torch.device, None): device to use for model parameters\n\n Returns:\n YOLOv5 pytorch model\n \"\"\"\n from pathlib import Path\n\n from models.yolo import Model\n from models.experimental import attempt_load\n from utils.general import check_requirements, set_logging\n from utils.downloads import attempt_download\n from utils.torch_utils import select_device\n\n file = Path(__file__).resolve()\n check_requirements(exclude=('tensorboard', 'thop', 'opencv-python'))\n set_logging(verbose=verbose)\n\n save_dir = Path('') if str(name).endswith('.pt') else file.parent\n path = (save_dir / name).with_suffix('.pt') # checkpoint path\n try:\n device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device)\n\n if pretrained and channels == 3 and classes == 80:\n model = attempt_load(path, map_location=device) # download/load FP32 model\n else:\n cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path\n model = Model(cfg, channels, classes) # create model\n if pretrained:\n ckpt = torch.load(attempt_download(path), map_location=device) # load\n msd = model.state_dict() # model state_dict\n csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32\n csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter\n model.load_state_dict(csd, strict=False) # load\n if len(ckpt['model'].names) == classes:\n model.names = ckpt['model'].names # set class names attribute\n if autoshape:\n model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS\n return model.to(device)\n\n except Exception as e:\n help_url = 'https://github.com/ultralytics/yolov5/issues/36'\n s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' 
% help_url\n raise Exception(s) from e\n\n\ndef custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):\n # YOLOv5 custom or local model\n return _create(path, autoshape=autoshape, verbose=verbose, device=device)\n\n\ndef yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-nano model https://github.com/ultralytics/yolov5\n return _create('yolov5n', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-small model https://github.com/ultralytics/yolov5\n return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-medium model https://github.com/ultralytics/yolov5\n return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-large model https://github.com/ultralytics/yolov5\n return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-xlarge model https://github.com/ultralytics/yolov5\n return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5n6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)\n\n\nif __name__ == '__main__':\n model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained\n # model = custom(path='path/to/model.pt') # custom\n\n # Verify inference\n import cv2\n import numpy as np\n from PIL import Image\n from pathlib import Path\n\n imgs = ['data/images/zidane.jpg', # filename\n Path('data/images/zidane.jpg'), # Path\n 'https://ultralytics.com/images/zidane.jpg', # URI\n cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV\n Image.open('data/images/bus.jpg'), # PIL\n np.zeros((320, 640, 3))] # numpy\n\n results = model(imgs) # batched inference\n results.print()\n results.save()\n", "path": "hubconf.py"}]} | 2,802 | 426 |