| Column | Dtype | Values |
|---|---|---|
| problem_id | string | lengths 18-22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13-58 |
| prompt | string | lengths 1.1k-25.4k |
| golden_diff | string | lengths 145-5.13k |
| verification_info | string | lengths 582-39.1k |
| num_tokens | int64 | 271-4.1k |
| num_tokens_diff | int64 | 47-1.02k |
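
Each record pairs a GitHub issue plus the relevant source files (`prompt`) with the reference fix (`golden_diff`) and a `verification_info` JSON string that repeats the issue together with the pre-patch (`before_files`) and post-patch (`after_files`) file contents. A minimal sketch of loading and inspecting one record with the `datasets` library follows; the repository id and split name are assumptions taken from the `source` column, not confirmed identifiers.

```python
import json
from datasets import load_dataset

# Assumption: the rows previewed below are hosted under the id shown in the
# `source` column; substitute the actual repo id / split name if they differ.
ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"], row["in_source_id"], row["num_tokens"], row["num_tokens_diff"])

info = json.loads(row["verification_info"])
print(sorted(info))                        # after_files, before_files, golden_diff, issue
print(row["golden_diff"].splitlines()[0])  # e.g. "diff --git a/setup.py b/setup.py"
```

In the sample records below, the columns of each row are separated by `|` in the order given in the table above.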
gh_patches_debug_34583 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1526 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error installing skimage-
Hi!
I've install skimage on a MacOs 10.10, like it's said in the documentation:
pip install -U scikit-image
and it say that need scipy to work, I had to install it to work.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2
3 descr = """Image Processing SciKit
4
5 Image processing algorithms for SciPy, including IO, morphology, filtering,
6 warping, color manipulation, object detection, etc.
7
8 Please refer to the online documentation at
9 http://scikit-image.org/
10 """
11
12 DISTNAME = 'scikit-image'
13 DESCRIPTION = 'Image processing routines for SciPy'
14 LONG_DESCRIPTION = descr
15 MAINTAINER = 'Stefan van der Walt'
16 MAINTAINER_EMAIL = '[email protected]'
17 URL = 'http://scikit-image.org'
18 LICENSE = 'Modified BSD'
19 DOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image'
20
21 import os
22 import sys
23
24 import setuptools
25 from distutils.command.build_py import build_py
26
27
28 with open('skimage/__init__.py') as fid:
29 for line in fid:
30 if line.startswith('__version__'):
31 VERSION = line.strip().split()[-1][1:-1]
32 break
33
34 with open('requirements.txt') as fid:
35 INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
36
37 # development versions do not have the cythonized files
38 if VERSION.endswith('dev'):
39 SETUP_REQUIRES = [r for r in INSTALL_REQUIRES if r.startswith('cython')]
40 else:
41 INSTALL_REQUIRES = [r for r in INSTALL_REQUIRES
42 if not r.startswith('cython')]
43 SETUP_REQUIRES = []
44
45
46 # list requirements for PyPI
47 REQUIRES = [r.replace('>=', ' (>= ') + ')'
48 for r in INSTALL_REQUIRES + SETUP_REQUIRES]
49 REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
50
51
52 # do not attempt to install numpy and scipy until they have eggs available
53 INSTALL_REQUIRES = [r for r in INSTALL_REQUIRES
54 if not r.startswith(('scipy', 'numpy'))]
55
56
57 def configuration(parent_package='', top_path=None):
58 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
59
60 from numpy.distutils.misc_util import Configuration
61 config = Configuration(None, parent_package, top_path)
62
63 config.set_options(
64 ignore_setup_xxx_py=True,
65 assume_default_configuration=True,
66 delegate_options_to_subpackages=True,
67 quiet=True)
68
69 config.add_subpackage('skimage')
70 config.add_data_dir('skimage/data')
71
72 return config
73
74
75 if __name__ == "__main__":
76 # purposely fail loudly if numpy or scipy are not available
77 from numpy.distutils.core import setup
78 import scipy
79
80 setup(
81 name=DISTNAME,
82 description=DESCRIPTION,
83 long_description=LONG_DESCRIPTION,
84 maintainer=MAINTAINER,
85 maintainer_email=MAINTAINER_EMAIL,
86 url=URL,
87 license=LICENSE,
88 download_url=DOWNLOAD_URL,
89 version=VERSION,
90
91 classifiers=[
92 'Development Status :: 4 - Beta',
93 'Environment :: Console',
94 'Intended Audience :: Developers',
95 'Intended Audience :: Science/Research',
96 'License :: OSI Approved :: BSD License',
97 'Programming Language :: C',
98 'Programming Language :: Python',
99 'Programming Language :: Python :: 3',
100 'Topic :: Scientific/Engineering',
101 'Operating System :: Microsoft :: Windows',
102 'Operating System :: POSIX',
103 'Operating System :: Unix',
104 'Operating System :: MacOS',
105 ],
106
107 configuration=configuration,
108 setup_requires=SETUP_REQUIRES,
109 install_requires=INSTALL_REQUIRES,
110 requires=REQUIRES,
111 packages=setuptools.find_packages(exclude=['doc']),
112 include_package_data=True,
113 zip_safe=False, # the package can run out of an .egg file
114
115 entry_points={
116 'console_scripts': ['skivi = skimage.scripts.skivi:main'],
117 },
118
119 cmdclass={'build_py': build_py},
120 )
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,24 +34,10 @@
with open('requirements.txt') as fid:
INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
-# development versions do not have the cythonized files
-if VERSION.endswith('dev'):
- SETUP_REQUIRES = [r for r in INSTALL_REQUIRES if r.startswith('cython')]
-else:
- INSTALL_REQUIRES = [r for r in INSTALL_REQUIRES
- if not r.startswith('cython')]
- SETUP_REQUIRES = []
-
-
-# list requirements for PyPI
-REQUIRES = [r.replace('>=', ' (>= ') + ')'
- for r in INSTALL_REQUIRES + SETUP_REQUIRES]
+# requirements for those browsing PyPI
+REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
-
-
-# do not attempt to install numpy and scipy until they have eggs available
-INSTALL_REQUIRES = [r for r in INSTALL_REQUIRES
- if not r.startswith(('scipy', 'numpy'))]
+REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
def configuration(parent_package='', top_path=None):
@@ -73,9 +59,17 @@
if __name__ == "__main__":
- # purposely fail loudly if numpy or scipy are not available
- from numpy.distutils.core import setup
- import scipy
+ # purposely fail if numpy is not available
+ # other dependecies will be resolved by pip (install_requires)
+ try:
+ from numpy.distutils.core import setup
+ except ImportError:
+ print('To install scikit-image from source, you will need numpy.\n' +
+ 'Install numpy with pip:\n' +
+ 'pip install numpy\n'
+ 'Or use your operating system package manager. For more\n' +
+ 'details, see http://scikit-image.org/docs/stable/install.html')
+ sys.exit(1)
setup(
name=DISTNAME,
@@ -105,8 +99,9 @@
],
configuration=configuration,
- setup_requires=SETUP_REQUIRES,
install_requires=INSTALL_REQUIRES,
+ # install cython when running setup.py (source install)
+ setup_requires=['cython>=0.21'],
requires=REQUIRES,
packages=setuptools.find_packages(exclude=['doc']),
include_package_data=True,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,24 +34,10 @@\n with open('requirements.txt') as fid:\n INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]\n \n-# development versions do not have the cythonized files\n-if VERSION.endswith('dev'):\n- SETUP_REQUIRES = [r for r in INSTALL_REQUIRES if r.startswith('cython')]\n-else:\n- INSTALL_REQUIRES = [r for r in INSTALL_REQUIRES\n- if not r.startswith('cython')]\n- SETUP_REQUIRES = []\n-\n-\n-# list requirements for PyPI\n-REQUIRES = [r.replace('>=', ' (>= ') + ')'\n- for r in INSTALL_REQUIRES + SETUP_REQUIRES]\n+# requirements for those browsing PyPI\n+REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]\n REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]\n-\n-\n-# do not attempt to install numpy and scipy until they have eggs available\n-INSTALL_REQUIRES = [r for r in INSTALL_REQUIRES\n- if not r.startswith(('scipy', 'numpy'))]\n+REQUIRES = [r.replace('[array]', '') for r in REQUIRES]\n \n \n def configuration(parent_package='', top_path=None):\n@@ -73,9 +59,17 @@\n \n \n if __name__ == \"__main__\":\n- # purposely fail loudly if numpy or scipy are not available\n- from numpy.distutils.core import setup\n- import scipy\n+ # purposely fail if numpy is not available\n+ # other dependecies will be resolved by pip (install_requires)\n+ try:\n+ from numpy.distutils.core import setup\n+ except ImportError:\n+ print('To install scikit-image from source, you will need numpy.\\n' +\n+ 'Install numpy with pip:\\n' +\n+ 'pip install numpy\\n'\n+ 'Or use your operating system package manager. For more\\n' +\n+ 'details, see http://scikit-image.org/docs/stable/install.html')\n+ sys.exit(1)\n \n setup(\n name=DISTNAME,\n@@ -105,8 +99,9 @@\n ],\n \n configuration=configuration,\n- setup_requires=SETUP_REQUIRES,\n install_requires=INSTALL_REQUIRES,\n+ # install cython when running setup.py (source install)\n+ setup_requires=['cython>=0.21'],\n requires=REQUIRES,\n packages=setuptools.find_packages(exclude=['doc']),\n include_package_data=True,\n", "issue": "Error installing skimage-\nHi!\nI've install skimage on a MacOs 10.10, like it's said in the documentation:\npip install -U scikit-image\nand it say that need scipy to work, I had to install it to work.\n\n", "before_files": [{"content": "#! 
/usr/bin/env python\n\ndescr = \"\"\"Image Processing SciKit\n\nImage processing algorithms for SciPy, including IO, morphology, filtering,\nwarping, color manipulation, object detection, etc.\n\nPlease refer to the online documentation at\nhttp://scikit-image.org/\n\"\"\"\n\nDISTNAME = 'scikit-image'\nDESCRIPTION = 'Image processing routines for SciPy'\nLONG_DESCRIPTION = descr\nMAINTAINER = 'Stefan van der Walt'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'http://scikit-image.org'\nLICENSE = 'Modified BSD'\nDOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image'\n\nimport os\nimport sys\n\nimport setuptools\nfrom distutils.command.build_py import build_py\n\n\nwith open('skimage/__init__.py') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\n\nwith open('requirements.txt') as fid:\n INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]\n\n# development versions do not have the cythonized files\nif VERSION.endswith('dev'):\n SETUP_REQUIRES = [r for r in INSTALL_REQUIRES if r.startswith('cython')]\nelse:\n INSTALL_REQUIRES = [r for r in INSTALL_REQUIRES\n if not r.startswith('cython')]\n SETUP_REQUIRES = []\n\n\n# list requirements for PyPI\nREQUIRES = [r.replace('>=', ' (>= ') + ')'\n for r in INSTALL_REQUIRES + SETUP_REQUIRES]\nREQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]\n\n\n# do not attempt to install numpy and scipy until they have eggs available\nINSTALL_REQUIRES = [r for r in INSTALL_REQUIRES\n if not r.startswith(('scipy', 'numpy'))]\n\n\ndef configuration(parent_package='', top_path=None):\n if os.path.exists('MANIFEST'): os.remove('MANIFEST')\n\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n\n config.set_options(\n ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('skimage')\n config.add_data_dir('skimage/data')\n\n return config\n\n\nif __name__ == \"__main__\":\n # purposely fail loudly if numpy or scipy are not available\n from numpy.distutils.core import setup\n import scipy\n\n setup(\n name=DISTNAME,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n url=URL,\n license=LICENSE,\n download_url=DOWNLOAD_URL,\n version=VERSION,\n\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n\n configuration=configuration,\n setup_requires=SETUP_REQUIRES,\n install_requires=INSTALL_REQUIRES,\n requires=REQUIRES,\n packages=setuptools.find_packages(exclude=['doc']),\n include_package_data=True,\n zip_safe=False, # the package can run out of an .egg file\n\n entry_points={\n 'console_scripts': ['skivi = skimage.scripts.skivi:main'],\n },\n\n cmdclass={'build_py': build_py},\n )\n", "path": "setup.py"}], "after_files": [{"content": "#! 
/usr/bin/env python\n\ndescr = \"\"\"Image Processing SciKit\n\nImage processing algorithms for SciPy, including IO, morphology, filtering,\nwarping, color manipulation, object detection, etc.\n\nPlease refer to the online documentation at\nhttp://scikit-image.org/\n\"\"\"\n\nDISTNAME = 'scikit-image'\nDESCRIPTION = 'Image processing routines for SciPy'\nLONG_DESCRIPTION = descr\nMAINTAINER = 'Stefan van der Walt'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'http://scikit-image.org'\nLICENSE = 'Modified BSD'\nDOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image'\n\nimport os\nimport sys\n\nimport setuptools\nfrom distutils.command.build_py import build_py\n\n\nwith open('skimage/__init__.py') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\n\nwith open('requirements.txt') as fid:\n INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]\n\n# requirements for those browsing PyPI\nREQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]\nREQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]\nREQUIRES = [r.replace('[array]', '') for r in REQUIRES]\n\n\ndef configuration(parent_package='', top_path=None):\n if os.path.exists('MANIFEST'): os.remove('MANIFEST')\n\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n\n config.set_options(\n ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('skimage')\n config.add_data_dir('skimage/data')\n\n return config\n\n\nif __name__ == \"__main__\":\n # purposely fail if numpy is not available\n # other dependecies will be resolved by pip (install_requires)\n try:\n from numpy.distutils.core import setup\n except ImportError:\n print('To install scikit-image from source, you will need numpy.\\n' +\n 'Install numpy with pip:\\n' +\n 'pip install numpy\\n'\n 'Or use your operating system package manager. For more\\n' +\n 'details, see http://scikit-image.org/docs/stable/install.html')\n sys.exit(1)\n\n setup(\n name=DISTNAME,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n url=URL,\n license=LICENSE,\n download_url=DOWNLOAD_URL,\n version=VERSION,\n\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n\n configuration=configuration,\n install_requires=INSTALL_REQUIRES,\n # install cython when running setup.py (source install)\n setup_requires=['cython>=0.21'],\n requires=REQUIRES,\n packages=setuptools.find_packages(exclude=['doc']),\n include_package_data=True,\n zip_safe=False, # the package can run out of an .egg file\n\n entry_points={\n 'console_scripts': ['skivi = skimage.scripts.skivi:main'],\n },\n\n cmdclass={'build_py': build_py},\n )\n", "path": "setup.py"}]} | 1,396 | 576 |
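
The `verification_info` blob closing the record above carries everything needed to check a candidate patch offline: the pre-patch sources (`before_files`), the reference patch (`golden_diff`), and the expected result (`after_files`). One way such a record can be verified is sketched below by shelling out to `git apply`; the helper is illustrative and not part of the dataset tooling.

```python
import json
import pathlib
import subprocess
import tempfile

def patch_matches_reference(verification_info: str, candidate_diff: str) -> bool:
    """Apply a candidate diff to before_files and compare with after_files."""
    info = json.loads(verification_info)
    with tempfile.TemporaryDirectory() as tmp:
        root = pathlib.Path(tmp)
        # Materialise the pre-patch tree.
        for f in info["before_files"]:
            path = root / f["path"]
            path.parent.mkdir(parents=True, exist_ok=True)
            path.write_text(f["content"])
        # `git apply` reads the unified diff from stdin and strips the a/ b/ prefixes.
        proc = subprocess.run(["git", "apply"], cwd=root, input=candidate_diff,
                              text=True, capture_output=True)
        if proc.returncode != 0:
            return False  # patch did not apply cleanly
        return all((root / f["path"]).read_text() == f["content"]
                   for f in info["after_files"])
```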
gh_patches_debug_1542 | rasdani/github-patches | git_diff | pyodide__pyodide-987 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add PYODIDE_MINIMAL build option
From the added documentation,
> Minimal pyodide build can be enabled by setting the `PYODIDE_MINIMAL`
environment variable. For instance,
> ```
> PYODIDE_MINIMAL=true PYODIDE_PACKAGES="micropip" make
> ```
>
> This will,
> - not include freetype and libpng libraries (it won't be possible to build matplotlib)
> - not include the jedi library, disabling auto-completion in iodide
>
> As as a result the size will of the core pyodide binaries will be ~15% smaller.
Addresses two points from https://github.com/iodide-project/pyodide/issues/646
Before (master),
```
6,6M pyodide.asm.data
310K pyodide.asm.data.js
2,8M pyodide.asm.js
11M pyodide.asm.wasm
16K pyodide.js
16K pyodide_dev.js
Total: 20.7 MB
```
after (this PR with PYODIDE_MINIMAL=true)
```
5,1M build/pyodide.asm.data
124K build/pyodide.asm.data.js
2,6M build/pyodide.asm.js
9,9M build/pyodide.asm.wasm
16K build/pyodide.js
16K build/pyodide_dev.js
Total: 17.7 MB
```
so it's not that different (14% less), but it's start.
Draft PR for now, as I think I need to go in a bit more details through tests that are run in the minimal build CI job.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyodide-py/pyodide/console.py`
Content:
```
1 from typing import List, Optional
2
3
4 def get_completions(
5 code: str, cursor: Optional[int] = None, namespaces: Optional[List] = None
6 ) -> List[str]:
7 """
8 Get code autocompletion candidates
9
10 Note that this function requires to have the jedi module loaded.
11
12 Parameters
13 ----------
14 code
15 the Python code to complete.
16 cursor
17 optional position in the code at which to autocomplete
18 namespaces
19 a list of namespaces
20
21 Returns
22 -------
23 a list of autocompleted modules
24 """
25 import jedi
26 import __main__
27
28 if namespaces is None:
29 namespaces = [__main__.__dict__]
30
31 if cursor is None:
32 cursor = len(code)
33 code = code[:cursor]
34 interp = jedi.Interpreter(code, namespaces)
35 completions = interp.completions()
36
37 return [x.name for x in completions]
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pyodide-py/pyodide/console.py b/src/pyodide-py/pyodide/console.py
--- a/src/pyodide-py/pyodide/console.py
+++ b/src/pyodide-py/pyodide/console.py
@@ -32,6 +32,6 @@
cursor = len(code)
code = code[:cursor]
interp = jedi.Interpreter(code, namespaces)
- completions = interp.completions()
+ completions = interp.complete()
return [x.name for x in completions]
| {"golden_diff": "diff --git a/src/pyodide-py/pyodide/console.py b/src/pyodide-py/pyodide/console.py\n--- a/src/pyodide-py/pyodide/console.py\n+++ b/src/pyodide-py/pyodide/console.py\n@@ -32,6 +32,6 @@\n cursor = len(code)\n code = code[:cursor]\n interp = jedi.Interpreter(code, namespaces)\n- completions = interp.completions()\n+ completions = interp.complete()\n \n return [x.name for x in completions]\n", "issue": "Add PYODIDE_MINIMAL build option\nFrom the added documentation,\r\n\r\n> Minimal pyodide build can be enabled by setting the `PYODIDE_MINIMAL`\r\nenvironment variable. For instance,\r\n> ``` \r\n> PYODIDE_MINIMAL=true PYODIDE_PACKAGES=\"micropip\" make\r\n> ``` \r\n> \r\n> This will,\r\n> - not include freetype and libpng libraries (it won't be possible to build matplotlib)\r\n> - not include the jedi library, disabling auto-completion in iodide\r\n> \r\n> As as a result the size will of the core pyodide binaries will be ~15% smaller.\r\n\r\nAddresses two points from https://github.com/iodide-project/pyodide/issues/646\r\n\r\nBefore (master),\r\n```\r\n6,6M pyodide.asm.data\r\n310K pyodide.asm.data.js\r\n2,8M pyodide.asm.js\r\n 11M pyodide.asm.wasm\r\n 16K pyodide.js\r\n 16K pyodide_dev.js\r\n\r\nTotal: 20.7 MB\r\n```\r\nafter (this PR with PYODIDE_MINIMAL=true)\r\n```\r\n5,1M build/pyodide.asm.data\r\n124K build/pyodide.asm.data.js\r\n2,6M build/pyodide.asm.js\r\n9,9M build/pyodide.asm.wasm\r\n 16K build/pyodide.js\r\n 16K build/pyodide_dev.js\r\n\r\nTotal: 17.7 MB\r\n```\r\n\r\nso it's not that different (14% less), but it's start. \r\n\r\nDraft PR for now, as I think I need to go in a bit more details through tests that are run in the minimal build CI job.\n", "before_files": [{"content": "from typing import List, Optional\n\n\ndef get_completions(\n code: str, cursor: Optional[int] = None, namespaces: Optional[List] = None\n) -> List[str]:\n \"\"\"\n Get code autocompletion candidates\n\n Note that this function requires to have the jedi module loaded.\n\n Parameters\n ----------\n code\n the Python code to complete.\n cursor\n optional position in the code at which to autocomplete\n namespaces\n a list of namespaces\n\n Returns\n -------\n a list of autocompleted modules\n \"\"\"\n import jedi\n import __main__\n\n if namespaces is None:\n namespaces = [__main__.__dict__]\n\n if cursor is None:\n cursor = len(code)\n code = code[:cursor]\n interp = jedi.Interpreter(code, namespaces)\n completions = interp.completions()\n\n return [x.name for x in completions]\n", "path": "src/pyodide-py/pyodide/console.py"}], "after_files": [{"content": "from typing import List, Optional\n\n\ndef get_completions(\n code: str, cursor: Optional[int] = None, namespaces: Optional[List] = None\n) -> List[str]:\n \"\"\"\n Get code autocompletion candidates\n\n Note that this function requires to have the jedi module loaded.\n\n Parameters\n ----------\n code\n the Python code to complete.\n cursor\n optional position in the code at which to autocomplete\n namespaces\n a list of namespaces\n\n Returns\n -------\n a list of autocompleted modules\n \"\"\"\n import jedi\n import __main__\n\n if namespaces is None:\n namespaces = [__main__.__dict__]\n\n if cursor is None:\n cursor = len(code)\n code = code[:cursor]\n interp = jedi.Interpreter(code, namespaces)\n completions = interp.complete()\n\n return [x.name for x in completions]\n", "path": "src/pyodide-py/pyodide/console.py"}]} | 918 | 122 |
gh_patches_debug_24541 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-2114 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistent docs and code output for `PerceptualEvaluationSpeechQuality `
## 🐛 Bug
[related to #1092]
The [docs](https://torchmetrics.readthedocs.io/en/stable/audio/perceptual_evaluation_speech_quality.html) for PESQ metric class state that
> As output of _forward_ and _compute_ the metric returns the following output `pesq` (Tensor): float tensor with shape (...,) of PESQ value per sample.
However, I always get one value, no matter the `batch_size`, as if the metric was being averaged.
### To Reproduce
```python
import torch
from torchmetrics.audio import PerceptualEvaluationSpeechQuality as PESQ
pesq = PESQ(fs=16000, mode="wb")
# create data
batch_size = 4
audio_size = 12345
preds = torch.FloatTensor(batch_size, audio_size).uniform_(-1, 1)
target = torch.FloatTensor(batch_size, audio_size).uniform_(-1, 1)
print("preds.shape:", preds.shape)
#>preds.shape: torch.Size([4, 12345])
# compute metric
pesq_score = pesq.forward(preds, target)
print("pesq_score:", pesq_score)
#>pesq_score: tensor(1.5049)
```
I expected the output to be of shape `torch.Size([4])`.
Same behaviour for `pesq.update(preds, target)` followed by `pesq.compute()`.
The functional counterpart returns a result [as documented](https://torchmetrics.readthedocs.io/en/stable/audio/perceptual_evaluation_speech_quality.html#torchmetrics.functional.audio.pesq.perceptual_evaluation_speech_quality):
```python
import torch
from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality as F_PESQ
n = 4
preds = torch.FloatTensor(n, 12345).uniform_(-1, 1)
target = torch.FloatTensor(n, 12345).uniform_(-1, 1)
pesq_score = F_PESQ(preds=preds, target=target, fs=16000, mode="wb")
print("pesq_score:", pesq_score)
#> pesq_score: tensor([1.5882, 1.5080, 1.5149, 1.5997])
```
### Environment
```
Linux
python==3.10.12
torch==2.0.1
torchmetrics== 1.1.2
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/audio/pesq.py`
Content:
```
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Optional, Sequence, Union
15
16 from torch import Tensor, tensor
17
18 from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality
19 from torchmetrics.metric import Metric
20 from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _PESQ_AVAILABLE
21 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
22
23 __doctest_requires__ = {"PerceptualEvaluationSpeechQuality": ["pesq"]}
24
25 if not _MATPLOTLIB_AVAILABLE:
26 __doctest_skip__ = ["PerceptualEvaluationSpeechQuality.plot"]
27
28
29 class PerceptualEvaluationSpeechQuality(Metric):
30 """Calculate `Perceptual Evaluation of Speech Quality`_ (PESQ).
31
32 It's a recognized industry standard for audio quality that takes into considerations characteristics such as:
33 audio sharpness, call volume, background noise, clipping, audio interference etc. PESQ returns a score between
34 -0.5 and 4.5 with the higher scores indicating a better quality.
35
36 This metric is a wrapper for the `pesq package`_. Note that input will be moved to ``cpu`` to perform the metric
37 calculation.
38
39 As input to ``forward`` and ``update`` the metric accepts the following input
40
41 - ``preds`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
42 - ``target`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``
43
44 As output of `forward` and `compute` the metric returns the following output
45
46 - ``pesq`` (:class:`~torch.Tensor`): float tensor with shape ``(...,)`` of PESQ value per sample
47
48 .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install
49 torchmetrics[audio]`` or ``pip install pesq``. ``pesq`` will compile with your currently
50 installed version of numpy, meaning that if you upgrade numpy at some point in the future you will
51 most likely have to reinstall ``pesq``.
52
53 Args:
54 fs: sampling frequency, should be 16000 or 8000 (Hz)
55 mode: ``'wb'`` (wide-band) or ``'nb'`` (narrow-band)
56 keep_same_device: whether to move the pesq value to the device of preds
57 n_processes: integer specifying the number of processes to run in parallel for the metric calculation.
58 Only applies to batches of data and if ``multiprocessing`` package is installed.
59 kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
60
61 Raises:
62 ModuleNotFoundError:
63 If ``pesq`` package is not installed
64 ValueError:
65 If ``fs`` is not either ``8000`` or ``16000``
66 ValueError:
67 If ``mode`` is not either ``"wb"`` or ``"nb"``
68
69 Example:
70 >>> import torch
71 >>> from torchmetrics.audio import PerceptualEvaluationSpeechQuality
72 >>> g = torch.manual_seed(1)
73 >>> preds = torch.randn(8000)
74 >>> target = torch.randn(8000)
75 >>> pesq = PerceptualEvaluationSpeechQuality(8000, 'nb')
76 >>> pesq(preds, target)
77 tensor(2.2076)
78 >>> wb_pesq = PerceptualEvaluationSpeechQuality(16000, 'wb')
79 >>> wb_pesq(preds, target)
80 tensor(1.7359)
81
82 """
83
84 sum_pesq: Tensor
85 total: Tensor
86 full_state_update: bool = False
87 is_differentiable: bool = False
88 higher_is_better: bool = True
89 plot_lower_bound: float = -0.5
90 plot_upper_bound: float = 4.5
91
92 def __init__(
93 self,
94 fs: int,
95 mode: str,
96 n_processes: int = 1,
97 **kwargs: Any,
98 ) -> None:
99 super().__init__(**kwargs)
100 if not _PESQ_AVAILABLE:
101 raise ModuleNotFoundError(
102 "PerceptualEvaluationSpeechQuality metric requires that `pesq` is installed."
103 " Either install as `pip install torchmetrics[audio]` or `pip install pesq`."
104 )
105 if fs not in (8000, 16000):
106 raise ValueError(f"Expected argument `fs` to either be 8000 or 16000 but got {fs}")
107 self.fs = fs
108 if mode not in ("wb", "nb"):
109 raise ValueError(f"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}")
110 self.mode = mode
111 if not isinstance(n_processes, int) and n_processes <= 0:
112 raise ValueError(f"Expected argument `n_processes` to be an int larger than 0 but got {n_processes}")
113 self.n_processes = n_processes
114
115 self.add_state("sum_pesq", default=tensor(0.0), dist_reduce_fx="sum")
116 self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
117
118 def update(self, preds: Tensor, target: Tensor) -> None:
119 """Update state with predictions and targets."""
120 pesq_batch = perceptual_evaluation_speech_quality(
121 preds, target, self.fs, self.mode, False, self.n_processes
122 ).to(self.sum_pesq.device)
123
124 self.sum_pesq += pesq_batch.sum()
125 self.total += pesq_batch.numel()
126
127 def compute(self) -> Tensor:
128 """Compute metric."""
129 return self.sum_pesq / self.total
130
131 def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:
132 """Plot a single or multiple values from the metric.
133
134 Args:
135 val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
136 If no value is provided, will automatically call `metric.compute` and plot that result.
137 ax: An matplotlib axis object. If provided will add plot to that axis
138
139 Returns:
140 Figure and Axes object
141
142 Raises:
143 ModuleNotFoundError:
144 If `matplotlib` is not installed
145
146 .. plot::
147 :scale: 75
148
149 >>> # Example plotting a single value
150 >>> import torch
151 >>> from torchmetrics.audio import PerceptualEvaluationSpeechQuality
152 >>> metric = PerceptualEvaluationSpeechQuality(8000, 'nb')
153 >>> metric.update(torch.rand(8000), torch.rand(8000))
154 >>> fig_, ax_ = metric.plot()
155
156 .. plot::
157 :scale: 75
158
159 >>> # Example plotting multiple values
160 >>> import torch
161 >>> from torchmetrics.audio import PerceptualEvaluationSpeechQuality
162 >>> metric = PerceptualEvaluationSpeechQuality(8000, 'nb')
163 >>> values = [ ]
164 >>> for _ in range(10):
165 ... values.append(metric(torch.rand(8000), torch.rand(8000)))
166 >>> fig_, ax_ = metric.plot(values)
167
168 """
169 return self._plot(val, ax)
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/torchmetrics/audio/pesq.py b/src/torchmetrics/audio/pesq.py
--- a/src/torchmetrics/audio/pesq.py
+++ b/src/torchmetrics/audio/pesq.py
@@ -43,13 +43,17 @@
As output of `forward` and `compute` the metric returns the following output
- - ``pesq`` (:class:`~torch.Tensor`): float tensor with shape ``(...,)`` of PESQ value per sample
+ - ``pesq`` (:class:`~torch.Tensor`): float tensor of PESQ value reduced across the batch
.. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install
torchmetrics[audio]`` or ``pip install pesq``. ``pesq`` will compile with your currently
installed version of numpy, meaning that if you upgrade numpy at some point in the future you will
most likely have to reinstall ``pesq``.
+ .. note:: the ``forward`` and ``compute`` methods in this class return a single (reduced) PESQ value
+ for a batch. To obtain a PESQ value for each sample, you may use the functional counterpart in
+ :func:`~torchmetrics.functional.audio.pesq.perceptual_evaluation_speech_quality`.
+
Args:
fs: sampling frequency, should be 16000 or 8000 (Hz)
mode: ``'wb'`` (wide-band) or ``'nb'`` (narrow-band)
| {"golden_diff": "diff --git a/src/torchmetrics/audio/pesq.py b/src/torchmetrics/audio/pesq.py\n--- a/src/torchmetrics/audio/pesq.py\n+++ b/src/torchmetrics/audio/pesq.py\n@@ -43,13 +43,17 @@\n \n As output of `forward` and `compute` the metric returns the following output\n \n- - ``pesq`` (:class:`~torch.Tensor`): float tensor with shape ``(...,)`` of PESQ value per sample\n+ - ``pesq`` (:class:`~torch.Tensor`): float tensor of PESQ value reduced across the batch\n \n .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install\n torchmetrics[audio]`` or ``pip install pesq``. ``pesq`` will compile with your currently\n installed version of numpy, meaning that if you upgrade numpy at some point in the future you will\n most likely have to reinstall ``pesq``.\n \n+ .. note:: the ``forward`` and ``compute`` methods in this class return a single (reduced) PESQ value\n+ for a batch. To obtain a PESQ value for each sample, you may use the functional counterpart in\n+ :func:`~torchmetrics.functional.audio.pesq.perceptual_evaluation_speech_quality`.\n+\n Args:\n fs: sampling frequency, should be 16000 or 8000 (Hz)\n mode: ``'wb'`` (wide-band) or ``'nb'`` (narrow-band)\n", "issue": "Inconsistent docs and code output for `PerceptualEvaluationSpeechQuality `\n## \ud83d\udc1b Bug\r\n[related to #1092]\r\n\r\nThe [docs](https://torchmetrics.readthedocs.io/en/stable/audio/perceptual_evaluation_speech_quality.html) for PESQ metric class state that \r\n> As output of _forward_ and _compute_ the metric returns the following output `pesq` (Tensor): float tensor with shape (...,) of PESQ value per sample.\r\n\r\nHowever, I always get one value, no matter the `batch_size`, as if the metric was being averaged.\r\n\r\n### To Reproduce\r\n\r\n```python\r\nimport torch \r\nfrom torchmetrics.audio import PerceptualEvaluationSpeechQuality as PESQ\r\n\r\npesq = PESQ(fs=16000, mode=\"wb\")\r\n\r\n# create data\r\nbatch_size = 4\r\naudio_size = 12345\r\npreds = torch.FloatTensor(batch_size, audio_size).uniform_(-1, 1)\r\ntarget = torch.FloatTensor(batch_size, audio_size).uniform_(-1, 1)\r\nprint(\"preds.shape:\", preds.shape)\r\n#>preds.shape: torch.Size([4, 12345])\r\n\r\n# compute metric\r\npesq_score = pesq.forward(preds, target)\r\nprint(\"pesq_score:\", pesq_score)\r\n#>pesq_score: tensor(1.5049)\r\n```\r\nI expected the output to be of shape `torch.Size([4])`.\r\nSame behaviour for `pesq.update(preds, target)` followed by `pesq.compute()`.\r\n\r\nThe functional counterpart returns a result [as documented](https://torchmetrics.readthedocs.io/en/stable/audio/perceptual_evaluation_speech_quality.html#torchmetrics.functional.audio.pesq.perceptual_evaluation_speech_quality):\r\n```python\r\nimport torch \r\nfrom torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality as F_PESQ\r\n\r\nn = 4\r\npreds = torch.FloatTensor(n, 12345).uniform_(-1, 1)\r\ntarget = torch.FloatTensor(n, 12345).uniform_(-1, 1)\r\n\r\npesq_score = F_PESQ(preds=preds, target=target, fs=16000, mode=\"wb\")\r\nprint(\"pesq_score:\", pesq_score)\r\n#> pesq_score: tensor([1.5882, 1.5080, 1.5149, 1.5997])\r\n```\r\n\r\n### Environment\r\n\r\n```\r\nLinux\r\npython==3.10.12\r\ntorch==2.0.1\r\ntorchmetrics== 1.1.2\r\n```\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Optional, Sequence, Union\n\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _PESQ_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\n__doctest_requires__ = {\"PerceptualEvaluationSpeechQuality\": [\"pesq\"]}\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"PerceptualEvaluationSpeechQuality.plot\"]\n\n\nclass PerceptualEvaluationSpeechQuality(Metric):\n \"\"\"Calculate `Perceptual Evaluation of Speech Quality`_ (PESQ).\n\n It's a recognized industry standard for audio quality that takes into considerations characteristics such as:\n audio sharpness, call volume, background noise, clipping, audio interference etc. PESQ returns a score between\n -0.5 and 4.5 with the higher scores indicating a better quality.\n\n This metric is a wrapper for the `pesq package`_. Note that input will be moved to ``cpu`` to perform the metric\n calculation.\n\n As input to ``forward`` and ``update`` the metric accepts the following input\n\n - ``preds`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``\n - ``target`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``\n\n As output of `forward` and `compute` the metric returns the following output\n\n - ``pesq`` (:class:`~torch.Tensor`): float tensor with shape ``(...,)`` of PESQ value per sample\n\n .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install\n torchmetrics[audio]`` or ``pip install pesq``. 
``pesq`` will compile with your currently\n installed version of numpy, meaning that if you upgrade numpy at some point in the future you will\n most likely have to reinstall ``pesq``.\n\n Args:\n fs: sampling frequency, should be 16000 or 8000 (Hz)\n mode: ``'wb'`` (wide-band) or ``'nb'`` (narrow-band)\n keep_same_device: whether to move the pesq value to the device of preds\n n_processes: integer specifying the number of processes to run in parallel for the metric calculation.\n Only applies to batches of data and if ``multiprocessing`` package is installed.\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ModuleNotFoundError:\n If ``pesq`` package is not installed\n ValueError:\n If ``fs`` is not either ``8000`` or ``16000``\n ValueError:\n If ``mode`` is not either ``\"wb\"`` or ``\"nb\"``\n\n Example:\n >>> import torch\n >>> from torchmetrics.audio import PerceptualEvaluationSpeechQuality\n >>> g = torch.manual_seed(1)\n >>> preds = torch.randn(8000)\n >>> target = torch.randn(8000)\n >>> pesq = PerceptualEvaluationSpeechQuality(8000, 'nb')\n >>> pesq(preds, target)\n tensor(2.2076)\n >>> wb_pesq = PerceptualEvaluationSpeechQuality(16000, 'wb')\n >>> wb_pesq(preds, target)\n tensor(1.7359)\n\n \"\"\"\n\n sum_pesq: Tensor\n total: Tensor\n full_state_update: bool = False\n is_differentiable: bool = False\n higher_is_better: bool = True\n plot_lower_bound: float = -0.5\n plot_upper_bound: float = 4.5\n\n def __init__(\n self,\n fs: int,\n mode: str,\n n_processes: int = 1,\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n if not _PESQ_AVAILABLE:\n raise ModuleNotFoundError(\n \"PerceptualEvaluationSpeechQuality metric requires that `pesq` is installed.\"\n \" Either install as `pip install torchmetrics[audio]` or `pip install pesq`.\"\n )\n if fs not in (8000, 16000):\n raise ValueError(f\"Expected argument `fs` to either be 8000 or 16000 but got {fs}\")\n self.fs = fs\n if mode not in (\"wb\", \"nb\"):\n raise ValueError(f\"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}\")\n self.mode = mode\n if not isinstance(n_processes, int) and n_processes <= 0:\n raise ValueError(f\"Expected argument `n_processes` to be an int larger than 0 but got {n_processes}\")\n self.n_processes = n_processes\n\n self.add_state(\"sum_pesq\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n\n def update(self, preds: Tensor, target: Tensor) -> None:\n \"\"\"Update state with predictions and targets.\"\"\"\n pesq_batch = perceptual_evaluation_speech_quality(\n preds, target, self.fs, self.mode, False, self.n_processes\n ).to(self.sum_pesq.device)\n\n self.sum_pesq += pesq_batch.sum()\n self.total += pesq_batch.numel()\n\n def compute(self) -> Tensor:\n \"\"\"Compute metric.\"\"\"\n return self.sum_pesq / self.total\n\n def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. 
plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.audio import PerceptualEvaluationSpeechQuality\n >>> metric = PerceptualEvaluationSpeechQuality(8000, 'nb')\n >>> metric.update(torch.rand(8000), torch.rand(8000))\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.audio import PerceptualEvaluationSpeechQuality\n >>> metric = PerceptualEvaluationSpeechQuality(8000, 'nb')\n >>> values = [ ]\n >>> for _ in range(10):\n ... values.append(metric(torch.rand(8000), torch.rand(8000)))\n >>> fig_, ax_ = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/audio/pesq.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Optional, Sequence, Union\n\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _PESQ_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\n__doctest_requires__ = {\"PerceptualEvaluationSpeechQuality\": [\"pesq\"]}\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"PerceptualEvaluationSpeechQuality.plot\"]\n\n\nclass PerceptualEvaluationSpeechQuality(Metric):\n \"\"\"Calculate `Perceptual Evaluation of Speech Quality`_ (PESQ).\n\n It's a recognized industry standard for audio quality that takes into considerations characteristics such as:\n audio sharpness, call volume, background noise, clipping, audio interference etc. PESQ returns a score between\n -0.5 and 4.5 with the higher scores indicating a better quality.\n\n This metric is a wrapper for the `pesq package`_. Note that input will be moved to ``cpu`` to perform the metric\n calculation.\n\n As input to ``forward`` and ``update`` the metric accepts the following input\n\n - ``preds`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``\n - ``target`` (:class:`~torch.Tensor`): float tensor with shape ``(...,time)``\n\n As output of `forward` and `compute` the metric returns the following output\n\n - ``pesq`` (:class:`~torch.Tensor`): float tensor of PESQ value reduced across the batch\n\n .. note:: using this metrics requires you to have ``pesq`` install. Either install as ``pip install\n torchmetrics[audio]`` or ``pip install pesq``. ``pesq`` will compile with your currently\n installed version of numpy, meaning that if you upgrade numpy at some point in the future you will\n most likely have to reinstall ``pesq``.\n\n .. note:: the ``forward`` and ``compute`` methods in this class return a single (reduced) PESQ value\n for a batch. 
To obtain a PESQ value for each sample, you may use the functional counterpart in\n :func:`~torchmetrics.functional.audio.pesq.perceptual_evaluation_speech_quality`.\n\n Args:\n fs: sampling frequency, should be 16000 or 8000 (Hz)\n mode: ``'wb'`` (wide-band) or ``'nb'`` (narrow-band)\n keep_same_device: whether to move the pesq value to the device of preds\n n_processes: integer specifying the number of processes to run in parallel for the metric calculation.\n Only applies to batches of data and if ``multiprocessing`` package is installed.\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ModuleNotFoundError:\n If ``pesq`` package is not installed\n ValueError:\n If ``fs`` is not either ``8000`` or ``16000``\n ValueError:\n If ``mode`` is not either ``\"wb\"`` or ``\"nb\"``\n\n Example:\n >>> import torch\n >>> from torchmetrics.audio import PerceptualEvaluationSpeechQuality\n >>> g = torch.manual_seed(1)\n >>> preds = torch.randn(8000)\n >>> target = torch.randn(8000)\n >>> pesq = PerceptualEvaluationSpeechQuality(8000, 'nb')\n >>> pesq(preds, target)\n tensor(2.2076)\n >>> wb_pesq = PerceptualEvaluationSpeechQuality(16000, 'wb')\n >>> wb_pesq(preds, target)\n tensor(1.7359)\n\n \"\"\"\n\n sum_pesq: Tensor\n total: Tensor\n full_state_update: bool = False\n is_differentiable: bool = False\n higher_is_better: bool = True\n plot_lower_bound: float = -0.5\n plot_upper_bound: float = 4.5\n\n def __init__(\n self,\n fs: int,\n mode: str,\n n_processes: int = 1,\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n if not _PESQ_AVAILABLE:\n raise ModuleNotFoundError(\n \"PerceptualEvaluationSpeechQuality metric requires that `pesq` is installed.\"\n \" Either install as `pip install torchmetrics[audio]` or `pip install pesq`.\"\n )\n if fs not in (8000, 16000):\n raise ValueError(f\"Expected argument `fs` to either be 8000 or 16000 but got {fs}\")\n self.fs = fs\n if mode not in (\"wb\", \"nb\"):\n raise ValueError(f\"Expected argument `mode` to either be 'wb' or 'nb' but got {mode}\")\n self.mode = mode\n if not isinstance(n_processes, int) and n_processes <= 0:\n raise ValueError(f\"Expected argument `n_processes` to be an int larger than 0 but got {n_processes}\")\n self.n_processes = n_processes\n\n self.add_state(\"sum_pesq\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n\n def update(self, preds: Tensor, target: Tensor) -> None:\n \"\"\"Update state with predictions and targets.\"\"\"\n pesq_batch = perceptual_evaluation_speech_quality(\n preds, target, self.fs, self.mode, False, self.n_processes\n ).to(self.sum_pesq.device)\n\n self.sum_pesq += pesq_batch.sum()\n self.total += pesq_batch.numel()\n\n def compute(self) -> Tensor:\n \"\"\"Compute metric.\"\"\"\n return self.sum_pesq / self.total\n\n def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_AX_TYPE] = None) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. 
plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.audio import PerceptualEvaluationSpeechQuality\n >>> metric = PerceptualEvaluationSpeechQuality(8000, 'nb')\n >>> metric.update(torch.rand(8000), torch.rand(8000))\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.audio import PerceptualEvaluationSpeechQuality\n >>> metric = PerceptualEvaluationSpeechQuality(8000, 'nb')\n >>> values = [ ]\n >>> for _ in range(10):\n ... values.append(metric(torch.rand(8000), torch.rand(8000)))\n >>> fig_, ax_ = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/audio/pesq.py"}]} | 2,960 | 343 |
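
The patch in the record above settles the reported inconsistency on the documentation side: the module-based metric reduces PESQ over the batch, while the functional form keeps one score per sample. A short sketch reconciling the two is given below; it assumes the optional `pesq` backend is installed and uses random noise as in the issue.

```python
import torch
from torchmetrics.audio import PerceptualEvaluationSpeechQuality
from torchmetrics.functional.audio.pesq import perceptual_evaluation_speech_quality

preds = torch.empty(4, 16000).uniform_(-1, 1)
target = torch.empty(4, 16000).uniform_(-1, 1)

per_sample = perceptual_evaluation_speech_quality(preds, target, fs=16000, mode="wb")
batch_score = PerceptualEvaluationSpeechQuality(fs=16000, mode="wb")(preds, target)

print(per_sample)                      # shape (4,): one PESQ value per sample
print(batch_score, per_sample.mean())  # the reduced metric matches the mean of the per-sample scores
```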
gh_patches_debug_19070 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-2339 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Difference in training results between single GPU and multi-GPU
For single GPU and multi-GPU training, the single card is 4% higher than the multi-card training. What is the cause?
Here is my configuration:
<img width="150" alt="image" src="https://user-images.githubusercontent.com/109410944/226503405-f70fa165-2fe6-4c23-b1bd-e4d4e7610c32.png">
<img width="207" alt="image" src="https://user-images.githubusercontent.com/109410944/226503453-a0e1ba34-47ff-482e-86ad-13602c338ef1.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onmt/utils/distributed.py`
Content:
```
1 """ Pytorch Distributed utils
2 This piece of code was heavily inspired by the equivalent of Fairseq-py
3 https://github.com/pytorch/fairseq
4 """
5 import os
6 import signal
7 import math
8 import pickle
9 import torch.distributed
10 from onmt.utils.logging import logger
11
12
13 def is_master(opt, device_id):
14 return opt.gpu_ranks[device_id] == 0
15
16
17 def multi_init(opt, device_id):
18 dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
19 master_ip=opt.master_ip,
20 master_port=opt.master_port)
21 dist_world_size = opt.world_size
22 torch.distributed.init_process_group(
23 backend=opt.gpu_backend, init_method=dist_init_method,
24 world_size=dist_world_size, rank=opt.gpu_ranks[device_id])
25 gpu_rank = torch.distributed.get_rank()
26 if not is_master(opt, device_id):
27 logger.disabled = True
28
29 return gpu_rank
30
31
32 def all_reduce_and_rescale_tensors(tensors, rescale_denom,
33 buffer_size=104857600):
34 """All-reduce and rescale tensors in chunks of the specified size.
35
36 Args:
37 tensors: list of Tensors to all-reduce
38 rescale_denom: denominator for rescaling summed Tensors
39 buffer_size: all-reduce chunk size in bytes
40 """
41 # buffer size in bytes, determine equiv. # of elements based on data type
42 buffer_t = tensors[0].new(
43 math.ceil(buffer_size / tensors[0].element_size())).zero_()
44 buffer = []
45
46 def all_reduce_buffer():
47 # copy tensors into buffer_t
48 offset = 0
49 for t in buffer:
50 numel = t.numel()
51 buffer_t[offset:offset+numel].copy_(t.view(-1))
52 offset += numel
53
54 # all-reduce and rescale
55 torch.distributed.all_reduce(buffer_t[:offset], async_op=True)
56 buffer_t.div_(rescale_denom)
57
58 # copy all-reduced buffer back into tensors
59 offset = 0
60 for t in buffer:
61 numel = t.numel()
62 t.view(-1).copy_(buffer_t[offset:offset+numel])
63 offset += numel
64
65 filled = 0
66 for t in tensors:
67 sz = t.numel() * t.element_size()
68 # print(filled, sz)
69 if sz > buffer_size:
70 # tensor is bigger than buffer, all-reduce and rescale directly
71 torch.distributed.all_reduce(t, async_op=True)
72 t.div_(rescale_denom)
73 elif filled + sz > buffer_size:
74 # buffer is full, all-reduce and replace buffer with grad
75 all_reduce_buffer()
76 buffer = [t]
77 filled = sz
78 else:
79 # add tensor to buffer
80 buffer.append(t)
81 filled += sz
82
83 if len(buffer) > 0:
84 all_reduce_buffer()
85
86
87 def all_gather_list(data, max_size=4096):
88 """Gathers arbitrary data from all nodes into a list."""
89 world_size = torch.distributed.get_world_size()
90 if not hasattr(all_gather_list, '_in_buffer') or \
91 max_size != all_gather_list._in_buffer.size():
92 all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)
93 all_gather_list._out_buffers = [
94 torch.cuda.ByteTensor(max_size)
95 for i in range(world_size)
96 ]
97 in_buffer = all_gather_list._in_buffer
98 out_buffers = all_gather_list._out_buffers
99
100 enc = pickle.dumps(data)
101 enc_size = len(enc)
102 if enc_size + 2 > max_size:
103 raise ValueError(
104 'encoded data exceeds max_size: {}'.format(enc_size + 2))
105 assert max_size < 255*256
106 in_buffer[0] = enc_size // 255 # this encoding works for max_size < 65k
107 in_buffer[1] = enc_size % 255
108 in_buffer[2:enc_size+2] = torch.ByteTensor(list(enc))
109
110 torch.distributed.all_gather(out_buffers, in_buffer.cuda())
111
112 results = []
113 for i in range(world_size):
114 out_buffer = out_buffers[i]
115 size = (255 * out_buffer[0].item()) + out_buffer[1].item()
116
117 bytes_list = bytes(out_buffer[2:size+2].tolist())
118 result = pickle.loads(bytes_list)
119 results.append(result)
120 return results
121
122
123 class ErrorHandler(object):
124 """A class that listens for exceptions in children processes and propagates
125 the tracebacks to the parent process."""
126
127 def __init__(self, error_queue):
128 """ init error handler """
129 import signal
130 import threading
131 self.error_queue = error_queue
132 self.children_pids = []
133 self.error_thread = threading.Thread(
134 target=self.error_listener, daemon=True)
135 self.error_thread.start()
136 signal.signal(signal.SIGUSR1, self.signal_handler)
137
138 def add_child(self, pid):
139 """ error handler """
140 self.children_pids.append(pid)
141
142 def error_listener(self):
143 """ error listener """
144 (rank, original_trace) = self.error_queue.get()
145 self.error_queue.put((rank, original_trace))
146 os.kill(os.getpid(), signal.SIGUSR1)
147
148 def signal_handler(self, signalnum, stackframe):
149 """ signal handler """
150 for pid in self.children_pids:
151 os.kill(pid, signal.SIGINT) # kill children processes
152 (rank, original_trace) = self.error_queue.get()
153 msg = """\n\n-- Tracebacks above this line can probably
154 be ignored --\n\n"""
155 msg += original_trace
156 raise Exception(msg)
157
158
159 def consumer(process_fn, opt, device_id, error_queue): # noqa: E501
160 """Run `process_fn` on `device_id` with data from `batch_queue`."""
161 try:
162 gpu_rank = multi_init(opt, device_id)
163 if gpu_rank != opt.gpu_ranks[device_id]:
164 raise AssertionError("An error occurred in \
165 Distributed initialization")
166 process_fn(opt, device_id=device_id)
167 except KeyboardInterrupt:
168 pass # killed by parent, do nothing
169 except Exception:
170 # propagate exception to parent process, keeping original traceback
171 import traceback
172 error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onmt/utils/distributed.py b/onmt/utils/distributed.py
--- a/onmt/utils/distributed.py
+++ b/onmt/utils/distributed.py
@@ -52,7 +52,7 @@
offset += numel
# all-reduce and rescale
- torch.distributed.all_reduce(buffer_t[:offset], async_op=True)
+ torch.distributed.all_reduce(buffer_t[:offset], async_op=False)
buffer_t.div_(rescale_denom)
# copy all-reduced buffer back into tensors
@@ -68,7 +68,7 @@
# print(filled, sz)
if sz > buffer_size:
# tensor is bigger than buffer, all-reduce and rescale directly
- torch.distributed.all_reduce(t, async_op=True)
+ torch.distributed.all_reduce(t, async_op=False)
t.div_(rescale_denom)
elif filled + sz > buffer_size:
# buffer is full, all-reduce and replace buffer with grad
| {"golden_diff": "diff --git a/onmt/utils/distributed.py b/onmt/utils/distributed.py\n--- a/onmt/utils/distributed.py\n+++ b/onmt/utils/distributed.py\n@@ -52,7 +52,7 @@\n offset += numel\n \n # all-reduce and rescale\n- torch.distributed.all_reduce(buffer_t[:offset], async_op=True)\n+ torch.distributed.all_reduce(buffer_t[:offset], async_op=False)\n buffer_t.div_(rescale_denom)\n \n # copy all-reduced buffer back into tensors\n@@ -68,7 +68,7 @@\n # print(filled, sz)\n if sz > buffer_size:\n # tensor is bigger than buffer, all-reduce and rescale directly\n- torch.distributed.all_reduce(t, async_op=True)\n+ torch.distributed.all_reduce(t, async_op=False)\n t.div_(rescale_denom)\n elif filled + sz > buffer_size:\n # buffer is full, all-reduce and replace buffer with grad\n", "issue": "Difference in training results between single GPU and multi-GPU\nFor single GPU and multi-GPU training, the single card is 4% higher than the multi-card training. What is the cause?\r\nHere is my configuration\uff1a\r\n<img width=\"150\" alt=\"image\" src=\"https://user-images.githubusercontent.com/109410944/226503405-f70fa165-2fe6-4c23-b1bd-e4d4e7610c32.png\">\r\n<img width=\"207\" alt=\"image\" src=\"https://user-images.githubusercontent.com/109410944/226503453-a0e1ba34-47ff-482e-86ad-13602c338ef1.png\">\r\n\n", "before_files": [{"content": "\"\"\" Pytorch Distributed utils\n This piece of code was heavily inspired by the equivalent of Fairseq-py\n https://github.com/pytorch/fairseq\n\"\"\"\nimport os\nimport signal\nimport math\nimport pickle\nimport torch.distributed\nfrom onmt.utils.logging import logger\n\n\ndef is_master(opt, device_id):\n return opt.gpu_ranks[device_id] == 0\n\n\ndef multi_init(opt, device_id):\n dist_init_method = 'tcp://{master_ip}:{master_port}'.format(\n master_ip=opt.master_ip,\n master_port=opt.master_port)\n dist_world_size = opt.world_size\n torch.distributed.init_process_group(\n backend=opt.gpu_backend, init_method=dist_init_method,\n world_size=dist_world_size, rank=opt.gpu_ranks[device_id])\n gpu_rank = torch.distributed.get_rank()\n if not is_master(opt, device_id):\n logger.disabled = True\n\n return gpu_rank\n\n\ndef all_reduce_and_rescale_tensors(tensors, rescale_denom,\n buffer_size=104857600):\n \"\"\"All-reduce and rescale tensors in chunks of the specified size.\n\n Args:\n tensors: list of Tensors to all-reduce\n rescale_denom: denominator for rescaling summed Tensors\n buffer_size: all-reduce chunk size in bytes\n \"\"\"\n # buffer size in bytes, determine equiv. 
# of elements based on data type\n buffer_t = tensors[0].new(\n math.ceil(buffer_size / tensors[0].element_size())).zero_()\n buffer = []\n\n def all_reduce_buffer():\n # copy tensors into buffer_t\n offset = 0\n for t in buffer:\n numel = t.numel()\n buffer_t[offset:offset+numel].copy_(t.view(-1))\n offset += numel\n\n # all-reduce and rescale\n torch.distributed.all_reduce(buffer_t[:offset], async_op=True)\n buffer_t.div_(rescale_denom)\n\n # copy all-reduced buffer back into tensors\n offset = 0\n for t in buffer:\n numel = t.numel()\n t.view(-1).copy_(buffer_t[offset:offset+numel])\n offset += numel\n\n filled = 0\n for t in tensors:\n sz = t.numel() * t.element_size()\n # print(filled, sz)\n if sz > buffer_size:\n # tensor is bigger than buffer, all-reduce and rescale directly\n torch.distributed.all_reduce(t, async_op=True)\n t.div_(rescale_denom)\n elif filled + sz > buffer_size:\n # buffer is full, all-reduce and replace buffer with grad\n all_reduce_buffer()\n buffer = [t]\n filled = sz\n else:\n # add tensor to buffer\n buffer.append(t)\n filled += sz\n\n if len(buffer) > 0:\n all_reduce_buffer()\n\n\ndef all_gather_list(data, max_size=4096):\n \"\"\"Gathers arbitrary data from all nodes into a list.\"\"\"\n world_size = torch.distributed.get_world_size()\n if not hasattr(all_gather_list, '_in_buffer') or \\\n max_size != all_gather_list._in_buffer.size():\n all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)\n all_gather_list._out_buffers = [\n torch.cuda.ByteTensor(max_size)\n for i in range(world_size)\n ]\n in_buffer = all_gather_list._in_buffer\n out_buffers = all_gather_list._out_buffers\n\n enc = pickle.dumps(data)\n enc_size = len(enc)\n if enc_size + 2 > max_size:\n raise ValueError(\n 'encoded data exceeds max_size: {}'.format(enc_size + 2))\n assert max_size < 255*256\n in_buffer[0] = enc_size // 255 # this encoding works for max_size < 65k\n in_buffer[1] = enc_size % 255\n in_buffer[2:enc_size+2] = torch.ByteTensor(list(enc))\n\n torch.distributed.all_gather(out_buffers, in_buffer.cuda())\n\n results = []\n for i in range(world_size):\n out_buffer = out_buffers[i]\n size = (255 * out_buffer[0].item()) + out_buffer[1].item()\n\n bytes_list = bytes(out_buffer[2:size+2].tolist())\n result = pickle.loads(bytes_list)\n results.append(result)\n return results\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)\n\n\ndef consumer(process_fn, opt, device_id, error_queue): # noqa: E501\n \"\"\"Run 
`process_fn` on `device_id` with data from `batch_queue`.\"\"\"\n try:\n gpu_rank = multi_init(opt, device_id)\n if gpu_rank != opt.gpu_ranks[device_id]:\n raise AssertionError(\"An error occurred in \\\n Distributed initialization\")\n process_fn(opt, device_id=device_id)\n except KeyboardInterrupt:\n pass # killed by parent, do nothing\n except Exception:\n # propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))\n", "path": "onmt/utils/distributed.py"}], "after_files": [{"content": "\"\"\" Pytorch Distributed utils\n This piece of code was heavily inspired by the equivalent of Fairseq-py\n https://github.com/pytorch/fairseq\n\"\"\"\nimport os\nimport signal\nimport math\nimport pickle\nimport torch.distributed\nfrom onmt.utils.logging import logger\n\n\ndef is_master(opt, device_id):\n return opt.gpu_ranks[device_id] == 0\n\n\ndef multi_init(opt, device_id):\n dist_init_method = 'tcp://{master_ip}:{master_port}'.format(\n master_ip=opt.master_ip,\n master_port=opt.master_port)\n dist_world_size = opt.world_size\n torch.distributed.init_process_group(\n backend=opt.gpu_backend, init_method=dist_init_method,\n world_size=dist_world_size, rank=opt.gpu_ranks[device_id])\n gpu_rank = torch.distributed.get_rank()\n if not is_master(opt, device_id):\n logger.disabled = True\n\n return gpu_rank\n\n\ndef all_reduce_and_rescale_tensors(tensors, rescale_denom,\n buffer_size=104857600):\n \"\"\"All-reduce and rescale tensors in chunks of the specified size.\n\n Args:\n tensors: list of Tensors to all-reduce\n rescale_denom: denominator for rescaling summed Tensors\n buffer_size: all-reduce chunk size in bytes\n \"\"\"\n # buffer size in bytes, determine equiv. 
# of elements based on data type\n buffer_t = tensors[0].new(\n math.ceil(buffer_size / tensors[0].element_size())).zero_()\n buffer = []\n\n def all_reduce_buffer():\n # copy tensors into buffer_t\n offset = 0\n for t in buffer:\n numel = t.numel()\n buffer_t[offset:offset+numel].copy_(t.view(-1))\n offset += numel\n\n # all-reduce and rescale\n torch.distributed.all_reduce(buffer_t[:offset], async_op=False)\n buffer_t.div_(rescale_denom)\n\n # copy all-reduced buffer back into tensors\n offset = 0\n for t in buffer:\n numel = t.numel()\n t.view(-1).copy_(buffer_t[offset:offset+numel])\n offset += numel\n\n filled = 0\n for t in tensors:\n sz = t.numel() * t.element_size()\n # print(filled, sz)\n if sz > buffer_size:\n # tensor is bigger than buffer, all-reduce and rescale directly\n torch.distributed.all_reduce(t, async_op=False)\n t.div_(rescale_denom)\n elif filled + sz > buffer_size:\n # buffer is full, all-reduce and replace buffer with grad\n all_reduce_buffer()\n buffer = [t]\n filled = sz\n else:\n # add tensor to buffer\n buffer.append(t)\n filled += sz\n\n if len(buffer) > 0:\n all_reduce_buffer()\n\n\ndef all_gather_list(data, max_size=4096):\n \"\"\"Gathers arbitrary data from all nodes into a list.\"\"\"\n world_size = torch.distributed.get_world_size()\n if not hasattr(all_gather_list, '_in_buffer') or \\\n max_size != all_gather_list._in_buffer.size():\n all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)\n all_gather_list._out_buffers = [\n torch.cuda.ByteTensor(max_size)\n for i in range(world_size)\n ]\n in_buffer = all_gather_list._in_buffer\n out_buffers = all_gather_list._out_buffers\n\n enc = pickle.dumps(data)\n enc_size = len(enc)\n if enc_size + 2 > max_size:\n raise ValueError(\n 'encoded data exceeds max_size: {}'.format(enc_size + 2))\n assert max_size < 255*256\n in_buffer[0] = enc_size // 255 # this encoding works for max_size < 65k\n in_buffer[1] = enc_size % 255\n in_buffer[2:enc_size+2] = torch.ByteTensor(list(enc))\n\n torch.distributed.all_gather(out_buffers, in_buffer.cuda())\n\n results = []\n for i in range(world_size):\n out_buffer = out_buffers[i]\n size = (255 * out_buffer[0].item()) + out_buffer[1].item()\n\n bytes_list = bytes(out_buffer[2:size+2].tolist())\n result = pickle.loads(bytes_list)\n results.append(result)\n return results\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)\n\n\ndef consumer(process_fn, opt, device_id, error_queue): # noqa: E501\n \"\"\"Run 
`process_fn` on `device_id` with data from `batch_queue`.\"\"\"\n try:\n gpu_rank = multi_init(opt, device_id)\n if gpu_rank != opt.gpu_ranks[device_id]:\n raise AssertionError(\"An error occurred in \\\n Distributed initialization\")\n process_fn(opt, device_id=device_id)\n except KeyboardInterrupt:\n pass # killed by parent, do nothing\n except Exception:\n # propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))\n", "path": "onmt/utils/distributed.py"}]} | 2,266 | 218 |
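The patch in the record above replaces `async_op=True` with `async_op=False` in the gradient all-reduce calls. A minimal sketch of why that matters, assuming PyTorch's `torch.distributed` API and an already-initialized process group (the tensor and denominator names are illustrative, not taken from OpenNMT):

```python
import torch
import torch.distributed as dist

def all_reduce_and_rescale_sync(grad: torch.Tensor, rescale_denom: float) -> None:
    # Blocking call: every rank holds the fully summed gradient in `grad`
    # before the next statement runs, so the rescale sees correct data.
    dist.all_reduce(grad, async_op=False)
    grad.div_(rescale_denom)

def all_reduce_and_rescale_async(grad: torch.Tensor, rescale_denom: float) -> None:
    # Non-blocking call returns a work handle; without wait() the
    # following div_ may observe a partially reduced tensor, which is
    # one way single-GPU and multi-GPU runs can silently diverge.
    work = dist.all_reduce(grad, async_op=True)
    work.wait()  # required before reading or writing `grad` again
    grad.div_(rescale_denom)
```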
gh_patches_debug_14695 | rasdani/github-patches | git_diff | Netflix__lemur-142 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SubCA autogenerated descriptions for their certs are incorrect
If you create a root CA, and look up the certificate for that CA its description is:
This is the ROOT certificate for the $CN certificate authority.
If you create a subCA off of that rootCA, and look up the certificate for that SubCA its description is:
This is the ROOT certificate for the $CN certificate authority
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lemur/authorities/service.py`
Content:
```
1 """
2 .. module: lemur.authorities.service
3 :platform: Unix
4 :synopsis: This module contains all of the services level functions used to
5 administer authorities in Lemur
6 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
7 :license: Apache, see LICENSE for more details.
8 .. moduleauthor:: Kevin Glisson <[email protected]>
9
10 """
11 from flask import g
12 from flask import current_app
13
14 from lemur import database
15 from lemur.authorities.models import Authority
16 from lemur.roles import service as role_service
17 from lemur.notifications import service as notification_service
18
19 from lemur.roles.models import Role
20 from lemur.certificates.models import Certificate
21
22 from lemur.plugins.base import plugins
23
24
25 def update(authority_id, description=None, owner=None, active=None, roles=None):
26 """
27 Update a an authority with new values.
28
29 :param authority_id:
30 :param roles: roles that are allowed to use this authority
31 :rtype : Authority
32 :return:
33 """
34 authority = get(authority_id)
35 if roles:
36 authority = database.update_list(authority, 'roles', Role, roles)
37
38 if active:
39 authority.active = active
40
41 authority.description = description
42 authority.owner = owner
43 return database.update(authority)
44
45
46 def create(kwargs):
47 """
48 Create a new authority.
49
50 :rtype : Authority
51 :return:
52 """
53
54 issuer = plugins.get(kwargs.get('pluginName'))
55
56 kwargs['creator'] = g.current_user.email
57 cert_body, intermediate, issuer_roles = issuer.create_authority(kwargs)
58
59 cert = Certificate(cert_body, chain=intermediate)
60 cert.owner = kwargs['ownerEmail']
61 cert.description = "This is the ROOT certificate for the {0} certificate authority".format(kwargs.get('caName'))
62 cert.user = g.current_user
63
64 cert.notifications = notification_service.create_default_expiration_notifications(
65 'DEFAULT_SECURITY',
66 current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')
67 )
68
69 # we create and attach any roles that the issuer gives us
70 role_objs = []
71 for r in issuer_roles:
72
73 role = role_service.create(
74 r['name'],
75 password=r['password'],
76 description="{0} auto generated role".format(kwargs.get('pluginName')),
77 username=r['username'])
78
79 # the user creating the authority should be able to administer it
80 if role.username == 'admin':
81 g.current_user.roles.append(role)
82
83 role_objs.append(role)
84
85 authority = Authority(
86 kwargs.get('caName'),
87 kwargs['ownerEmail'],
88 kwargs['pluginName'],
89 cert_body,
90 description=kwargs['caDescription'],
91 chain=intermediate,
92 roles=role_objs
93 )
94
95 database.update(cert)
96 authority = database.create(authority)
97
98 g.current_user.authorities.append(authority)
99
100 return authority
101
102
103 def get_all():
104 """
105 Get all authorities that are currently in Lemur.
106
107 :rtype : List
108 :return:
109 """
110 query = database.session_query(Authority)
111 return database.find_all(query, Authority, {}).all()
112
113
114 def get(authority_id):
115 """
116 Retrieves an authority given it's ID
117
118 :rtype : Authority
119 :param authority_id:
120 :return:
121 """
122 return database.get(Authority, authority_id)
123
124
125 def get_by_name(authority_name):
126 """
127 Retrieves an authority given it's name.
128
129 :param authority_name:
130 :rtype : Authority
131 :return:
132 """
133 return database.get(Authority, authority_name, field='name')
134
135
136 def get_authority_role(ca_name):
137 """
138 Attempts to get the authority role for a given ca uses current_user
139 as a basis for accomplishing that.
140
141 :param ca_name:
142 """
143 if g.current_user.is_admin:
144 authority = get_by_name(ca_name)
145 # TODO we should pick admin ca roles for admin
146 return authority.roles[0]
147 else:
148 for role in g.current_user.roles:
149 if role.authority:
150 if role.authority.name == ca_name:
151 return role
152
153
154 def render(args):
155 """
156 Helper that helps us render the REST Api responses.
157 :param args:
158 :return:
159 """
160 query = database.session_query(Authority)
161 sort_by = args.pop('sort_by')
162 sort_dir = args.pop('sort_dir')
163 page = args.pop('page')
164 count = args.pop('count')
165 filt = args.pop('filter')
166
167 if filt:
168 terms = filt.split(';')
169 if 'active' in filt: # this is really weird but strcmp seems to not work here??
170 query = query.filter(Authority.active == terms[1])
171 else:
172 query = database.filter(query, Authority, terms)
173
174 # we make sure that a user can only use an authority they either own are are a member of - admins can see all
175 if not g.current_user.is_admin:
176 authority_ids = []
177 for role in g.current_user.roles:
178 if role.authority:
179 authority_ids.append(role.authority.id)
180 query = query.filter(Authority.id.in_(authority_ids))
181
182 query = database.find_all(query, Authority, args)
183
184 if sort_by and sort_dir:
185 query = database.sort(query, Authority, sort_by, sort_dir)
186
187 return database.paginate(query, page, count)
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lemur/authorities/service.py b/lemur/authorities/service.py
--- a/lemur/authorities/service.py
+++ b/lemur/authorities/service.py
@@ -58,7 +58,15 @@
cert = Certificate(cert_body, chain=intermediate)
cert.owner = kwargs['ownerEmail']
- cert.description = "This is the ROOT certificate for the {0} certificate authority".format(kwargs.get('caName'))
+
+ if kwargs['caType'] == 'subca':
+ cert.description = "This is the ROOT certificate for the {0} sub certificate authority the parent \
+ authority is {1}.".format(kwargs.get('caName'), kwargs.get('caParent'))
+ else:
+ cert.description = "This is the ROOT certificate for the {0} certificate authority.".format(
+ kwargs.get('caName')
+ )
+
cert.user = g.current_user
cert.notifications = notification_service.create_default_expiration_notifications(
| {"golden_diff": "diff --git a/lemur/authorities/service.py b/lemur/authorities/service.py\n--- a/lemur/authorities/service.py\n+++ b/lemur/authorities/service.py\n@@ -58,7 +58,15 @@\n \n cert = Certificate(cert_body, chain=intermediate)\n cert.owner = kwargs['ownerEmail']\n- cert.description = \"This is the ROOT certificate for the {0} certificate authority\".format(kwargs.get('caName'))\n+\n+ if kwargs['caType'] == 'subca':\n+ cert.description = \"This is the ROOT certificate for the {0} sub certificate authority the parent \\\n+ authority is {1}.\".format(kwargs.get('caName'), kwargs.get('caParent'))\n+ else:\n+ cert.description = \"This is the ROOT certificate for the {0} certificate authority.\".format(\n+ kwargs.get('caName')\n+ )\n+\n cert.user = g.current_user\n \n cert.notifications = notification_service.create_default_expiration_notifications(\n", "issue": "SubCA autogenerated descriptions for their certs are incorrect\nIf you create a root CA, and look up the certificate for that CA its description is: \nThis is the ROOT certificate for the $CN certificate authority.\nIf you create a subCA off of that rootCA, and look up the certificate for that SubCA its description is: \nThis is the ROOT certificate for the $CN certificate authority\n\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.authorities.service\n :platform: Unix\n :synopsis: This module contains all of the services level functions used to\n administer authorities in Lemur\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nfrom flask import g\nfrom flask import current_app\n\nfrom lemur import database\nfrom lemur.authorities.models import Authority\nfrom lemur.roles import service as role_service\nfrom lemur.notifications import service as notification_service\n\nfrom lemur.roles.models import Role\nfrom lemur.certificates.models import Certificate\n\nfrom lemur.plugins.base import plugins\n\n\ndef update(authority_id, description=None, owner=None, active=None, roles=None):\n \"\"\"\n Update a an authority with new values.\n\n :param authority_id:\n :param roles: roles that are allowed to use this authority\n :rtype : Authority\n :return:\n \"\"\"\n authority = get(authority_id)\n if roles:\n authority = database.update_list(authority, 'roles', Role, roles)\n\n if active:\n authority.active = active\n\n authority.description = description\n authority.owner = owner\n return database.update(authority)\n\n\ndef create(kwargs):\n \"\"\"\n Create a new authority.\n\n :rtype : Authority\n :return:\n \"\"\"\n\n issuer = plugins.get(kwargs.get('pluginName'))\n\n kwargs['creator'] = g.current_user.email\n cert_body, intermediate, issuer_roles = issuer.create_authority(kwargs)\n\n cert = Certificate(cert_body, chain=intermediate)\n cert.owner = kwargs['ownerEmail']\n cert.description = \"This is the ROOT certificate for the {0} certificate authority\".format(kwargs.get('caName'))\n cert.user = g.current_user\n\n cert.notifications = notification_service.create_default_expiration_notifications(\n 'DEFAULT_SECURITY',\n current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')\n )\n\n # we create and attach any roles that the issuer gives us\n role_objs = []\n for r in issuer_roles:\n\n role = role_service.create(\n r['name'],\n password=r['password'],\n description=\"{0} auto generated role\".format(kwargs.get('pluginName')),\n username=r['username'])\n\n # the user creating the authority should be able 
to administer it\n if role.username == 'admin':\n g.current_user.roles.append(role)\n\n role_objs.append(role)\n\n authority = Authority(\n kwargs.get('caName'),\n kwargs['ownerEmail'],\n kwargs['pluginName'],\n cert_body,\n description=kwargs['caDescription'],\n chain=intermediate,\n roles=role_objs\n )\n\n database.update(cert)\n authority = database.create(authority)\n\n g.current_user.authorities.append(authority)\n\n return authority\n\n\ndef get_all():\n \"\"\"\n Get all authorities that are currently in Lemur.\n\n :rtype : List\n :return:\n \"\"\"\n query = database.session_query(Authority)\n return database.find_all(query, Authority, {}).all()\n\n\ndef get(authority_id):\n \"\"\"\n Retrieves an authority given it's ID\n\n :rtype : Authority\n :param authority_id:\n :return:\n \"\"\"\n return database.get(Authority, authority_id)\n\n\ndef get_by_name(authority_name):\n \"\"\"\n Retrieves an authority given it's name.\n\n :param authority_name:\n :rtype : Authority\n :return:\n \"\"\"\n return database.get(Authority, authority_name, field='name')\n\n\ndef get_authority_role(ca_name):\n \"\"\"\n Attempts to get the authority role for a given ca uses current_user\n as a basis for accomplishing that.\n\n :param ca_name:\n \"\"\"\n if g.current_user.is_admin:\n authority = get_by_name(ca_name)\n # TODO we should pick admin ca roles for admin\n return authority.roles[0]\n else:\n for role in g.current_user.roles:\n if role.authority:\n if role.authority.name == ca_name:\n return role\n\n\ndef render(args):\n \"\"\"\n Helper that helps us render the REST Api responses.\n :param args:\n :return:\n \"\"\"\n query = database.session_query(Authority)\n sort_by = args.pop('sort_by')\n sort_dir = args.pop('sort_dir')\n page = args.pop('page')\n count = args.pop('count')\n filt = args.pop('filter')\n\n if filt:\n terms = filt.split(';')\n if 'active' in filt: # this is really weird but strcmp seems to not work here??\n query = query.filter(Authority.active == terms[1])\n else:\n query = database.filter(query, Authority, terms)\n\n # we make sure that a user can only use an authority they either own are are a member of - admins can see all\n if not g.current_user.is_admin:\n authority_ids = []\n for role in g.current_user.roles:\n if role.authority:\n authority_ids.append(role.authority.id)\n query = query.filter(Authority.id.in_(authority_ids))\n\n query = database.find_all(query, Authority, args)\n\n if sort_by and sort_dir:\n query = database.sort(query, Authority, sort_by, sort_dir)\n\n return database.paginate(query, page, count)\n", "path": "lemur/authorities/service.py"}], "after_files": [{"content": "\"\"\"\n.. module: lemur.authorities.service\n :platform: Unix\n :synopsis: This module contains all of the services level functions used to\n administer authorities in Lemur\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. 
moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nfrom flask import g\nfrom flask import current_app\n\nfrom lemur import database\nfrom lemur.authorities.models import Authority\nfrom lemur.roles import service as role_service\nfrom lemur.notifications import service as notification_service\n\nfrom lemur.roles.models import Role\nfrom lemur.certificates.models import Certificate\n\nfrom lemur.plugins.base import plugins\n\n\ndef update(authority_id, description=None, owner=None, active=None, roles=None):\n \"\"\"\n Update a an authority with new values.\n\n :param authority_id:\n :param roles: roles that are allowed to use this authority\n :rtype : Authority\n :return:\n \"\"\"\n authority = get(authority_id)\n if roles:\n authority = database.update_list(authority, 'roles', Role, roles)\n\n if active:\n authority.active = active\n\n authority.description = description\n authority.owner = owner\n return database.update(authority)\n\n\ndef create(kwargs):\n \"\"\"\n Create a new authority.\n\n :rtype : Authority\n :return:\n \"\"\"\n\n issuer = plugins.get(kwargs.get('pluginName'))\n\n kwargs['creator'] = g.current_user.email\n cert_body, intermediate, issuer_roles = issuer.create_authority(kwargs)\n\n cert = Certificate(cert_body, chain=intermediate)\n cert.owner = kwargs['ownerEmail']\n\n if kwargs['caType'] == 'subca':\n cert.description = \"This is the ROOT certificate for the {0} sub certificate authority the parent \\\n authority is {1}.\".format(kwargs.get('caName'), kwargs.get('caParent'))\n else:\n cert.description = \"This is the ROOT certificate for the {0} certificate authority.\".format(\n kwargs.get('caName')\n )\n\n cert.user = g.current_user\n\n cert.notifications = notification_service.create_default_expiration_notifications(\n 'DEFAULT_SECURITY',\n current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')\n )\n\n # we create and attach any roles that the issuer gives us\n role_objs = []\n for r in issuer_roles:\n\n role = role_service.create(\n r['name'],\n password=r['password'],\n description=\"{0} auto generated role\".format(kwargs.get('pluginName')),\n username=r['username'])\n\n # the user creating the authority should be able to administer it\n if role.username == 'admin':\n g.current_user.roles.append(role)\n\n role_objs.append(role)\n\n authority = Authority(\n kwargs.get('caName'),\n kwargs['ownerEmail'],\n kwargs['pluginName'],\n cert_body,\n description=kwargs['caDescription'],\n chain=intermediate,\n roles=role_objs\n )\n\n database.update(cert)\n authority = database.create(authority)\n\n g.current_user.authorities.append(authority)\n\n return authority\n\n\ndef get_all():\n \"\"\"\n Get all authorities that are currently in Lemur.\n\n :rtype : List\n :return:\n \"\"\"\n query = database.session_query(Authority)\n return database.find_all(query, Authority, {}).all()\n\n\ndef get(authority_id):\n \"\"\"\n Retrieves an authority given it's ID\n\n :rtype : Authority\n :param authority_id:\n :return:\n \"\"\"\n return database.get(Authority, authority_id)\n\n\ndef get_by_name(authority_name):\n \"\"\"\n Retrieves an authority given it's name.\n\n :param authority_name:\n :rtype : Authority\n :return:\n \"\"\"\n return database.get(Authority, authority_name, field='name')\n\n\ndef get_authority_role(ca_name):\n \"\"\"\n Attempts to get the authority role for a given ca uses current_user\n as a basis for accomplishing that.\n\n :param ca_name:\n \"\"\"\n if g.current_user.is_admin:\n authority = get_by_name(ca_name)\n # TODO we should pick admin ca roles for 
admin\n return authority.roles[0]\n else:\n for role in g.current_user.roles:\n if role.authority:\n if role.authority.name == ca_name:\n return role\n\n\ndef render(args):\n \"\"\"\n Helper that helps us render the REST Api responses.\n :param args:\n :return:\n \"\"\"\n query = database.session_query(Authority)\n sort_by = args.pop('sort_by')\n sort_dir = args.pop('sort_dir')\n page = args.pop('page')\n count = args.pop('count')\n filt = args.pop('filter')\n\n if filt:\n terms = filt.split(';')\n if 'active' in filt: # this is really weird but strcmp seems to not work here??\n query = query.filter(Authority.active == terms[1])\n else:\n query = database.filter(query, Authority, terms)\n\n # we make sure that a user can only use an authority they either own are are a member of - admins can see all\n if not g.current_user.is_admin:\n authority_ids = []\n for role in g.current_user.roles:\n if role.authority:\n authority_ids.append(role.authority.id)\n query = query.filter(Authority.id.in_(authority_ids))\n\n query = database.find_all(query, Authority, args)\n\n if sort_by and sort_dir:\n query = database.sort(query, Authority, sort_by, sort_dir)\n\n return database.paginate(query, page, count)\n", "path": "lemur/authorities/service.py"}]} | 1,972 | 219 |
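The fix in the record above branches on the `caType` kwarg so that sub-CA certificates get a description naming their parent authority. A standalone sketch of that logic, assuming the same `caType`, `caName`, and `caParent` keys that Lemur passes into `create()`:

```python
def build_ca_cert_description(kwargs: dict) -> str:
    # Mirrors the conditional added in the diff: sub-CAs mention their parent.
    if kwargs.get("caType") == "subca":
        return (
            "This is the ROOT certificate for the {0} sub certificate authority "
            "the parent authority is {1}.".format(
                kwargs.get("caName"), kwargs.get("caParent")
            )
        )
    return "This is the ROOT certificate for the {0} certificate authority.".format(
        kwargs.get("caName")
    )

# Hypothetical usage:
# build_ca_cert_description({"caType": "subca", "caName": "SubCA1", "caParent": "RootCA"})
```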
gh_patches_debug_26002 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2493 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OTLP HTTP Exporter should append `v1/{signal}` to the endpoint URL when non-per-signal env var is used
Per the spec re: [Endpoint URLs for OTLP/HTTP](https://github.com/open-telemetry/opentelemetry-specification/blob/b7473b5de0f55f921f896948442ebb274f58b584/specification/protocol/exporter.md#per-signal-urls), when the non-per-signal endpoint env var (`OTEL_EXPORTER_OTLP_ENDPOINT`) is set, the exporter *must* construct per-signal URLs (either `v1/traces` or `v1/metrics`).
Currently, the [exporter does not do this](https://github.com/open-telemetry/opentelemetry-python/blob/80f5a20ba8f3a71450fe3020fecf362fedb76bff/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py#L68); `v1/traces` must be manually added to the end point when setting only `OTEL_EXPORTER_OTLP_ENDPOINT`. Not doing so produces a 404 error when attempting to export spans.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import gzip
16 import logging
17 import zlib
18 from io import BytesIO
19 from os import environ
20 from typing import Dict, Optional
21 from time import sleep
22
23 import requests
24 from backoff import expo
25
26 from opentelemetry.sdk.environment_variables import (
27 OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,
28 OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,
29 OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
30 OTEL_EXPORTER_OTLP_TRACES_HEADERS,
31 OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,
32 OTEL_EXPORTER_OTLP_CERTIFICATE,
33 OTEL_EXPORTER_OTLP_COMPRESSION,
34 OTEL_EXPORTER_OTLP_ENDPOINT,
35 OTEL_EXPORTER_OTLP_HEADERS,
36 OTEL_EXPORTER_OTLP_TIMEOUT,
37 )
38 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
39 from opentelemetry.exporter.otlp.proto.http import Compression
40 from opentelemetry.exporter.otlp.proto.http.trace_exporter.encoder import (
41 _ProtobufEncoder,
42 )
43 from opentelemetry.util.re import parse_headers
44
45
46 _logger = logging.getLogger(__name__)
47
48
49 DEFAULT_COMPRESSION = Compression.NoCompression
50 DEFAULT_ENDPOINT = "http://localhost:4318/v1/traces"
51 DEFAULT_TIMEOUT = 10 # in seconds
52
53
54 class OTLPSpanExporter(SpanExporter):
55
56 _MAX_RETRY_TIMEOUT = 64
57
58 def __init__(
59 self,
60 endpoint: Optional[str] = None,
61 certificate_file: Optional[str] = None,
62 headers: Optional[Dict[str, str]] = None,
63 timeout: Optional[int] = None,
64 compression: Optional[Compression] = None,
65 ):
66 self._endpoint = endpoint or environ.get(
67 OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
68 environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT),
69 )
70 self._certificate_file = certificate_file or environ.get(
71 OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,
72 environ.get(OTEL_EXPORTER_OTLP_CERTIFICATE, True),
73 )
74 headers_string = environ.get(
75 OTEL_EXPORTER_OTLP_TRACES_HEADERS,
76 environ.get(OTEL_EXPORTER_OTLP_HEADERS, ""),
77 )
78 self._headers = headers or parse_headers(headers_string)
79 self._timeout = timeout or int(
80 environ.get(
81 OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,
82 environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, DEFAULT_TIMEOUT),
83 )
84 )
85 self._compression = compression or _compression_from_env()
86 self._session = requests.Session()
87 self._session.headers.update(self._headers)
88 self._session.headers.update(
89 {"Content-Type": _ProtobufEncoder._CONTENT_TYPE}
90 )
91 if self._compression is not Compression.NoCompression:
92 self._session.headers.update(
93 {"Content-Encoding": self._compression.value}
94 )
95 self._shutdown = False
96
97 def _export(self, serialized_data: str):
98 data = serialized_data
99 if self._compression == Compression.Gzip:
100 gzip_data = BytesIO()
101 with gzip.GzipFile(fileobj=gzip_data, mode="w") as gzip_stream:
102 gzip_stream.write(serialized_data)
103 data = gzip_data.getvalue()
104 elif self._compression == Compression.Deflate:
105 data = zlib.compress(bytes(serialized_data))
106
107 return self._session.post(
108 url=self._endpoint,
109 data=data,
110 verify=self._certificate_file,
111 timeout=self._timeout,
112 )
113
114 @staticmethod
115 def _retryable(resp: requests.Response) -> bool:
116 if resp.status_code == 408:
117 return True
118 if resp.status_code >= 500 and resp.status_code <= 599:
119 return True
120 return False
121
122 def export(self, spans) -> SpanExportResult:
123 # After the call to Shutdown subsequent calls to Export are
124 # not allowed and should return a Failure result.
125 if self._shutdown:
126 _logger.warning("Exporter already shutdown, ignoring batch")
127 return SpanExportResult.FAILURE
128
129 serialized_data = _ProtobufEncoder.serialize(spans)
130
131 for delay in expo(max_value=self._MAX_RETRY_TIMEOUT):
132
133 if delay == self._MAX_RETRY_TIMEOUT:
134 return SpanExportResult.FAILURE
135
136 resp = self._export(serialized_data)
137 # pylint: disable=no-else-return
138 if resp.status_code in (200, 202):
139 return SpanExportResult.SUCCESS
140 elif self._retryable(resp):
141 _logger.warning(
142 "Transient error %s encountered while exporting span batch, retrying in %ss.",
143 resp.reason,
144 delay,
145 )
146 sleep(delay)
147 continue
148 else:
149 _logger.error(
150 "Failed to export batch code: %s, reason: %s",
151 resp.status_code,
152 resp.text,
153 )
154 return SpanExportResult.FAILURE
155 return SpanExportResult.FAILURE
156
157 def shutdown(self):
158 if self._shutdown:
159 _logger.warning("Exporter already shutdown, ignoring call")
160 return
161 self._session.close()
162 self._shutdown = True
163
164
165 def _compression_from_env() -> Compression:
166 compression = (
167 environ.get(
168 OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,
169 environ.get(OTEL_EXPORTER_OTLP_COMPRESSION, "none"),
170 )
171 .lower()
172 .strip()
173 )
174 return Compression(compression)
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py
--- a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py
+++ b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py
@@ -47,7 +47,8 @@
DEFAULT_COMPRESSION = Compression.NoCompression
-DEFAULT_ENDPOINT = "http://localhost:4318/v1/traces"
+DEFAULT_ENDPOINT = "http://localhost:4318/"
+DEFAULT_TRACES_EXPORT_PATH = "v1/traces"
DEFAULT_TIMEOUT = 10 # in seconds
@@ -65,7 +66,9 @@
):
self._endpoint = endpoint or environ.get(
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
- environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT),
+ _append_trace_path(
+ environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT)
+ ),
)
self._certificate_file = certificate_file or environ.get(
OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,
@@ -172,3 +175,9 @@
.strip()
)
return Compression(compression)
+
+
+def _append_trace_path(endpoint: str) -> str:
+ if endpoint.endswith("/"):
+ return endpoint + DEFAULT_TRACES_EXPORT_PATH
+ return endpoint + f"/{DEFAULT_TRACES_EXPORT_PATH}"
| {"golden_diff": "diff --git a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py\n--- a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py\n+++ b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py\n@@ -47,7 +47,8 @@\n \n \n DEFAULT_COMPRESSION = Compression.NoCompression\n-DEFAULT_ENDPOINT = \"http://localhost:4318/v1/traces\"\n+DEFAULT_ENDPOINT = \"http://localhost:4318/\"\n+DEFAULT_TRACES_EXPORT_PATH = \"v1/traces\"\n DEFAULT_TIMEOUT = 10 # in seconds\n \n \n@@ -65,7 +66,9 @@\n ):\n self._endpoint = endpoint or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n- environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT),\n+ _append_trace_path(\n+ environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT)\n+ ),\n )\n self._certificate_file = certificate_file or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n@@ -172,3 +175,9 @@\n .strip()\n )\n return Compression(compression)\n+\n+\n+def _append_trace_path(endpoint: str) -> str:\n+ if endpoint.endswith(\"/\"):\n+ return endpoint + DEFAULT_TRACES_EXPORT_PATH\n+ return endpoint + f\"/{DEFAULT_TRACES_EXPORT_PATH}\"\n", "issue": "OTLP HTTP Exporter should append `v1/{signal}` to the endpoint URL when non-per-signal env var is used\nPer the spec re: [Endpoint URLs for OTLP/HTTP](https://github.com/open-telemetry/opentelemetry-specification/blob/b7473b5de0f55f921f896948442ebb274f58b584/specification/protocol/exporter.md#per-signal-urls), when the non-per-signal endpoint env var (`OTEL_EXPORTER_OTLP_ENDPOINT`) is set, the exporter *must* construct per-signal URLs (either `v1/traces` or `v1/metrics`). \r\n\r\nCurrently, the [exporter does not do this](https://github.com/open-telemetry/opentelemetry-python/blob/80f5a20ba8f3a71450fe3020fecf362fedb76bff/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py#L68); `v1/traces` must be manually added to the end point when setting only `OTEL_EXPORTER_OTLP_ENDPOINT`. Not doing so produces a 404 error when attempting to export spans. 
\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gzip\nimport logging\nimport zlib\nfrom io import BytesIO\nfrom os import environ\nfrom typing import Dict, Optional\nfrom time import sleep\n\nimport requests\nfrom backoff import expo\n\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n OTEL_EXPORTER_OTLP_TRACES_HEADERS,\n OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,\n OTEL_EXPORTER_OTLP_CERTIFICATE,\n OTEL_EXPORTER_OTLP_COMPRESSION,\n OTEL_EXPORTER_OTLP_ENDPOINT,\n OTEL_EXPORTER_OTLP_HEADERS,\n OTEL_EXPORTER_OTLP_TIMEOUT,\n)\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.exporter.otlp.proto.http import Compression\nfrom opentelemetry.exporter.otlp.proto.http.trace_exporter.encoder import (\n _ProtobufEncoder,\n)\nfrom opentelemetry.util.re import parse_headers\n\n\n_logger = logging.getLogger(__name__)\n\n\nDEFAULT_COMPRESSION = Compression.NoCompression\nDEFAULT_ENDPOINT = \"http://localhost:4318/v1/traces\"\nDEFAULT_TIMEOUT = 10 # in seconds\n\n\nclass OTLPSpanExporter(SpanExporter):\n\n _MAX_RETRY_TIMEOUT = 64\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n certificate_file: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n self._endpoint = endpoint or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT),\n )\n self._certificate_file = certificate_file or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n environ.get(OTEL_EXPORTER_OTLP_CERTIFICATE, True),\n )\n headers_string = environ.get(\n OTEL_EXPORTER_OTLP_TRACES_HEADERS,\n environ.get(OTEL_EXPORTER_OTLP_HEADERS, \"\"),\n )\n self._headers = headers or parse_headers(headers_string)\n self._timeout = timeout or int(\n environ.get(\n OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,\n environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, DEFAULT_TIMEOUT),\n )\n )\n self._compression = compression or _compression_from_env()\n self._session = requests.Session()\n self._session.headers.update(self._headers)\n self._session.headers.update(\n {\"Content-Type\": _ProtobufEncoder._CONTENT_TYPE}\n )\n if self._compression is not Compression.NoCompression:\n self._session.headers.update(\n {\"Content-Encoding\": self._compression.value}\n )\n self._shutdown = False\n\n def _export(self, serialized_data: str):\n data = serialized_data\n if self._compression == Compression.Gzip:\n gzip_data = BytesIO()\n with gzip.GzipFile(fileobj=gzip_data, mode=\"w\") as gzip_stream:\n gzip_stream.write(serialized_data)\n data = gzip_data.getvalue()\n elif self._compression == Compression.Deflate:\n data = zlib.compress(bytes(serialized_data))\n\n return self._session.post(\n url=self._endpoint,\n data=data,\n verify=self._certificate_file,\n 
timeout=self._timeout,\n )\n\n @staticmethod\n def _retryable(resp: requests.Response) -> bool:\n if resp.status_code == 408:\n return True\n if resp.status_code >= 500 and resp.status_code <= 599:\n return True\n return False\n\n def export(self, spans) -> SpanExportResult:\n # After the call to Shutdown subsequent calls to Export are\n # not allowed and should return a Failure result.\n if self._shutdown:\n _logger.warning(\"Exporter already shutdown, ignoring batch\")\n return SpanExportResult.FAILURE\n\n serialized_data = _ProtobufEncoder.serialize(spans)\n\n for delay in expo(max_value=self._MAX_RETRY_TIMEOUT):\n\n if delay == self._MAX_RETRY_TIMEOUT:\n return SpanExportResult.FAILURE\n\n resp = self._export(serialized_data)\n # pylint: disable=no-else-return\n if resp.status_code in (200, 202):\n return SpanExportResult.SUCCESS\n elif self._retryable(resp):\n _logger.warning(\n \"Transient error %s encountered while exporting span batch, retrying in %ss.\",\n resp.reason,\n delay,\n )\n sleep(delay)\n continue\n else:\n _logger.error(\n \"Failed to export batch code: %s, reason: %s\",\n resp.status_code,\n resp.text,\n )\n return SpanExportResult.FAILURE\n return SpanExportResult.FAILURE\n\n def shutdown(self):\n if self._shutdown:\n _logger.warning(\"Exporter already shutdown, ignoring call\")\n return\n self._session.close()\n self._shutdown = True\n\n\ndef _compression_from_env() -> Compression:\n compression = (\n environ.get(\n OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,\n environ.get(OTEL_EXPORTER_OTLP_COMPRESSION, \"none\"),\n )\n .lower()\n .strip()\n )\n return Compression(compression)\n", "path": "exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gzip\nimport logging\nimport zlib\nfrom io import BytesIO\nfrom os import environ\nfrom typing import Dict, Optional\nfrom time import sleep\n\nimport requests\nfrom backoff import expo\n\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n OTEL_EXPORTER_OTLP_TRACES_HEADERS,\n OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,\n OTEL_EXPORTER_OTLP_CERTIFICATE,\n OTEL_EXPORTER_OTLP_COMPRESSION,\n OTEL_EXPORTER_OTLP_ENDPOINT,\n OTEL_EXPORTER_OTLP_HEADERS,\n OTEL_EXPORTER_OTLP_TIMEOUT,\n)\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.exporter.otlp.proto.http import Compression\nfrom opentelemetry.exporter.otlp.proto.http.trace_exporter.encoder import (\n _ProtobufEncoder,\n)\nfrom opentelemetry.util.re import parse_headers\n\n\n_logger = logging.getLogger(__name__)\n\n\nDEFAULT_COMPRESSION = Compression.NoCompression\nDEFAULT_ENDPOINT = \"http://localhost:4318/\"\nDEFAULT_TRACES_EXPORT_PATH = \"v1/traces\"\nDEFAULT_TIMEOUT = 10 # in seconds\n\n\nclass OTLPSpanExporter(SpanExporter):\n\n 
_MAX_RETRY_TIMEOUT = 64\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n certificate_file: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n self._endpoint = endpoint or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n _append_trace_path(\n environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT)\n ),\n )\n self._certificate_file = certificate_file or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n environ.get(OTEL_EXPORTER_OTLP_CERTIFICATE, True),\n )\n headers_string = environ.get(\n OTEL_EXPORTER_OTLP_TRACES_HEADERS,\n environ.get(OTEL_EXPORTER_OTLP_HEADERS, \"\"),\n )\n self._headers = headers or parse_headers(headers_string)\n self._timeout = timeout or int(\n environ.get(\n OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,\n environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, DEFAULT_TIMEOUT),\n )\n )\n self._compression = compression or _compression_from_env()\n self._session = requests.Session()\n self._session.headers.update(self._headers)\n self._session.headers.update(\n {\"Content-Type\": _ProtobufEncoder._CONTENT_TYPE}\n )\n if self._compression is not Compression.NoCompression:\n self._session.headers.update(\n {\"Content-Encoding\": self._compression.value}\n )\n self._shutdown = False\n\n def _export(self, serialized_data: str):\n data = serialized_data\n if self._compression == Compression.Gzip:\n gzip_data = BytesIO()\n with gzip.GzipFile(fileobj=gzip_data, mode=\"w\") as gzip_stream:\n gzip_stream.write(serialized_data)\n data = gzip_data.getvalue()\n elif self._compression == Compression.Deflate:\n data = zlib.compress(bytes(serialized_data))\n\n return self._session.post(\n url=self._endpoint,\n data=data,\n verify=self._certificate_file,\n timeout=self._timeout,\n )\n\n @staticmethod\n def _retryable(resp: requests.Response) -> bool:\n if resp.status_code == 408:\n return True\n if resp.status_code >= 500 and resp.status_code <= 599:\n return True\n return False\n\n def export(self, spans) -> SpanExportResult:\n # After the call to Shutdown subsequent calls to Export are\n # not allowed and should return a Failure result.\n if self._shutdown:\n _logger.warning(\"Exporter already shutdown, ignoring batch\")\n return SpanExportResult.FAILURE\n\n serialized_data = _ProtobufEncoder.serialize(spans)\n\n for delay in expo(max_value=self._MAX_RETRY_TIMEOUT):\n\n if delay == self._MAX_RETRY_TIMEOUT:\n return SpanExportResult.FAILURE\n\n resp = self._export(serialized_data)\n # pylint: disable=no-else-return\n if resp.status_code in (200, 202):\n return SpanExportResult.SUCCESS\n elif self._retryable(resp):\n _logger.warning(\n \"Transient error %s encountered while exporting span batch, retrying in %ss.\",\n resp.reason,\n delay,\n )\n sleep(delay)\n continue\n else:\n _logger.error(\n \"Failed to export batch code: %s, reason: %s\",\n resp.status_code,\n resp.text,\n )\n return SpanExportResult.FAILURE\n return SpanExportResult.FAILURE\n\n def shutdown(self):\n if self._shutdown:\n _logger.warning(\"Exporter already shutdown, ignoring call\")\n return\n self._session.close()\n self._shutdown = True\n\n\ndef _compression_from_env() -> Compression:\n compression = (\n environ.get(\n OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,\n environ.get(OTEL_EXPORTER_OTLP_COMPRESSION, \"none\"),\n )\n .lower()\n .strip()\n )\n return Compression(compression)\n\n\ndef _append_trace_path(endpoint: str) -> str:\n if endpoint.endswith(\"/\"):\n return endpoint + DEFAULT_TRACES_EXPORT_PATH\n return 
endpoint + f\"/{DEFAULT_TRACES_EXPORT_PATH}\"\n", "path": "exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py"}]} | 2,269 | 394 |
gh_patches_debug_66174 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1985 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Provide documentation about user_groups and permission changes
### Issue description
If we edit permissions for User Groups, such as to make different items viewable on django admin, this requires a very specific set of instructions that is only documented inside user group migration files. We should reference this documentation somewhere else in our developer readme so it's clear what needs to be done if you ever change permissions in the future.
We had a case where permissions were changed and it wasn't clear what needed to be done (make a manual migration). The developers lost time trying to figure out why permissions didn't show and why running makemigrations changed nothing. I suggest adding an inline code comment around where permissions are set in user_groups too that points to the documentation in the developer readme. This may save future developers' time.
### Acceptance criteria
- [ ] update developer documentation (inline and md) about user_group/ permission changes
### Additional context
the migration files that have documentation about this are all ones that say _create_groups_v (followed by a number), such as 0075_create_groups_v08.py. See those for the current documentation and note that this was hard for developers to find.
[Slack thread](https://cisa-corp.slack.com/archives/C05BGB4L5NF/p1709939481415349)
### Links to other issues
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/models/user_group.py`
Content:
```
1 from django.contrib.auth.models import Group
2 import logging
3
4 logger = logging.getLogger(__name__)
5
6
7 class UserGroup(Group):
8 class Meta:
9 verbose_name = "User group"
10 verbose_name_plural = "User groups"
11
12 def create_cisa_analyst_group(apps, schema_editor):
13 """This method gets run from a data migration."""
14
15 # Hard to pass self to these methods as the calls from migrations
16 # are only expecting apps and schema_editor, so we'll just define
17 # apps, schema_editor in the local scope instead
18 CISA_ANALYST_GROUP_PERMISSIONS = [
19 {
20 "app_label": "auditlog",
21 "model": "logentry",
22 "permissions": ["view_logentry"],
23 },
24 {
25 "app_label": "registrar",
26 "model": "contact",
27 "permissions": ["change_contact"],
28 },
29 {
30 "app_label": "registrar",
31 "model": "domainrequest",
32 "permissions": ["change_domainrequest"],
33 },
34 {
35 "app_label": "registrar",
36 "model": "domain",
37 "permissions": ["view_domain"],
38 },
39 {
40 "app_label": "registrar",
41 "model": "draftdomain",
42 "permissions": ["change_draftdomain"],
43 },
44 {
45 "app_label": "registrar",
46 "model": "user",
47 "permissions": ["analyst_access_permission", "change_user"],
48 },
49 {
50 "app_label": "registrar",
51 "model": "domaininvitation",
52 "permissions": ["add_domaininvitation", "view_domaininvitation"],
53 },
54 {
55 "app_label": "registrar",
56 "model": "website",
57 "permissions": ["change_website"],
58 },
59 {
60 "app_label": "registrar",
61 "model": "userdomainrole",
62 "permissions": ["view_userdomainrole", "delete_userdomainrole"],
63 },
64 {
65 "app_label": "registrar",
66 "model": "verifiedbystaff",
67 "permissions": ["add_verifiedbystaff", "change_verifiedbystaff", "delete_verifiedbystaff"],
68 },
69 {
70 "app_label": "registrar",
71 "model": "federalagency",
72 "permissions": ["add_federalagency", "change_federalagency", "delete_federalagency"],
73 },
74 ]
75
76 # Avoid error: You can't execute queries until the end
77 # of the 'atomic' block.
78 # From django docs:
79 # https://docs.djangoproject.com/en/4.2/topics/migrations/#data-migrations
80 # We can’t import the Person model directly as it may be a newer
81 # version than this migration expects. We use the historical version.
82 ContentType = apps.get_model("contenttypes", "ContentType")
83 Permission = apps.get_model("auth", "Permission")
84 UserGroup = apps.get_model("registrar", "UserGroup")
85
86 logger.info("Going to create the Analyst Group")
87 try:
88 cisa_analysts_group, _ = UserGroup.objects.get_or_create(
89 name="cisa_analysts_group",
90 )
91
92 cisa_analysts_group.permissions.clear()
93
94 for permission in CISA_ANALYST_GROUP_PERMISSIONS:
95 app_label = permission["app_label"]
96 model_name = permission["model"]
97 permissions = permission["permissions"]
98
99 # Retrieve the content type for the app and model
100 content_type = ContentType.objects.get(app_label=app_label, model=model_name)
101
102 # Retrieve the permissions based on their codenames
103 permissions = Permission.objects.filter(content_type=content_type, codename__in=permissions)
104
105 # Assign the permissions to the group
106 cisa_analysts_group.permissions.add(*permissions)
107
108 # Convert the permissions QuerySet to a list of codenames
109 permission_list = list(permissions.values_list("codename", flat=True))
110
111 logger.debug(
112 app_label
113 + " | "
114 + model_name
115 + " | "
116 + ", ".join(permission_list)
117 + " added to group "
118 + cisa_analysts_group.name
119 )
120
121 cisa_analysts_group.save()
122 logger.debug("CISA Analyst permissions added to group " + cisa_analysts_group.name)
123 except Exception as e:
124 logger.error(f"Error creating analyst permissions group: {e}")
125
126 def create_full_access_group(apps, schema_editor):
127 """This method gets run from a data migration."""
128
129 Permission = apps.get_model("auth", "Permission")
130 UserGroup = apps.get_model("registrar", "UserGroup")
131
132 logger.info("Going to create the Full Access Group")
133 try:
134 full_access_group, _ = UserGroup.objects.get_or_create(
135 name="full_access_group",
136 )
137 # Get all available permissions
138 all_permissions = Permission.objects.all()
139
140 # Assign all permissions to the group
141 full_access_group.permissions.add(*all_permissions)
142
143 full_access_group.save()
144 logger.debug("All permissions added to group " + full_access_group.name)
145 except Exception as e:
146 logger.error(f"Error creating full access group: {e}")
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/registrar/models/user_group.py b/src/registrar/models/user_group.py
--- a/src/registrar/models/user_group.py
+++ b/src/registrar/models/user_group.py
@@ -5,6 +5,11 @@
class UserGroup(Group):
+ """
+ UserGroup sets read and write permissions for superusers (who have full access)
+ and analysts. For more details, see the dev docs for user-permissions.
+ """
+
class Meta:
verbose_name = "User group"
verbose_name_plural = "User groups"
| {"golden_diff": "diff --git a/src/registrar/models/user_group.py b/src/registrar/models/user_group.py\n--- a/src/registrar/models/user_group.py\n+++ b/src/registrar/models/user_group.py\n@@ -5,6 +5,11 @@\n \n \n class UserGroup(Group):\n+ \"\"\"\n+ UserGroup sets read and write permissions for superusers (who have full access)\n+ and analysts. For more details, see the dev docs for user-permissions.\n+ \"\"\"\n+\n class Meta:\n verbose_name = \"User group\"\n verbose_name_plural = \"User groups\"\n", "issue": "Provide documentation about user_groups and permission changes\n### Issue description\r\n\r\nIf we edit permissions for User Groups, such as to make different items viewable on django admin, this requires a very specific set of instructions that is only documentated inside user group migration files. We should reference this documentation somewhere else in our developer readme so it's clear what is needed to be done if you ever change permissions in the future.\r\n\r\n We had a case where permissions were changed and it wasn't clear what needed to be done (make a manual migration). The developers lost time trying to figure out why permissions didn't show and why running makemigrations changed nothing. I suggest adding an inline code comment around where permissions are set in user_groups too that points to documentation in developer readme. This may save future developer's time.\r\n\r\n### Acceptance criteria\r\n\r\n- [ ] update developer documentation (inline and md) about user_group/ permission changes\r\n\r\n### Additional context\r\nthe migration files that have documentation about this are all ones that say _create_groups_v (followed by a number), such as 0075_create_groups_v08.py. See those for the current documentation and note that this was hard for developers to find.\r\n[Slack thread](https://cisa-corp.slack.com/archives/C05BGB4L5NF/p1709939481415349)\r\n\r\n### Links to other issues\r\n\r\n_No response_\n", "before_files": [{"content": "from django.contrib.auth.models import Group\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserGroup(Group):\n class Meta:\n verbose_name = \"User group\"\n verbose_name_plural = \"User groups\"\n\n def create_cisa_analyst_group(apps, schema_editor):\n \"\"\"This method gets run from a data migration.\"\"\"\n\n # Hard to pass self to these methods as the calls from migrations\n # are only expecting apps and schema_editor, so we'll just define\n # apps, schema_editor in the local scope instead\n CISA_ANALYST_GROUP_PERMISSIONS = [\n {\n \"app_label\": \"auditlog\",\n \"model\": \"logentry\",\n \"permissions\": [\"view_logentry\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"contact\",\n \"permissions\": [\"change_contact\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"domainrequest\",\n \"permissions\": [\"change_domainrequest\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"domain\",\n \"permissions\": [\"view_domain\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"draftdomain\",\n \"permissions\": [\"change_draftdomain\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"user\",\n \"permissions\": [\"analyst_access_permission\", \"change_user\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"domaininvitation\",\n \"permissions\": [\"add_domaininvitation\", \"view_domaininvitation\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"website\",\n \"permissions\": [\"change_website\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": 
\"userdomainrole\",\n \"permissions\": [\"view_userdomainrole\", \"delete_userdomainrole\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"verifiedbystaff\",\n \"permissions\": [\"add_verifiedbystaff\", \"change_verifiedbystaff\", \"delete_verifiedbystaff\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"federalagency\",\n \"permissions\": [\"add_federalagency\", \"change_federalagency\", \"delete_federalagency\"],\n },\n ]\n\n # Avoid error: You can't execute queries until the end\n # of the 'atomic' block.\n # From django docs:\n # https://docs.djangoproject.com/en/4.2/topics/migrations/#data-migrations\n # We can\u2019t import the Person model directly as it may be a newer\n # version than this migration expects. We use the historical version.\n ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n Permission = apps.get_model(\"auth\", \"Permission\")\n UserGroup = apps.get_model(\"registrar\", \"UserGroup\")\n\n logger.info(\"Going to create the Analyst Group\")\n try:\n cisa_analysts_group, _ = UserGroup.objects.get_or_create(\n name=\"cisa_analysts_group\",\n )\n\n cisa_analysts_group.permissions.clear()\n\n for permission in CISA_ANALYST_GROUP_PERMISSIONS:\n app_label = permission[\"app_label\"]\n model_name = permission[\"model\"]\n permissions = permission[\"permissions\"]\n\n # Retrieve the content type for the app and model\n content_type = ContentType.objects.get(app_label=app_label, model=model_name)\n\n # Retrieve the permissions based on their codenames\n permissions = Permission.objects.filter(content_type=content_type, codename__in=permissions)\n\n # Assign the permissions to the group\n cisa_analysts_group.permissions.add(*permissions)\n\n # Convert the permissions QuerySet to a list of codenames\n permission_list = list(permissions.values_list(\"codename\", flat=True))\n\n logger.debug(\n app_label\n + \" | \"\n + model_name\n + \" | \"\n + \", \".join(permission_list)\n + \" added to group \"\n + cisa_analysts_group.name\n )\n\n cisa_analysts_group.save()\n logger.debug(\"CISA Analyst permissions added to group \" + cisa_analysts_group.name)\n except Exception as e:\n logger.error(f\"Error creating analyst permissions group: {e}\")\n\n def create_full_access_group(apps, schema_editor):\n \"\"\"This method gets run from a data migration.\"\"\"\n\n Permission = apps.get_model(\"auth\", \"Permission\")\n UserGroup = apps.get_model(\"registrar\", \"UserGroup\")\n\n logger.info(\"Going to create the Full Access Group\")\n try:\n full_access_group, _ = UserGroup.objects.get_or_create(\n name=\"full_access_group\",\n )\n # Get all available permissions\n all_permissions = Permission.objects.all()\n\n # Assign all permissions to the group\n full_access_group.permissions.add(*all_permissions)\n\n full_access_group.save()\n logger.debug(\"All permissions added to group \" + full_access_group.name)\n except Exception as e:\n logger.error(f\"Error creating full access group: {e}\")\n", "path": "src/registrar/models/user_group.py"}], "after_files": [{"content": "from django.contrib.auth.models import Group\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserGroup(Group):\n \"\"\"\n UserGroup sets read and write permissions for superusers (who have full access)\n and analysts. 
For more details, see the dev docs for user-permissions.\n \"\"\"\n\n class Meta:\n verbose_name = \"User group\"\n verbose_name_plural = \"User groups\"\n\n def create_cisa_analyst_group(apps, schema_editor):\n \"\"\"This method gets run from a data migration.\"\"\"\n\n # Hard to pass self to these methods as the calls from migrations\n # are only expecting apps and schema_editor, so we'll just define\n # apps, schema_editor in the local scope instead\n CISA_ANALYST_GROUP_PERMISSIONS = [\n {\n \"app_label\": \"auditlog\",\n \"model\": \"logentry\",\n \"permissions\": [\"view_logentry\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"contact\",\n \"permissions\": [\"change_contact\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"domaininformation\",\n \"permissions\": [\"change_domaininformation\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"domainrequest\",\n \"permissions\": [\"change_domainrequest\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"domain\",\n \"permissions\": [\"view_domain\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"draftdomain\",\n \"permissions\": [\"change_draftdomain\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"user\",\n \"permissions\": [\"analyst_access_permission\", \"change_user\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"domaininvitation\",\n \"permissions\": [\"add_domaininvitation\", \"view_domaininvitation\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"website\",\n \"permissions\": [\"change_website\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"userdomainrole\",\n \"permissions\": [\"view_userdomainrole\", \"delete_userdomainrole\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"verifiedbystaff\",\n \"permissions\": [\"add_verifiedbystaff\", \"change_verifiedbystaff\", \"delete_verifiedbystaff\"],\n },\n {\n \"app_label\": \"registrar\",\n \"model\": \"federalagency\",\n \"permissions\": [\"add_federalagency\", \"change_federalagency\", \"delete_federalagency\"],\n },\n ]\n\n # Avoid error: You can't execute queries until the end\n # of the 'atomic' block.\n # From django docs:\n # https://docs.djangoproject.com/en/4.2/topics/migrations/#data-migrations\n # We can\u2019t import the Person model directly as it may be a newer\n # version than this migration expects. 
We use the historical version.\n ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n Permission = apps.get_model(\"auth\", \"Permission\")\n UserGroup = apps.get_model(\"registrar\", \"UserGroup\")\n\n logger.info(\"Going to create the Analyst Group\")\n try:\n cisa_analysts_group, _ = UserGroup.objects.get_or_create(\n name=\"cisa_analysts_group\",\n )\n\n cisa_analysts_group.permissions.clear()\n\n for permission in CISA_ANALYST_GROUP_PERMISSIONS:\n app_label = permission[\"app_label\"]\n model_name = permission[\"model\"]\n permissions = permission[\"permissions\"]\n\n # Retrieve the content type for the app and model\n content_type = ContentType.objects.get(app_label=app_label, model=model_name)\n\n # Retrieve the permissions based on their codenames\n permissions = Permission.objects.filter(content_type=content_type, codename__in=permissions)\n\n # Assign the permissions to the group\n cisa_analysts_group.permissions.add(*permissions)\n\n # Convert the permissions QuerySet to a list of codenames\n permission_list = list(permissions.values_list(\"codename\", flat=True))\n\n logger.debug(\n app_label\n + \" | \"\n + model_name\n + \" | \"\n + \", \".join(permission_list)\n + \" added to group \"\n + cisa_analysts_group.name\n )\n\n cisa_analysts_group.save()\n logger.debug(\"CISA Analyst permissions added to group \" + cisa_analysts_group.name)\n except Exception as e:\n logger.error(f\"Error creating analyst permissions group: {e}\")\n\n def create_full_access_group(apps, schema_editor):\n \"\"\"This method gets run from a data migration.\"\"\"\n\n Permission = apps.get_model(\"auth\", \"Permission\")\n UserGroup = apps.get_model(\"registrar\", \"UserGroup\")\n\n logger.info(\"Going to create the Full Access Group\")\n try:\n full_access_group, _ = UserGroup.objects.get_or_create(\n name=\"full_access_group\",\n )\n # Get all available permissions\n all_permissions = Permission.objects.all()\n\n # Assign all permissions to the group\n full_access_group.permissions.add(*all_permissions)\n\n full_access_group.save()\n logger.debug(\"All permissions added to group \" + full_access_group.name)\n except Exception as e:\n logger.error(f\"Error creating full access group: {e}\")\n", "path": "src/registrar/models/user_group.py"}]} | 1,989 | 121 |
gh_patches_debug_19297 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1442 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DeepFM unit test for the new PS (parameter server)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/common/tensor.py`
Content:
```
1 import numpy as np
2 import tensorflow as tf
3
4 from elasticdl.proto import elasticdl_pb2
5 from elasticdl.python.common.dtypes import (
6 dtype_numpy_to_tensor,
7 dtype_tensor_to_numpy,
8 )
9 from elasticdl.python.common.log_utils import default_logger as logger
10
11
12 class Tensor(object):
13 """Data structure for tensors in ElasticDL.
14
15 `Tensor` can save dense tensors and sparse tensors. For sparse tensors,
16 this structure saves them in the same way as `TensorFlow.IndexedSlices`.
17 """
18
19 def __init__(self, values=None, indices=None, name=None):
20 """
21 `Tensor` can save dense tensors and sparse tensors.
22 To pass in a dense tensor, `values` should be `numpy.ndarray` and
23 `indices` should be None.
24 There are two ways to pass in a sparse tensor:
25 * `values` is a `numpy.ndarray` and `indices` is a `numpy.ndarray`.
26 * `values` is a `TensorFlow.IndexedSlices` and `indices` is None.
27
28 Args:
29 values: A `numpy.ndarray` or `TensorFlow.IndexedSlices`.
30 If `values` is a `TensorFlow.IndexedSlices`, `indices` should
31 be None.
32 indices: A `numpy.ndarray` or None.
33 name: A python string.
34 """
35 self.set(values, indices, name)
36
37 @classmethod
38 def from_tensor_pb(cls, tensor_pb):
39 """Create an ElasticDL Tensor object from tensor protocol buffer.
40
41 Return the created Tensor object.
42 """
43 tensor = cls()
44 deserialize_tensor_pb(tensor_pb, tensor)
45 return tensor
46
47 def set(self, values=None, indices=None, name=None):
48 self.name = name
49 if isinstance(values, tf.IndexedSlices):
50 if indices is not None:
51 raise ValueError(
52 "When creating a Tensor object with values of type "
53 "tf.IndexedSlices, indices must be None."
54 )
55 if values.dense_shape is not None:
56 # TODO(yunjian.lmh): Support dense shape, or do not print
57 # warning message, or there will be too much warning
58 # messages.
59 logger.warning(
60 "ElasticDL Tensor ignores dense_shape in "
61 "TensorFlow.IndexedSlices."
62 )
63
64 self.values = values.values.numpy()
65 self.indices = values.indices.numpy()
66 else:
67 self.values = (
68 values.numpy() if isinstance(values, tf.Tensor) else values
69 )
70 self.indices = (
71 indices.numpy() if isinstance(indices, tf.Tensor) else indices
72 )
73
74 def is_indexed_slices(self):
75 return self.indices is not None
76
77 def to_tensor_pb(self):
78 tensor_pb = elasticdl_pb2.Tensor()
79 serialize_tensor(self, tensor_pb)
80 return tensor_pb
81
82 def to_tf_tensor(self):
83 if self.is_indexed_slices():
84 return tf.IndexedSlices(self.values, self.indices)
85 else:
86 return tf.constant(self.values)
87
88 def to_ndarray(self):
89 if self.is_indexed_slices():
90 # Currently Tensor does not have a field representing dense shape,
91 # thus can not convert it to numpy.ndarray.
92 raise NotImplementedError(
93 "Converting an ElasticDL Tensor object, which contains a "
94 "sparse tensor, to a numpy.ndarray is not supported."
95 )
96 return self.values
97
98 def __add__(self, other):
99 if self.is_indexed_slices() and other.is_indexed_slices():
100 self.values = np.concatenate((self.values, other.values), axis=0)
101 self.indices = np.concatenate(
102 (self.indices, other.indices), axis=0
103 )
104 elif not self.is_indexed_slices() and not other.is_indexed_slices():
105 self.values = self.values + other.values
106 else:
107 raise NotImplementedError(
108 "Only Tensor with the same type could be added"
109 )
110 return self
111
112 def __radd__(self, other):
113 return self + other
114
115
116 def serialize_tensor(tensor, tensor_pb):
117 """Serialize ElasticDL Tensor to tensor protocol buffer."""
118 dtype = dtype_numpy_to_tensor(tensor.values.dtype)
119 if not dtype:
120 raise ValueError(
121 "Dtype of ndarray %s is not supported", tensor.values.dtype
122 )
123 tensor_pb.dtype = dtype
124 tensor_pb.dim.extend(tensor.values.shape)
125 tensor_pb.content = tensor.values.tobytes()
126 if tensor.is_indexed_slices():
127 tensor_pb.indices.extend(tuple(tensor.indices))
128 if tensor.name:
129 tensor_pb.name = tensor.name
130
131
132 def deserialize_tensor_pb(tensor_pb, tensor):
133 """Deserialize tensor protocol buffer to ElasticDL Tensor.
134
135 Note that the input tensor protocol buffer is reset and underlying buffer
136 is passed to the returned ndarray.
137 """
138 if not tensor_pb.dim:
139 raise ValueError("Tensor PB has no dim defined")
140
141 dtype = dtype_tensor_to_numpy(tensor_pb.dtype)
142 # Check that the buffer size agrees with dimensions.
143 size = dtype.itemsize
144 for d in tensor_pb.dim:
145 size *= d
146 if size != len(tensor_pb.content):
147 raise ValueError(
148 "Tensor PB size mismatch, dim: %s, len(content): %d",
149 tensor_pb.dim,
150 len(tensor_pb.content),
151 )
152 tensor.set(
153 values=np.ndarray(
154 shape=tensor_pb.dim, dtype=dtype, buffer=tensor_pb.content
155 ),
156 indices=np.array(tensor_pb.indices) if tensor_pb.indices else None,
157 name=tensor_pb.name,
158 )
159 tensor_pb.Clear()
160
161
162 def tensor_pb_to_ndarray(tensor_pb):
163 """Deserialize tensor protocol buffer and return a numpy ndarray."""
164 return Tensor.from_tensor_pb(tensor_pb).to_ndarray()
165
166
167 def tensor_pb_to_tf_tensor(tensor_pb):
168 """Deserialize tensor protocol buffer and return a TensorFlow tensor."""
169 return Tensor.from_tensor_pb(tensor_pb).to_tf_tensor()
170
171
172 def emplace_tensor_pb_from_ndarray(
173 tensor_pb_list, values, indices=None, name=None
174 ):
175 """Generate a tensor procotol buffer and append it to tensor_pb_list.
176
177 Note:
178 This function does not use list append function as following code
179 snippet. It is slow because append function will copy the input
180 protocol buffer.
181
182 ```
183 pb = elasticdl_pb2.Tensor()
184 pb.dim.extend([3])
185 pb.name = "test"
186 pb.dtype = DT_INT64
187 pb.content = np.array([1, 2, 3]).tobytes()
188 tensor_pb_list.append(tensor_pb) # slow, because append copies pb
189 ```
190 """
191 tensor_pb = tensor_pb_list.add()
192 tensor = Tensor(values, indices, name)
193 serialize_tensor(tensor, tensor_pb)
194
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/common/tensor.py b/elasticdl/python/common/tensor.py
--- a/elasticdl/python/common/tensor.py
+++ b/elasticdl/python/common/tensor.py
@@ -6,7 +6,6 @@
dtype_numpy_to_tensor,
dtype_tensor_to_numpy,
)
-from elasticdl.python.common.log_utils import default_logger as logger
class Tensor(object):
@@ -53,13 +52,8 @@
"tf.IndexedSlices, indices must be None."
)
if values.dense_shape is not None:
- # TODO(yunjian.lmh): Support dense shape, or do not print
- # warning message, or there will be too much warning
- # messages.
- logger.warning(
- "ElasticDL Tensor ignores dense_shape in "
- "TensorFlow.IndexedSlices."
- )
+ # TODO(yunjian.lmh): Support dense shape
+ pass
self.values = values.values.numpy()
self.indices = values.indices.numpy()
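For reference, a minimal sketch of how the `Tensor` round-trip defined in the file above is typically exercised in a test. Only the API visible in `elasticdl/python/common/tensor.py` is assumed, and the values are made up:

```python
# Illustrative round-trip of a sparse (IndexedSlices-style) Tensor through
# its protobuf form, using only the API shown in the file above.
import numpy as np

from elasticdl.python.common.tensor import Tensor

values = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
indices = np.array([0, 2])

t = Tensor(values, indices, name="embedding_grad")
assert t.is_indexed_slices()

pb = t.to_tensor_pb()                 # serialize to elasticdl_pb2.Tensor
restored = Tensor.from_tensor_pb(pb)  # deserialize back into a Tensor

np.testing.assert_array_equal(restored.values, values)
np.testing.assert_array_equal(restored.indices, indices)
assert restored.name == "embedding_grad"
```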
| {"golden_diff": "diff --git a/elasticdl/python/common/tensor.py b/elasticdl/python/common/tensor.py\n--- a/elasticdl/python/common/tensor.py\n+++ b/elasticdl/python/common/tensor.py\n@@ -6,7 +6,6 @@\n dtype_numpy_to_tensor,\n dtype_tensor_to_numpy,\n )\n-from elasticdl.python.common.log_utils import default_logger as logger\n \n \n class Tensor(object):\n@@ -53,13 +52,8 @@\n \"tf.IndexedSlices, indices must be None.\"\n )\n if values.dense_shape is not None:\n- # TODO(yunjian.lmh): Support dense shape, or do not print\n- # warning message, or there will be too much warning\n- # messages.\n- logger.warning(\n- \"ElasticDL Tensor ignores dense_shape in \"\n- \"TensorFlow.IndexedSlices.\"\n- )\n+ # TODO(yunjian.lmh): Support dense shape\n+ pass\n \n self.values = values.values.numpy()\n self.indices = values.indices.numpy()\n", "issue": "DeepFM unit test for new PS\n\n", "before_files": [{"content": "import numpy as np\nimport tensorflow as tf\n\nfrom elasticdl.proto import elasticdl_pb2\nfrom elasticdl.python.common.dtypes import (\n dtype_numpy_to_tensor,\n dtype_tensor_to_numpy,\n)\nfrom elasticdl.python.common.log_utils import default_logger as logger\n\n\nclass Tensor(object):\n \"\"\"Data structure for tensors in ElasticDL.\n\n `Tensor` can save dense tensors and sparse tensors. For sparse tensors,\n this structure saves them in the same way as `TensorFlow.IndexedSlices`.\n \"\"\"\n\n def __init__(self, values=None, indices=None, name=None):\n \"\"\"\n `Tensor` can save dense tensors and sparse tensors.\n To pass in a dense tensor, `values` should be `numpy.ndarray` and\n `indices` should be None.\n There are two ways to pass in a sparse tensor:\n * `values` is a `numpy.ndarray` and `indices` is a `numpy.ndarray`.\n * `values` is a `TensorFlow.IndexedSlices` and `indices` is None.\n\n Args:\n values: A `numpy.ndarray` or `TensorFlow.IndexedSlices`.\n If `values` is a `TensorFlow.IndexedSlices`, `indices` should\n be None.\n indices: A `numpy.ndarray` or None.\n name: A python string.\n \"\"\"\n self.set(values, indices, name)\n\n @classmethod\n def from_tensor_pb(cls, tensor_pb):\n \"\"\"Create an ElasticDL Tensor object from tensor protocol buffer.\n\n Return the created Tensor object.\n \"\"\"\n tensor = cls()\n deserialize_tensor_pb(tensor_pb, tensor)\n return tensor\n\n def set(self, values=None, indices=None, name=None):\n self.name = name\n if isinstance(values, tf.IndexedSlices):\n if indices is not None:\n raise ValueError(\n \"When creating a Tensor object with values of type \"\n \"tf.IndexedSlices, indices must be None.\"\n )\n if values.dense_shape is not None:\n # TODO(yunjian.lmh): Support dense shape, or do not print\n # warning message, or there will be too much warning\n # messages.\n logger.warning(\n \"ElasticDL Tensor ignores dense_shape in \"\n \"TensorFlow.IndexedSlices.\"\n )\n\n self.values = values.values.numpy()\n self.indices = values.indices.numpy()\n else:\n self.values = (\n values.numpy() if isinstance(values, tf.Tensor) else values\n )\n self.indices = (\n indices.numpy() if isinstance(indices, tf.Tensor) else indices\n )\n\n def is_indexed_slices(self):\n return self.indices is not None\n\n def to_tensor_pb(self):\n tensor_pb = elasticdl_pb2.Tensor()\n serialize_tensor(self, tensor_pb)\n return tensor_pb\n\n def to_tf_tensor(self):\n if self.is_indexed_slices():\n return tf.IndexedSlices(self.values, self.indices)\n else:\n return tf.constant(self.values)\n\n def to_ndarray(self):\n if self.is_indexed_slices():\n # Currently Tensor does not have a 
field representing dense shape,\n # thus can not convert it to numpy.ndarray.\n raise NotImplementedError(\n \"Converting an ElasticDL Tensor object, which contains a \"\n \"sparse tensor, to a numpy.ndarray is not supported.\"\n )\n return self.values\n\n def __add__(self, other):\n if self.is_indexed_slices() and other.is_indexed_slices():\n self.values = np.concatenate((self.values, other.values), axis=0)\n self.indices = np.concatenate(\n (self.indices, other.indices), axis=0\n )\n elif not self.is_indexed_slices() and not other.is_indexed_slices():\n self.values = self.values + other.values\n else:\n raise NotImplementedError(\n \"Only Tensor with the same type could be added\"\n )\n return self\n\n def __radd__(self, other):\n return self + other\n\n\ndef serialize_tensor(tensor, tensor_pb):\n \"\"\"Serialize ElasticDL Tensor to tensor protocol buffer.\"\"\"\n dtype = dtype_numpy_to_tensor(tensor.values.dtype)\n if not dtype:\n raise ValueError(\n \"Dtype of ndarray %s is not supported\", tensor.values.dtype\n )\n tensor_pb.dtype = dtype\n tensor_pb.dim.extend(tensor.values.shape)\n tensor_pb.content = tensor.values.tobytes()\n if tensor.is_indexed_slices():\n tensor_pb.indices.extend(tuple(tensor.indices))\n if tensor.name:\n tensor_pb.name = tensor.name\n\n\ndef deserialize_tensor_pb(tensor_pb, tensor):\n \"\"\"Deserialize tensor protocol buffer to ElasticDL Tensor.\n\n Note that the input tensor protocol buffer is reset and underlying buffer\n is passed to the returned ndarray.\n \"\"\"\n if not tensor_pb.dim:\n raise ValueError(\"Tensor PB has no dim defined\")\n\n dtype = dtype_tensor_to_numpy(tensor_pb.dtype)\n # Check that the buffer size agrees with dimensions.\n size = dtype.itemsize\n for d in tensor_pb.dim:\n size *= d\n if size != len(tensor_pb.content):\n raise ValueError(\n \"Tensor PB size mismatch, dim: %s, len(content): %d\",\n tensor_pb.dim,\n len(tensor_pb.content),\n )\n tensor.set(\n values=np.ndarray(\n shape=tensor_pb.dim, dtype=dtype, buffer=tensor_pb.content\n ),\n indices=np.array(tensor_pb.indices) if tensor_pb.indices else None,\n name=tensor_pb.name,\n )\n tensor_pb.Clear()\n\n\ndef tensor_pb_to_ndarray(tensor_pb):\n \"\"\"Deserialize tensor protocol buffer and return a numpy ndarray.\"\"\"\n return Tensor.from_tensor_pb(tensor_pb).to_ndarray()\n\n\ndef tensor_pb_to_tf_tensor(tensor_pb):\n \"\"\"Deserialize tensor protocol buffer and return a TensorFlow tensor.\"\"\"\n return Tensor.from_tensor_pb(tensor_pb).to_tf_tensor()\n\n\ndef emplace_tensor_pb_from_ndarray(\n tensor_pb_list, values, indices=None, name=None\n):\n \"\"\"Generate a tensor procotol buffer and append it to tensor_pb_list.\n\n Note:\n This function does not use list append function as following code\n snippet. 
It is slow because append function will copy the input\n protocol buffer.\n\n ```\n pb = elasticdl_pb2.Tensor()\n pb.dim.extend([3])\n pb.name = \"test\"\n pb.dtype = DT_INT64\n pb.content = np.array([1, 2, 3]).tobytes()\n tensor_pb_list.append(tensor_pb) # slow, because append copies pb\n ```\n \"\"\"\n tensor_pb = tensor_pb_list.add()\n tensor = Tensor(values, indices, name)\n serialize_tensor(tensor, tensor_pb)\n", "path": "elasticdl/python/common/tensor.py"}], "after_files": [{"content": "import numpy as np\nimport tensorflow as tf\n\nfrom elasticdl.proto import elasticdl_pb2\nfrom elasticdl.python.common.dtypes import (\n dtype_numpy_to_tensor,\n dtype_tensor_to_numpy,\n)\n\n\nclass Tensor(object):\n \"\"\"Data structure for tensors in ElasticDL.\n\n `Tensor` can save dense tensors and sparse tensors. For sparse tensors,\n this structure saves them in the same way as `TensorFlow.IndexedSlices`.\n \"\"\"\n\n def __init__(self, values=None, indices=None, name=None):\n \"\"\"\n `Tensor` can save dense tensors and sparse tensors.\n To pass in a dense tensor, `values` should be `numpy.ndarray` and\n `indices` should be None.\n There are two ways to pass in a sparse tensor:\n * `values` is a `numpy.ndarray` and `indices` is a `numpy.ndarray`.\n * `values` is a `TensorFlow.IndexedSlices` and `indices` is None.\n\n Args:\n values: A `numpy.ndarray` or `TensorFlow.IndexedSlices`.\n If `values` is a `TensorFlow.IndexedSlices`, `indices` should\n be None.\n indices: A `numpy.ndarray` or None.\n name: A python string.\n \"\"\"\n self.set(values, indices, name)\n\n @classmethod\n def from_tensor_pb(cls, tensor_pb):\n \"\"\"Create an ElasticDL Tensor object from tensor protocol buffer.\n\n Return the created Tensor object.\n \"\"\"\n tensor = cls()\n deserialize_tensor_pb(tensor_pb, tensor)\n return tensor\n\n def set(self, values=None, indices=None, name=None):\n self.name = name\n if isinstance(values, tf.IndexedSlices):\n if indices is not None:\n raise ValueError(\n \"When creating a Tensor object with values of type \"\n \"tf.IndexedSlices, indices must be None.\"\n )\n if values.dense_shape is not None:\n # TODO(yunjian.lmh): Support dense shape\n pass\n\n self.values = values.values.numpy()\n self.indices = values.indices.numpy()\n else:\n self.values = (\n values.numpy() if isinstance(values, tf.Tensor) else values\n )\n self.indices = (\n indices.numpy() if isinstance(indices, tf.Tensor) else indices\n )\n\n def is_indexed_slices(self):\n return self.indices is not None\n\n def to_tensor_pb(self):\n tensor_pb = elasticdl_pb2.Tensor()\n serialize_tensor(self, tensor_pb)\n return tensor_pb\n\n def to_tf_tensor(self):\n if self.is_indexed_slices():\n return tf.IndexedSlices(self.values, self.indices)\n else:\n return tf.constant(self.values)\n\n def to_ndarray(self):\n if self.is_indexed_slices():\n # Currently Tensor does not have a field representing dense shape,\n # thus can not convert it to numpy.ndarray.\n raise NotImplementedError(\n \"Converting an ElasticDL Tensor object, which contains a \"\n \"sparse tensor, to a numpy.ndarray is not supported.\"\n )\n return self.values\n\n def __add__(self, other):\n if self.is_indexed_slices() and other.is_indexed_slices():\n self.values = np.concatenate((self.values, other.values), axis=0)\n self.indices = np.concatenate(\n (self.indices, other.indices), axis=0\n )\n elif not self.is_indexed_slices() and not other.is_indexed_slices():\n self.values = self.values + other.values\n else:\n raise NotImplementedError(\n \"Only Tensor with the same 
type could be added\"\n )\n return self\n\n def __radd__(self, other):\n return self + other\n\n\ndef serialize_tensor(tensor, tensor_pb):\n \"\"\"Serialize ElasticDL Tensor to tensor protocol buffer.\"\"\"\n dtype = dtype_numpy_to_tensor(tensor.values.dtype)\n if not dtype:\n raise ValueError(\n \"Dtype of ndarray %s is not supported\", tensor.values.dtype\n )\n tensor_pb.dtype = dtype\n tensor_pb.dim.extend(tensor.values.shape)\n tensor_pb.content = tensor.values.tobytes()\n if tensor.is_indexed_slices():\n tensor_pb.indices.extend(tuple(tensor.indices))\n if tensor.name:\n tensor_pb.name = tensor.name\n\n\ndef deserialize_tensor_pb(tensor_pb, tensor):\n \"\"\"Deserialize tensor protocol buffer to ElasticDL Tensor.\n\n Note that the input tensor protocol buffer is reset and underlying buffer\n is passed to the returned ndarray.\n \"\"\"\n if not tensor_pb.dim:\n raise ValueError(\"Tensor PB has no dim defined\")\n\n dtype = dtype_tensor_to_numpy(tensor_pb.dtype)\n # Check that the buffer size agrees with dimensions.\n size = dtype.itemsize\n for d in tensor_pb.dim:\n size *= d\n if size != len(tensor_pb.content):\n raise ValueError(\n \"Tensor PB size mismatch, dim: %s, len(content): %d\",\n tensor_pb.dim,\n len(tensor_pb.content),\n )\n tensor.set(\n values=np.ndarray(\n shape=tensor_pb.dim, dtype=dtype, buffer=tensor_pb.content\n ),\n indices=np.array(tensor_pb.indices) if tensor_pb.indices else None,\n name=tensor_pb.name,\n )\n tensor_pb.Clear()\n\n\ndef tensor_pb_to_ndarray(tensor_pb):\n \"\"\"Deserialize tensor protocol buffer and return a numpy ndarray.\"\"\"\n return Tensor.from_tensor_pb(tensor_pb).to_ndarray()\n\n\ndef tensor_pb_to_tf_tensor(tensor_pb):\n \"\"\"Deserialize tensor protocol buffer and return a TensorFlow tensor.\"\"\"\n return Tensor.from_tensor_pb(tensor_pb).to_tf_tensor()\n\n\ndef emplace_tensor_pb_from_ndarray(\n tensor_pb_list, values, indices=None, name=None\n):\n \"\"\"Generate a tensor procotol buffer and append it to tensor_pb_list.\n\n Note:\n This function does not use list append function as following code\n snippet. It is slow because append function will copy the input\n protocol buffer.\n\n ```\n pb = elasticdl_pb2.Tensor()\n pb.dim.extend([3])\n pb.name = \"test\"\n pb.dtype = DT_INT64\n pb.content = np.array([1, 2, 3]).tobytes()\n tensor_pb_list.append(tensor_pb) # slow, because append copies pb\n ```\n \"\"\"\n tensor_pb = tensor_pb_list.add()\n tensor = Tensor(values, indices, name)\n serialize_tensor(tensor, tensor_pb)\n", "path": "elasticdl/python/common/tensor.py"}]} | 2,171 | 226 |
gh_patches_debug_19548 | rasdani/github-patches | git_diff | liqd__a4-opin-347 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Name of Template visible while creating the project
Hi, when creating a project with a template, it would be helpful to see the name of the project type. Maybe in the black button?
The author/creator currently has no idea which template s/he chose and, in case s/he saves it and returns to it, it may be helpful to know which one s/he chose. Thanks & Best

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/dashboard/views.py`
Content:
```
1 from allauth.account import views as account_views
2 from allauth.socialaccount import views as socialaccount_views
3 from django.contrib.messages.views import SuccessMessageMixin
4 from django.core.urlresolvers import reverse
5 from django.shortcuts import get_object_or_404, redirect
6 from django.utils import functional
7 from django.utils.translation import ugettext as _
8 from django.views import generic
9 from rules.compat import access_mixins as mixins
10 from rules.contrib import views as rules_views
11
12 from euth.memberships import models as member_models
13 from euth.organisations import models as org_models
14 from euth.phases import models as phase_models
15 from euth.projects import models as project_models
16 from euth.users import models as user_models
17
18 from . import blueprints, forms
19
20
21 def dashboard(request):
22 return redirect('dashboard-profile')
23
24
25 class DashboardBaseMixin(mixins.LoginRequiredMixin,
26 generic.base.ContextMixin,):
27
28 @functional.cached_property
29 def user_has_organisation(self):
30 return bool(self.request.user.organisation_set.all())
31
32 @functional.cached_property
33 def organisation(self):
34 if 'organisation_slug' in self.kwargs:
35 slug = self.kwargs['organisation_slug']
36 return get_object_or_404(org_models.Organisation, slug=slug)
37 else:
38 return self.request.user.organisation_set.first()
39
40 @functional.cached_property
41 def other_organisations_of_user(self):
42 user = self.request.user
43 return user.organisation_set.exclude(pk=self.organisation.pk)
44
45
46 class DashboardEmailView(DashboardBaseMixin, account_views.EmailView):
47 pass
48
49
50 class DashboardAccountView(DashboardBaseMixin,
51 socialaccount_views.ConnectionsView):
52 pass
53
54
55 class DashboardProfileView(DashboardBaseMixin,
56 SuccessMessageMixin,
57 generic.UpdateView):
58
59 model = user_models.User
60 template_name = "euth_dashboard/profile_detail.html"
61 form_class = forms.ProfileForm
62 success_message = _("Your profile was successfully updated.")
63
64 def get_object(self):
65 return get_object_or_404(user_models.User, pk=self.request.user.id)
66
67 def get_success_url(self):
68 return self.request.path
69
70
71 class DashboardOrganisationUpdateView(DashboardBaseMixin,
72 rules_views.PermissionRequiredMixin,
73 SuccessMessageMixin,
74 generic.UpdateView):
75 model = org_models.Organisation
76 form_class = forms.OrganisationForm
77 slug_url_kwarg = 'organisation_slug'
78 template_name = 'euth_dashboard/organisation_form.html'
79 success_message = _('Organisation successfully updated.')
80 permission_required = 'euth_organisations.modify_organisation'
81
82 def get_success_url(self):
83 return self.request.path
84
85
86 class DashboardProjectListView(DashboardBaseMixin,
87 rules_views.PermissionRequiredMixin,
88 generic.ListView):
89 model = project_models.Project
90 template_name = 'euth_dashboard/project_list.html'
91 permission_required = 'euth_organisations.modify_organisation'
92
93 def get_queryset(self):
94 return self.model.objects.filter(
95 organisation=self.organisation
96 )
97
98 def get_permission_object(self):
99 return self.organisation
100
101 @property
102 def raise_exception(self):
103 return self.request.user.is_authenticated()
104
105 def get_success_url(self):
106 return reverse('dashboard-project-list')
107
108
109 class DashboardBlueprintListView(DashboardBaseMixin,
110 rules_views.PermissionRequiredMixin,
111 generic.TemplateView):
112 template_name = 'euth_dashboard/blueprint_list.html'
113 blueprints = blueprints.blueprints
114 permission_required = 'euth_organisations.initiate_project'
115
116
117 class DashboardProjectCreateView(DashboardBaseMixin,
118 rules_views.PermissionRequiredMixin,
119 SuccessMessageMixin,
120 blueprints.BlueprintMixin,
121 generic.CreateView):
122 model = project_models.Project
123 form_class = forms.ProjectCreateForm
124 template_name = 'euth_dashboard/project_form.html'
125 success_message = _('Project succesfully created.')
126 permission_required = 'euth_organisations.initiate_project'
127
128 def get_permission_object(self):
129 return self.organisation
130
131 @property
132 def raise_exception(self):
133 return self.request.user.is_authenticated()
134
135 def get_form_kwargs(self):
136 kwargs = super().get_form_kwargs()
137 kwargs['blueprint'] = self.blueprint
138 kwargs['organisation'] = self.organisation
139 return kwargs
140
141 def get_success_url(self):
142 return reverse('dashboard-project-list',
143 kwargs={
144 'organisation_slug': self.organisation.slug,
145 })
146
147
148 class DashboardProjectUpdateView(DashboardBaseMixin,
149 rules_views.PermissionRequiredMixin,
150 SuccessMessageMixin,
151 generic.UpdateView):
152 model = project_models.Project
153 form_class = forms.ProjectCompleteForm
154 template_name = 'euth_dashboard/project_form.html'
155 success_message = _('Project successfully updated.')
156 permission_required = 'euth_organisations.initiate_project'
157
158 def get_permission_object(self):
159 return self.organisation
160
161 @property
162 def raise_exception(self):
163 return self.request.user.is_authenticated()
164
165 def get_success_url(self):
166 return reverse('dashboard-project-edit',
167 kwargs={
168 'organisation_slug': self.organisation.slug,
169 'slug': self.get_object().slug
170 })
171
172 def get_form_kwargs(self):
173 kwargs = super().get_form_kwargs()
174 qs = phase_models.Phase.objects.filter(module__project=self.object)
175 kwargs['phases__queryset'] = qs
176 return kwargs
177
178
179 class DashboardProjectInviteView(DashboardBaseMixin,
180 rules_views.PermissionRequiredMixin,
181 SuccessMessageMixin,
182 generic.FormView):
183 form_class = forms.ProjectInviteForm
184 template_name = 'euth_dashboard/project_invites.html'
185 success_message = _("Invitations successfully sent.")
186 permission_required = 'euth_organisations.initiate_project'
187
188 def get_permission_object(self):
189 return self.organisation
190
191 @property
192 def raise_exception(self):
193 return self.request.user.is_authenticated()
194
195 @functional.cached_property
196 def project(self):
197 return project_models.Project.objects.get(
198 slug=self.kwargs['slug']
199 )
200
201 def get_form_kwargs(self):
202 kwargs = super().get_form_kwargs()
203 kwargs['project'] = self.project
204 return kwargs
205
206 def form_valid(self, form):
207 emails = form.cleaned_data['emails']
208 user = self.request.user
209 project = self.project
210 for (name, address) in emails:
211 member_models.Invite.objects.invite(user, project, address)
212 return super().form_valid(form)
213
214 def get_success_url(self):
215 return reverse('dashboard-project-users',
216 kwargs={
217 'organisation_slug': self.organisation.slug,
218 'slug': self.project.slug
219 })
220
221
222 class DashboardProjectUserView(DashboardBaseMixin,
223 rules_views.PermissionRequiredMixin,
224 SuccessMessageMixin,
225 generic.FormView):
226
227 form_class = forms.ProjectUserForm
228 template_name = 'euth_dashboard/project_users.html'
229 success_message = _("User request successfully updated.")
230 permission_required = 'euth_organisations.initiate_project'
231
232 def get_permission_object(self):
233 return self.organisation
234
235 @property
236 def raise_exception(self):
237 return self.request.user.is_authenticated()
238
239 def get_form_kwargs(self):
240 kwargs = super().get_form_kwargs()
241 qs = member_models.Request.objects.order_by('created').filter(
242 project__slug=self.kwargs['slug']
243 )
244 kwargs['requests__queryset'] = qs
245 qs = member_models.Invite.objects.order_by('created').filter(
246 project__slug=self.kwargs['slug']
247 )
248 kwargs['invites__queryset'] = qs
249 qs = user_models.User.objects.order_by('email').filter(
250 project_participant__slug=self.kwargs['slug']
251 )
252 kwargs['users__queryset'] = qs
253 kwargs['project'] = self.project
254 return kwargs
255
256 @functional.cached_property
257 def project(self):
258 return project_models.Project.objects.get(
259 slug=self.kwargs['slug']
260 )
261
262 def get_context_data(self, **kwargs):
263 context = super().get_context_data(**kwargs)
264 context['project'] = self.project
265 return context
266
267 def get_success_url(self):
268 return self.request.path
269
270 def form_valid(self, form):
271 form.save()
272 return super().form_valid(form)
273
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/dashboard/views.py b/euth/dashboard/views.py
--- a/euth/dashboard/views.py
+++ b/euth/dashboard/views.py
@@ -125,6 +125,11 @@
success_message = _('Project succesfully created.')
permission_required = 'euth_organisations.initiate_project'
+ def get_context_data(self, **kwargs):
+ context = super().get_context_data(**kwargs)
+ context['heading'] = _("New project based on")
+ return context
+
def get_permission_object(self):
return self.organisation
@@ -155,6 +160,11 @@
success_message = _('Project successfully updated.')
permission_required = 'euth_organisations.initiate_project'
+ def get_context_data(self, **kwargs):
+ context = super().get_context_data(**kwargs)
+ context['heading'] = _("Update project: " + self.object.name)
+ return context
+
def get_permission_object(self):
return self.organisation
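The diff above only changes what the views put into the template context; the shared `euth_dashboard/project_form.html` template (not shown in this record) is expected to render the new `heading` value. A hedged sketch of how the update-view behaviour could be checked, assuming Django's test client, an authenticated initiator, and an existing project; none of the fixture names come from the repository:

```python
# Illustrative check only: fixtures and the logged-in user are assumptions.
from django.core.urlresolvers import reverse


def test_update_view_exposes_project_name(client, initiator, project):
    client.force_login(initiator)  # a user allowed to initiate projects
    url = reverse(
        "dashboard-project-edit",
        kwargs={
            "organisation_slug": project.organisation.slug,
            "slug": project.slug,
        },
    )
    response = client.get(url)
    assert response.context["heading"] == "Update project: " + project.name
```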
| {"golden_diff": "diff --git a/euth/dashboard/views.py b/euth/dashboard/views.py\n--- a/euth/dashboard/views.py\n+++ b/euth/dashboard/views.py\n@@ -125,6 +125,11 @@\n success_message = _('Project succesfully created.')\n permission_required = 'euth_organisations.initiate_project'\n \n+ def get_context_data(self, **kwargs):\n+ context = super().get_context_data(**kwargs)\n+ context['heading'] = _(\"New project based on\")\n+ return context\n+\n def get_permission_object(self):\n return self.organisation\n \n@@ -155,6 +160,11 @@\n success_message = _('Project successfully updated.')\n permission_required = 'euth_organisations.initiate_project'\n \n+ def get_context_data(self, **kwargs):\n+ context = super().get_context_data(**kwargs)\n+ context['heading'] = _(\"Update project: \" + self.object.name)\n+ return context\n+\n def get_permission_object(self):\n return self.organisation\n", "issue": "Name of Template visible while creating the project\nHi, when creating a project with a template it would be helpful to see the name of the project type. Maybe in the black button? \nThe author/creator currently has no idea which template s/he chose and in case s/he saves it and returns to it, it may appear helpful to know which one s/he choose. Thanks & Best\n\n\n\n", "before_files": [{"content": "from allauth.account import views as account_views\nfrom allauth.socialaccount import views as socialaccount_views\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import functional\nfrom django.utils.translation import ugettext as _\nfrom django.views import generic\nfrom rules.compat import access_mixins as mixins\nfrom rules.contrib import views as rules_views\n\nfrom euth.memberships import models as member_models\nfrom euth.organisations import models as org_models\nfrom euth.phases import models as phase_models\nfrom euth.projects import models as project_models\nfrom euth.users import models as user_models\n\nfrom . 
import blueprints, forms\n\n\ndef dashboard(request):\n return redirect('dashboard-profile')\n\n\nclass DashboardBaseMixin(mixins.LoginRequiredMixin,\n generic.base.ContextMixin,):\n\n @functional.cached_property\n def user_has_organisation(self):\n return bool(self.request.user.organisation_set.all())\n\n @functional.cached_property\n def organisation(self):\n if 'organisation_slug' in self.kwargs:\n slug = self.kwargs['organisation_slug']\n return get_object_or_404(org_models.Organisation, slug=slug)\n else:\n return self.request.user.organisation_set.first()\n\n @functional.cached_property\n def other_organisations_of_user(self):\n user = self.request.user\n return user.organisation_set.exclude(pk=self.organisation.pk)\n\n\nclass DashboardEmailView(DashboardBaseMixin, account_views.EmailView):\n pass\n\n\nclass DashboardAccountView(DashboardBaseMixin,\n socialaccount_views.ConnectionsView):\n pass\n\n\nclass DashboardProfileView(DashboardBaseMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n\n model = user_models.User\n template_name = \"euth_dashboard/profile_detail.html\"\n form_class = forms.ProfileForm\n success_message = _(\"Your profile was successfully updated.\")\n\n def get_object(self):\n return get_object_or_404(user_models.User, pk=self.request.user.id)\n\n def get_success_url(self):\n return self.request.path\n\n\nclass DashboardOrganisationUpdateView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = org_models.Organisation\n form_class = forms.OrganisationForm\n slug_url_kwarg = 'organisation_slug'\n template_name = 'euth_dashboard/organisation_form.html'\n success_message = _('Organisation successfully updated.')\n permission_required = 'euth_organisations.modify_organisation'\n\n def get_success_url(self):\n return self.request.path\n\n\nclass DashboardProjectListView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n generic.ListView):\n model = project_models.Project\n template_name = 'euth_dashboard/project_list.html'\n permission_required = 'euth_organisations.modify_organisation'\n\n def get_queryset(self):\n return self.model.objects.filter(\n organisation=self.organisation\n )\n\n def get_permission_object(self):\n return self.organisation\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated()\n\n def get_success_url(self):\n return reverse('dashboard-project-list')\n\n\nclass DashboardBlueprintListView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n generic.TemplateView):\n template_name = 'euth_dashboard/blueprint_list.html'\n blueprints = blueprints.blueprints\n permission_required = 'euth_organisations.initiate_project'\n\n\nclass DashboardProjectCreateView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n SuccessMessageMixin,\n blueprints.BlueprintMixin,\n generic.CreateView):\n model = project_models.Project\n form_class = forms.ProjectCreateForm\n template_name = 'euth_dashboard/project_form.html'\n success_message = _('Project succesfully created.')\n permission_required = 'euth_organisations.initiate_project'\n\n def get_permission_object(self):\n return self.organisation\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['blueprint'] = self.blueprint\n kwargs['organisation'] = self.organisation\n return kwargs\n\n def get_success_url(self):\n return reverse('dashboard-project-list',\n kwargs={\n 
'organisation_slug': self.organisation.slug,\n })\n\n\nclass DashboardProjectUpdateView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = project_models.Project\n form_class = forms.ProjectCompleteForm\n template_name = 'euth_dashboard/project_form.html'\n success_message = _('Project successfully updated.')\n permission_required = 'euth_organisations.initiate_project'\n\n def get_permission_object(self):\n return self.organisation\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated()\n\n def get_success_url(self):\n return reverse('dashboard-project-edit',\n kwargs={\n 'organisation_slug': self.organisation.slug,\n 'slug': self.get_object().slug\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n qs = phase_models.Phase.objects.filter(module__project=self.object)\n kwargs['phases__queryset'] = qs\n return kwargs\n\n\nclass DashboardProjectInviteView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.FormView):\n form_class = forms.ProjectInviteForm\n template_name = 'euth_dashboard/project_invites.html'\n success_message = _(\"Invitations successfully sent.\")\n permission_required = 'euth_organisations.initiate_project'\n\n def get_permission_object(self):\n return self.organisation\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated()\n\n @functional.cached_property\n def project(self):\n return project_models.Project.objects.get(\n slug=self.kwargs['slug']\n )\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['project'] = self.project\n return kwargs\n\n def form_valid(self, form):\n emails = form.cleaned_data['emails']\n user = self.request.user\n project = self.project\n for (name, address) in emails:\n member_models.Invite.objects.invite(user, project, address)\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse('dashboard-project-users',\n kwargs={\n 'organisation_slug': self.organisation.slug,\n 'slug': self.project.slug\n })\n\n\nclass DashboardProjectUserView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.FormView):\n\n form_class = forms.ProjectUserForm\n template_name = 'euth_dashboard/project_users.html'\n success_message = _(\"User request successfully updated.\")\n permission_required = 'euth_organisations.initiate_project'\n\n def get_permission_object(self):\n return self.organisation\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n qs = member_models.Request.objects.order_by('created').filter(\n project__slug=self.kwargs['slug']\n )\n kwargs['requests__queryset'] = qs\n qs = member_models.Invite.objects.order_by('created').filter(\n project__slug=self.kwargs['slug']\n )\n kwargs['invites__queryset'] = qs\n qs = user_models.User.objects.order_by('email').filter(\n project_participant__slug=self.kwargs['slug']\n )\n kwargs['users__queryset'] = qs\n kwargs['project'] = self.project\n return kwargs\n\n @functional.cached_property\n def project(self):\n return project_models.Project.objects.get(\n slug=self.kwargs['slug']\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['project'] = self.project\n return context\n\n def get_success_url(self):\n return self.request.path\n\n def form_valid(self, form):\n form.save()\n 
return super().form_valid(form)\n", "path": "euth/dashboard/views.py"}], "after_files": [{"content": "from allauth.account import views as account_views\nfrom allauth.socialaccount import views as socialaccount_views\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import functional\nfrom django.utils.translation import ugettext as _\nfrom django.views import generic\nfrom rules.compat import access_mixins as mixins\nfrom rules.contrib import views as rules_views\n\nfrom euth.memberships import models as member_models\nfrom euth.organisations import models as org_models\nfrom euth.phases import models as phase_models\nfrom euth.projects import models as project_models\nfrom euth.users import models as user_models\n\nfrom . import blueprints, forms\n\n\ndef dashboard(request):\n return redirect('dashboard-profile')\n\n\nclass DashboardBaseMixin(mixins.LoginRequiredMixin,\n generic.base.ContextMixin,):\n\n @functional.cached_property\n def user_has_organisation(self):\n return bool(self.request.user.organisation_set.all())\n\n @functional.cached_property\n def organisation(self):\n if 'organisation_slug' in self.kwargs:\n slug = self.kwargs['organisation_slug']\n return get_object_or_404(org_models.Organisation, slug=slug)\n else:\n return self.request.user.organisation_set.first()\n\n @functional.cached_property\n def other_organisations_of_user(self):\n user = self.request.user\n return user.organisation_set.exclude(pk=self.organisation.pk)\n\n\nclass DashboardEmailView(DashboardBaseMixin, account_views.EmailView):\n pass\n\n\nclass DashboardAccountView(DashboardBaseMixin,\n socialaccount_views.ConnectionsView):\n pass\n\n\nclass DashboardProfileView(DashboardBaseMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n\n model = user_models.User\n template_name = \"euth_dashboard/profile_detail.html\"\n form_class = forms.ProfileForm\n success_message = _(\"Your profile was successfully updated.\")\n\n def get_object(self):\n return get_object_or_404(user_models.User, pk=self.request.user.id)\n\n def get_success_url(self):\n return self.request.path\n\n\nclass DashboardOrganisationUpdateView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = org_models.Organisation\n form_class = forms.OrganisationForm\n slug_url_kwarg = 'organisation_slug'\n template_name = 'euth_dashboard/organisation_form.html'\n success_message = _('Organisation successfully updated.')\n permission_required = 'euth_organisations.modify_organisation'\n\n def get_success_url(self):\n return self.request.path\n\n\nclass DashboardProjectListView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n generic.ListView):\n model = project_models.Project\n template_name = 'euth_dashboard/project_list.html'\n permission_required = 'euth_organisations.modify_organisation'\n\n def get_queryset(self):\n return self.model.objects.filter(\n organisation=self.organisation\n )\n\n def get_permission_object(self):\n return self.organisation\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated()\n\n def get_success_url(self):\n return reverse('dashboard-project-list')\n\n\nclass DashboardBlueprintListView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n generic.TemplateView):\n template_name = 'euth_dashboard/blueprint_list.html'\n blueprints = blueprints.blueprints\n permission_required = 
'euth_organisations.initiate_project'\n\n\nclass DashboardProjectCreateView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n SuccessMessageMixin,\n blueprints.BlueprintMixin,\n generic.CreateView):\n model = project_models.Project\n form_class = forms.ProjectCreateForm\n template_name = 'euth_dashboard/project_form.html'\n success_message = _('Project succesfully created.')\n permission_required = 'euth_organisations.initiate_project'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['heading'] = _(\"New project based on\")\n return context\n\n def get_permission_object(self):\n return self.organisation\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['blueprint'] = self.blueprint\n kwargs['organisation'] = self.organisation\n return kwargs\n\n def get_success_url(self):\n return reverse('dashboard-project-list',\n kwargs={\n 'organisation_slug': self.organisation.slug,\n })\n\n\nclass DashboardProjectUpdateView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = project_models.Project\n form_class = forms.ProjectCompleteForm\n template_name = 'euth_dashboard/project_form.html'\n success_message = _('Project successfully updated.')\n permission_required = 'euth_organisations.initiate_project'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['heading'] = _(\"Update project: \" + self.object.name)\n return context\n\n def get_permission_object(self):\n return self.organisation\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated()\n\n def get_success_url(self):\n return reverse('dashboard-project-edit',\n kwargs={\n 'organisation_slug': self.organisation.slug,\n 'slug': self.get_object().slug\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n qs = phase_models.Phase.objects.filter(module__project=self.object)\n kwargs['phases__queryset'] = qs\n return kwargs\n\n\nclass DashboardProjectInviteView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.FormView):\n form_class = forms.ProjectInviteForm\n template_name = 'euth_dashboard/project_invites.html'\n success_message = _(\"Invitations successfully sent.\")\n permission_required = 'euth_organisations.initiate_project'\n\n def get_permission_object(self):\n return self.organisation\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated()\n\n @functional.cached_property\n def project(self):\n return project_models.Project.objects.get(\n slug=self.kwargs['slug']\n )\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['project'] = self.project\n return kwargs\n\n def form_valid(self, form):\n emails = form.cleaned_data['emails']\n user = self.request.user\n project = self.project\n for (name, address) in emails:\n member_models.Invite.objects.invite(user, project, address)\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse('dashboard-project-users',\n kwargs={\n 'organisation_slug': self.organisation.slug,\n 'slug': self.project.slug\n })\n\n\nclass DashboardProjectUserView(DashboardBaseMixin,\n rules_views.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.FormView):\n\n form_class = forms.ProjectUserForm\n template_name = 'euth_dashboard/project_users.html'\n 
success_message = _(\"User request successfully updated.\")\n permission_required = 'euth_organisations.initiate_project'\n\n def get_permission_object(self):\n return self.organisation\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n qs = member_models.Request.objects.order_by('created').filter(\n project__slug=self.kwargs['slug']\n )\n kwargs['requests__queryset'] = qs\n qs = member_models.Invite.objects.order_by('created').filter(\n project__slug=self.kwargs['slug']\n )\n kwargs['invites__queryset'] = qs\n qs = user_models.User.objects.order_by('email').filter(\n project_participant__slug=self.kwargs['slug']\n )\n kwargs['users__queryset'] = qs\n kwargs['project'] = self.project\n return kwargs\n\n @functional.cached_property\n def project(self):\n return project_models.Project.objects.get(\n slug=self.kwargs['slug']\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['project'] = self.project\n return context\n\n def get_success_url(self):\n return self.request.path\n\n def form_valid(self, form):\n form.save()\n return super().form_valid(form)\n", "path": "euth/dashboard/views.py"}]} | 2,890 | 224 |
gh_patches_debug_14881 | rasdani/github-patches | git_diff | kivy__kivy-4045 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WM touch/pen warning messages after 1.9.1
```
[WARNING ] [Input ] WM_Touch/WM_Pen not supported by your version of Windows
[WARNING ] [Base ] Unknown <wm_touch> provider
[WARNING ] [Base ] Unknown <wm_pen> provider
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/input/providers/wm_touch.py`
Content:
```
1 '''
2 Support for WM_TOUCH messages (Windows platform)
3 ================================================
4 '''
5
6 __all__ = ('WM_MotionEventProvider', 'WM_MotionEvent')
7
8 import os
9 from kivy.input.providers.wm_common import (
10 WM_TABLET_QUERYSYSTEMGESTURE,
11 GWL_WNDPROC, QUERYSYSTEMGESTURE_WNDPROC, WM_TOUCH, WM_MOUSEMOVE,
12 WM_MOUSELAST, PEN_OR_TOUCH_MASK, PEN_OR_TOUCH_SIGNATURE,
13 PEN_EVENT_TOUCH_MASK, TOUCHEVENTF_UP, TOUCHEVENTF_DOWN,
14 TOUCHEVENTF_MOVE, SM_CYCAPTION)
15 from kivy.input.motionevent import MotionEvent
16 from kivy.input.shape import ShapeRect
17 from kivy.core.window import Window
18
19
20 class WM_MotionEvent(MotionEvent):
21 '''MotionEvent representing the WM_MotionEvent event.
22 Supports pos, shape and size profiles.
23 '''
24 __attrs__ = ('size', )
25
26 def depack(self, args):
27 self.is_touch = True
28 self.shape = ShapeRect()
29 self.sx, self.sy = args[0], args[1]
30 self.shape.width = args[2][0]
31 self.shape.height = args[2][1]
32 self.size = self.shape.width * self.shape.height
33 self.profile = ('pos', 'shape', 'size')
34
35 super(WM_MotionEvent, self).depack(args)
36
37 def __str__(self):
38 args = (self.id, self.uid, str(self.spos), self.device)
39 return '<WMMotionEvent id:%d uid:%d pos:%s device:%s>' % args
40
41 if 'KIVY_DOC' in os.environ:
42 # documentation hack
43 WM_MotionEventProvider = None
44
45 else:
46 from ctypes.wintypes import (ULONG, HANDLE, DWORD, LONG, UINT,
47 WPARAM, LPARAM, BOOL)
48 from ctypes import (windll, WINFUNCTYPE, POINTER,
49 c_int, Structure, sizeof, byref)
50 from collections import deque
51 from kivy.input.provider import MotionEventProvider
52 from kivy.input.factory import MotionEventFactory
53
54 # check availability of RegisterTouchWindow
55 if not hasattr(windll.user32, 'RegisterTouchWindow'):
56 raise Exception('Unsupported Window version')
57
58 LRESULT = LPARAM
59 WNDPROC = WINFUNCTYPE(LRESULT, HANDLE, UINT, WPARAM, LPARAM)
60
61 class TOUCHINPUT(Structure):
62 _fields_ = [
63 ('x', LONG),
64 ('y', LONG),
65 ('pSource', HANDLE),
66 ('id', DWORD),
67 ('flags', DWORD),
68 ('mask', DWORD),
69 ('time', DWORD),
70 ('extraInfo', POINTER(ULONG)),
71 ('size_x', DWORD),
72 ('size_y', DWORD)]
73
74 def size(self):
75 return (self.size_x, self.size_y)
76
77 def screen_x(self):
78 return self.x / 100.0
79
80 def screen_y(self):
81 return self.y / 100.0
82
83 def _event_type(self):
84 if self.flags & TOUCHEVENTF_MOVE:
85 return 'update'
86 if self.flags & TOUCHEVENTF_DOWN:
87 return 'begin'
88 if self.flags & TOUCHEVENTF_UP:
89 return 'end'
90 event_type = property(_event_type)
91
92 class RECT(Structure):
93 _fields_ = [
94 ('left', LONG),
95 ('top', LONG),
96 ('right', LONG),
97 ('bottom', LONG)]
98
99 x = property(lambda self: self.left)
100 y = property(lambda self: self.top)
101 w = property(lambda self: self.right - self.left)
102 h = property(lambda self: self.bottom - self.top)
103
104 try:
105 windll.user32.SetWindowLongPtrW.restype = WNDPROC
106 windll.user32.SetWindowLongPtrW.argtypes = [HANDLE, c_int, WNDPROC]
107 SetWindowLong_wrapper = windll.user32.SetWindowLongPtrW
108 except AttributeError:
109 windll.user32.SetWindowLongW.restype = WNDPROC
110 windll.user32.SetWindowLongW.argtypes = [HANDLE, c_int, WNDPROC]
111 SetWindowLong_wrapper = windll.user32.SetWindowLongW
112
113 windll.user32.GetMessageExtraInfo.restype = LPARAM
114 windll.user32.GetMessageExtraInfo.argtypes = []
115 windll.user32.GetClientRect.restype = BOOL
116 windll.user32.GetClientRect.argtypes = [HANDLE, POINTER(RECT)]
117 windll.user32.GetWindowRect.restype = BOOL
118 windll.user32.GetWindowRect.argtypes = [HANDLE, POINTER(RECT)]
119 windll.user32.CallWindowProcW.restype = LRESULT
120 windll.user32.CallWindowProcW.argtypes = [WNDPROC, HANDLE, UINT, WPARAM,
121 LPARAM]
122 windll.user32.GetActiveWindow.restype = HANDLE
123 windll.user32.GetActiveWindow.argtypes = []
124 windll.user32.RegisterTouchWindow.restype = BOOL
125 windll.user32.RegisterTouchWindow.argtypes = [HANDLE, ULONG]
126 windll.user32.UnregisterTouchWindow.restype = BOOL
127 windll.user32.UnregisterTouchWindow.argtypes = [HANDLE]
128 windll.user32.GetTouchInputInfo.restype = BOOL
129 windll.user32.GetTouchInputInfo.argtypes = [HANDLE, UINT,
130 POINTER(TOUCHINPUT), c_int]
131 windll.user32.GetSystemMetrics.restype = c_int
132 windll.user32.GetSystemMetrics.argtypes = [c_int]
133
134 class WM_MotionEventProvider(MotionEventProvider):
135
136 def start(self):
137 self.touch_events = deque()
138 self.touches = {}
139 self.uid = 0
140
141 # get window handle, and register to recive WM_TOUCH messages
142 self.hwnd = windll.user32.GetActiveWindow()
143 windll.user32.RegisterTouchWindow(self.hwnd, 1)
144
145 # inject our own wndProc to handle messages
146 # before window manager does
147 self.new_windProc = WNDPROC(self._touch_wndProc)
148 self.old_windProc = SetWindowLong_wrapper(
149 self.hwnd, GWL_WNDPROC, self.new_windProc)
150
151 if Window.borderless or Window.fullscreen:
152 self.caption_size = 0
153 else:
154 self.caption_size = windll.user32.GetSystemMetrics(SM_CYCAPTION)
155
156 def update(self, dispatch_fn):
157 win_rect = RECT()
158 windll.user32.GetWindowRect(self.hwnd, byref(win_rect))
159 caption = self.caption_size
160
161 while True:
162 try:
163 t = self.touch_events.pop()
164 except:
165 break
166
167 # adjust x,y to window coordinates (0.0 to 1.0)
168 x = (t.screen_x() - win_rect.x) / float(win_rect.w)
169 y = 1.0 - (t.screen_y() - win_rect.y - caption
170 ) / float(win_rect.h)
171
172 # actually dispatch input
173 if t.event_type == 'begin':
174 self.uid += 1
175 self.touches[t.id] = WM_MotionEvent(
176 self.device, self.uid, [x, y, t.size()])
177 dispatch_fn('begin', self.touches[t.id])
178
179 if t.event_type == 'update' and t.id in self.touches:
180 self.touches[t.id].move([x, y, t.size()])
181 dispatch_fn('update', self.touches[t.id])
182
183 if t.event_type == 'end' and t.id in self.touches:
184 touch = self.touches[t.id]
185 touch.move([x, y, t.size()])
186 touch.update_time_end()
187 dispatch_fn('end', touch)
188 del self.touches[t.id]
189
190 def stop(self):
191 windll.user32.UnregisterTouchWindow(self.hwnd)
192 self.new_windProc = SetWindowLong_wrapper(
193 self.hwnd, GWL_WNDPROC, self.old_windProc)
194
195 # we inject this wndProc into our main window, to process
196 # WM_TOUCH and mouse messages before the window manager does
197 def _touch_wndProc(self, hwnd, msg, wParam, lParam):
198 done = False
199 if msg == WM_TABLET_QUERYSYSTEMGESTURE:
200 return QUERYSYSTEMGESTURE_WNDPROC
201
202 if msg == WM_TOUCH:
203 done = self._touch_handler(msg, wParam, lParam)
204
205 if msg >= WM_MOUSEMOVE and msg <= WM_MOUSELAST:
206 done = self._mouse_handler(msg, wParam, lParam)
207
208 if not done:
209 return windll.user32.CallWindowProcW(self.old_windProc,
210 hwnd, msg, wParam,
211 lParam)
212 return 1
213
214 # this on pushes WM_TOUCH messages onto our event stack
215 def _touch_handler(self, msg, wParam, lParam):
216 touches = (TOUCHINPUT * wParam)()
217 windll.user32.GetTouchInputInfo(HANDLE(lParam),
218 wParam,
219 touches,
220 sizeof(TOUCHINPUT))
221 for i in range(wParam):
222 self.touch_events.appendleft(touches[i])
223 windll.user32.CloseTouchInputHandle(HANDLE(lParam))
224 return True
225
226 # filter fake mouse events, because touch and stylus
227 # also make mouse events
228 def _mouse_handler(self, msg, wparam, lParam):
229 info = windll.user32.GetMessageExtraInfo()
230 # its a touch or a pen
231 if (info & PEN_OR_TOUCH_MASK) == PEN_OR_TOUCH_SIGNATURE:
232 if info & PEN_EVENT_TOUCH_MASK:
233 return True
234
235 MotionEventFactory.register('wm_touch', WM_MotionEventProvider)
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/input/providers/wm_touch.py b/kivy/input/providers/wm_touch.py
--- a/kivy/input/providers/wm_touch.py
+++ b/kivy/input/providers/wm_touch.py
@@ -14,7 +14,8 @@
TOUCHEVENTF_MOVE, SM_CYCAPTION)
from kivy.input.motionevent import MotionEvent
from kivy.input.shape import ShapeRect
-from kivy.core.window import Window
+
+Window = None
class WM_MotionEvent(MotionEvent):
@@ -134,6 +135,10 @@
class WM_MotionEventProvider(MotionEventProvider):
def start(self):
+ global Window
+ if not Window:
+ from kivy.core.window import Window
+
self.touch_events = deque()
self.touches = {}
self.uid = 0
| {"golden_diff": "diff --git a/kivy/input/providers/wm_touch.py b/kivy/input/providers/wm_touch.py\n--- a/kivy/input/providers/wm_touch.py\n+++ b/kivy/input/providers/wm_touch.py\n@@ -14,7 +14,8 @@\n TOUCHEVENTF_MOVE, SM_CYCAPTION)\n from kivy.input.motionevent import MotionEvent\n from kivy.input.shape import ShapeRect\n-from kivy.core.window import Window\n+\n+Window = None\n \n \n class WM_MotionEvent(MotionEvent):\n@@ -134,6 +135,10 @@\n class WM_MotionEventProvider(MotionEventProvider):\n \n def start(self):\n+ global Window\n+ if not Window:\n+ from kivy.core.window import Window\n+\n self.touch_events = deque()\n self.touches = {}\n self.uid = 0\n", "issue": "WM touch/pen warning messages after 1.9.1\n```\n[WARNING ] [Input ] WM_Touch/WM_Pen not supported by your version of Windows\n[WARNING ] [Base ] Unknown <wm_touch> provider\n[WARNING ] [Base ] Unknown <wm_pen> provider\n```\n\n", "before_files": [{"content": "'''\nSupport for WM_TOUCH messages (Windows platform)\n================================================\n'''\n\n__all__ = ('WM_MotionEventProvider', 'WM_MotionEvent')\n\nimport os\nfrom kivy.input.providers.wm_common import (\n WM_TABLET_QUERYSYSTEMGESTURE,\n GWL_WNDPROC, QUERYSYSTEMGESTURE_WNDPROC, WM_TOUCH, WM_MOUSEMOVE,\n WM_MOUSELAST, PEN_OR_TOUCH_MASK, PEN_OR_TOUCH_SIGNATURE,\n PEN_EVENT_TOUCH_MASK, TOUCHEVENTF_UP, TOUCHEVENTF_DOWN,\n TOUCHEVENTF_MOVE, SM_CYCAPTION)\nfrom kivy.input.motionevent import MotionEvent\nfrom kivy.input.shape import ShapeRect\nfrom kivy.core.window import Window\n\n\nclass WM_MotionEvent(MotionEvent):\n '''MotionEvent representing the WM_MotionEvent event.\n Supports pos, shape and size profiles.\n '''\n __attrs__ = ('size', )\n\n def depack(self, args):\n self.is_touch = True\n self.shape = ShapeRect()\n self.sx, self.sy = args[0], args[1]\n self.shape.width = args[2][0]\n self.shape.height = args[2][1]\n self.size = self.shape.width * self.shape.height\n self.profile = ('pos', 'shape', 'size')\n\n super(WM_MotionEvent, self).depack(args)\n\n def __str__(self):\n args = (self.id, self.uid, str(self.spos), self.device)\n return '<WMMotionEvent id:%d uid:%d pos:%s device:%s>' % args\n\nif 'KIVY_DOC' in os.environ:\n # documentation hack\n WM_MotionEventProvider = None\n\nelse:\n from ctypes.wintypes import (ULONG, HANDLE, DWORD, LONG, UINT,\n WPARAM, LPARAM, BOOL)\n from ctypes import (windll, WINFUNCTYPE, POINTER,\n c_int, Structure, sizeof, byref)\n from collections import deque\n from kivy.input.provider import MotionEventProvider\n from kivy.input.factory import MotionEventFactory\n\n # check availability of RegisterTouchWindow\n if not hasattr(windll.user32, 'RegisterTouchWindow'):\n raise Exception('Unsupported Window version')\n\n LRESULT = LPARAM\n WNDPROC = WINFUNCTYPE(LRESULT, HANDLE, UINT, WPARAM, LPARAM)\n\n class TOUCHINPUT(Structure):\n _fields_ = [\n ('x', LONG),\n ('y', LONG),\n ('pSource', HANDLE),\n ('id', DWORD),\n ('flags', DWORD),\n ('mask', DWORD),\n ('time', DWORD),\n ('extraInfo', POINTER(ULONG)),\n ('size_x', DWORD),\n ('size_y', DWORD)]\n\n def size(self):\n return (self.size_x, self.size_y)\n\n def screen_x(self):\n return self.x / 100.0\n\n def screen_y(self):\n return self.y / 100.0\n\n def _event_type(self):\n if self.flags & TOUCHEVENTF_MOVE:\n return 'update'\n if self.flags & TOUCHEVENTF_DOWN:\n return 'begin'\n if self.flags & TOUCHEVENTF_UP:\n return 'end'\n event_type = property(_event_type)\n\n class RECT(Structure):\n _fields_ = [\n ('left', LONG),\n ('top', LONG),\n ('right', LONG),\n ('bottom', 
LONG)]\n\n x = property(lambda self: self.left)\n y = property(lambda self: self.top)\n w = property(lambda self: self.right - self.left)\n h = property(lambda self: self.bottom - self.top)\n\n try:\n windll.user32.SetWindowLongPtrW.restype = WNDPROC\n windll.user32.SetWindowLongPtrW.argtypes = [HANDLE, c_int, WNDPROC]\n SetWindowLong_wrapper = windll.user32.SetWindowLongPtrW\n except AttributeError:\n windll.user32.SetWindowLongW.restype = WNDPROC\n windll.user32.SetWindowLongW.argtypes = [HANDLE, c_int, WNDPROC]\n SetWindowLong_wrapper = windll.user32.SetWindowLongW\n\n windll.user32.GetMessageExtraInfo.restype = LPARAM\n windll.user32.GetMessageExtraInfo.argtypes = []\n windll.user32.GetClientRect.restype = BOOL\n windll.user32.GetClientRect.argtypes = [HANDLE, POINTER(RECT)]\n windll.user32.GetWindowRect.restype = BOOL\n windll.user32.GetWindowRect.argtypes = [HANDLE, POINTER(RECT)]\n windll.user32.CallWindowProcW.restype = LRESULT\n windll.user32.CallWindowProcW.argtypes = [WNDPROC, HANDLE, UINT, WPARAM,\n LPARAM]\n windll.user32.GetActiveWindow.restype = HANDLE\n windll.user32.GetActiveWindow.argtypes = []\n windll.user32.RegisterTouchWindow.restype = BOOL\n windll.user32.RegisterTouchWindow.argtypes = [HANDLE, ULONG]\n windll.user32.UnregisterTouchWindow.restype = BOOL\n windll.user32.UnregisterTouchWindow.argtypes = [HANDLE]\n windll.user32.GetTouchInputInfo.restype = BOOL\n windll.user32.GetTouchInputInfo.argtypes = [HANDLE, UINT,\n POINTER(TOUCHINPUT), c_int]\n windll.user32.GetSystemMetrics.restype = c_int\n windll.user32.GetSystemMetrics.argtypes = [c_int]\n\n class WM_MotionEventProvider(MotionEventProvider):\n\n def start(self):\n self.touch_events = deque()\n self.touches = {}\n self.uid = 0\n\n # get window handle, and register to recive WM_TOUCH messages\n self.hwnd = windll.user32.GetActiveWindow()\n windll.user32.RegisterTouchWindow(self.hwnd, 1)\n\n # inject our own wndProc to handle messages\n # before window manager does\n self.new_windProc = WNDPROC(self._touch_wndProc)\n self.old_windProc = SetWindowLong_wrapper(\n self.hwnd, GWL_WNDPROC, self.new_windProc)\n\n if Window.borderless or Window.fullscreen:\n self.caption_size = 0\n else:\n self.caption_size = windll.user32.GetSystemMetrics(SM_CYCAPTION)\n\n def update(self, dispatch_fn):\n win_rect = RECT()\n windll.user32.GetWindowRect(self.hwnd, byref(win_rect))\n caption = self.caption_size\n\n while True:\n try:\n t = self.touch_events.pop()\n except:\n break\n\n # adjust x,y to window coordinates (0.0 to 1.0)\n x = (t.screen_x() - win_rect.x) / float(win_rect.w)\n y = 1.0 - (t.screen_y() - win_rect.y - caption\n ) / float(win_rect.h)\n\n # actually dispatch input\n if t.event_type == 'begin':\n self.uid += 1\n self.touches[t.id] = WM_MotionEvent(\n self.device, self.uid, [x, y, t.size()])\n dispatch_fn('begin', self.touches[t.id])\n\n if t.event_type == 'update' and t.id in self.touches:\n self.touches[t.id].move([x, y, t.size()])\n dispatch_fn('update', self.touches[t.id])\n\n if t.event_type == 'end' and t.id in self.touches:\n touch = self.touches[t.id]\n touch.move([x, y, t.size()])\n touch.update_time_end()\n dispatch_fn('end', touch)\n del self.touches[t.id]\n\n def stop(self):\n windll.user32.UnregisterTouchWindow(self.hwnd)\n self.new_windProc = SetWindowLong_wrapper(\n self.hwnd, GWL_WNDPROC, self.old_windProc)\n\n # we inject this wndProc into our main window, to process\n # WM_TOUCH and mouse messages before the window manager does\n def _touch_wndProc(self, hwnd, msg, wParam, lParam):\n done = False\n 
if msg == WM_TABLET_QUERYSYSTEMGESTURE:\n return QUERYSYSTEMGESTURE_WNDPROC\n\n if msg == WM_TOUCH:\n done = self._touch_handler(msg, wParam, lParam)\n\n if msg >= WM_MOUSEMOVE and msg <= WM_MOUSELAST:\n done = self._mouse_handler(msg, wParam, lParam)\n\n if not done:\n return windll.user32.CallWindowProcW(self.old_windProc,\n hwnd, msg, wParam,\n lParam)\n return 1\n\n # this on pushes WM_TOUCH messages onto our event stack\n def _touch_handler(self, msg, wParam, lParam):\n touches = (TOUCHINPUT * wParam)()\n windll.user32.GetTouchInputInfo(HANDLE(lParam),\n wParam,\n touches,\n sizeof(TOUCHINPUT))\n for i in range(wParam):\n self.touch_events.appendleft(touches[i])\n windll.user32.CloseTouchInputHandle(HANDLE(lParam))\n return True\n\n # filter fake mouse events, because touch and stylus\n # also make mouse events\n def _mouse_handler(self, msg, wparam, lParam):\n info = windll.user32.GetMessageExtraInfo()\n # its a touch or a pen\n if (info & PEN_OR_TOUCH_MASK) == PEN_OR_TOUCH_SIGNATURE:\n if info & PEN_EVENT_TOUCH_MASK:\n return True\n\n MotionEventFactory.register('wm_touch', WM_MotionEventProvider)\n", "path": "kivy/input/providers/wm_touch.py"}], "after_files": [{"content": "'''\nSupport for WM_TOUCH messages (Windows platform)\n================================================\n'''\n\n__all__ = ('WM_MotionEventProvider', 'WM_MotionEvent')\n\nimport os\nfrom kivy.input.providers.wm_common import (\n WM_TABLET_QUERYSYSTEMGESTURE,\n GWL_WNDPROC, QUERYSYSTEMGESTURE_WNDPROC, WM_TOUCH, WM_MOUSEMOVE,\n WM_MOUSELAST, PEN_OR_TOUCH_MASK, PEN_OR_TOUCH_SIGNATURE,\n PEN_EVENT_TOUCH_MASK, TOUCHEVENTF_UP, TOUCHEVENTF_DOWN,\n TOUCHEVENTF_MOVE, SM_CYCAPTION)\nfrom kivy.input.motionevent import MotionEvent\nfrom kivy.input.shape import ShapeRect\n\nWindow = None\n\n\nclass WM_MotionEvent(MotionEvent):\n '''MotionEvent representing the WM_MotionEvent event.\n Supports pos, shape and size profiles.\n '''\n __attrs__ = ('size', )\n\n def depack(self, args):\n self.is_touch = True\n self.shape = ShapeRect()\n self.sx, self.sy = args[0], args[1]\n self.shape.width = args[2][0]\n self.shape.height = args[2][1]\n self.size = self.shape.width * self.shape.height\n self.profile = ('pos', 'shape', 'size')\n\n super(WM_MotionEvent, self).depack(args)\n\n def __str__(self):\n args = (self.id, self.uid, str(self.spos), self.device)\n return '<WMMotionEvent id:%d uid:%d pos:%s device:%s>' % args\n\nif 'KIVY_DOC' in os.environ:\n # documentation hack\n WM_MotionEventProvider = None\n\nelse:\n from ctypes.wintypes import (ULONG, HANDLE, DWORD, LONG, UINT,\n WPARAM, LPARAM, BOOL)\n from ctypes import (windll, WINFUNCTYPE, POINTER,\n c_int, Structure, sizeof, byref)\n from collections import deque\n from kivy.input.provider import MotionEventProvider\n from kivy.input.factory import MotionEventFactory\n\n # check availability of RegisterTouchWindow\n if not hasattr(windll.user32, 'RegisterTouchWindow'):\n raise Exception('Unsupported Window version')\n\n LRESULT = LPARAM\n WNDPROC = WINFUNCTYPE(LRESULT, HANDLE, UINT, WPARAM, LPARAM)\n\n class TOUCHINPUT(Structure):\n _fields_ = [\n ('x', LONG),\n ('y', LONG),\n ('pSource', HANDLE),\n ('id', DWORD),\n ('flags', DWORD),\n ('mask', DWORD),\n ('time', DWORD),\n ('extraInfo', POINTER(ULONG)),\n ('size_x', DWORD),\n ('size_y', DWORD)]\n\n def size(self):\n return (self.size_x, self.size_y)\n\n def screen_x(self):\n return self.x / 100.0\n\n def screen_y(self):\n return self.y / 100.0\n\n def _event_type(self):\n if self.flags & TOUCHEVENTF_MOVE:\n return 'update'\n if 
self.flags & TOUCHEVENTF_DOWN:\n return 'begin'\n if self.flags & TOUCHEVENTF_UP:\n return 'end'\n event_type = property(_event_type)\n\n class RECT(Structure):\n _fields_ = [\n ('left', LONG),\n ('top', LONG),\n ('right', LONG),\n ('bottom', LONG)]\n\n x = property(lambda self: self.left)\n y = property(lambda self: self.top)\n w = property(lambda self: self.right - self.left)\n h = property(lambda self: self.bottom - self.top)\n\n try:\n windll.user32.SetWindowLongPtrW.restype = WNDPROC\n windll.user32.SetWindowLongPtrW.argtypes = [HANDLE, c_int, WNDPROC]\n SetWindowLong_wrapper = windll.user32.SetWindowLongPtrW\n except AttributeError:\n windll.user32.SetWindowLongW.restype = WNDPROC\n windll.user32.SetWindowLongW.argtypes = [HANDLE, c_int, WNDPROC]\n SetWindowLong_wrapper = windll.user32.SetWindowLongW\n\n windll.user32.GetMessageExtraInfo.restype = LPARAM\n windll.user32.GetMessageExtraInfo.argtypes = []\n windll.user32.GetClientRect.restype = BOOL\n windll.user32.GetClientRect.argtypes = [HANDLE, POINTER(RECT)]\n windll.user32.GetWindowRect.restype = BOOL\n windll.user32.GetWindowRect.argtypes = [HANDLE, POINTER(RECT)]\n windll.user32.CallWindowProcW.restype = LRESULT\n windll.user32.CallWindowProcW.argtypes = [WNDPROC, HANDLE, UINT, WPARAM,\n LPARAM]\n windll.user32.GetActiveWindow.restype = HANDLE\n windll.user32.GetActiveWindow.argtypes = []\n windll.user32.RegisterTouchWindow.restype = BOOL\n windll.user32.RegisterTouchWindow.argtypes = [HANDLE, ULONG]\n windll.user32.UnregisterTouchWindow.restype = BOOL\n windll.user32.UnregisterTouchWindow.argtypes = [HANDLE]\n windll.user32.GetTouchInputInfo.restype = BOOL\n windll.user32.GetTouchInputInfo.argtypes = [HANDLE, UINT,\n POINTER(TOUCHINPUT), c_int]\n windll.user32.GetSystemMetrics.restype = c_int\n windll.user32.GetSystemMetrics.argtypes = [c_int]\n\n class WM_MotionEventProvider(MotionEventProvider):\n\n def start(self):\n global Window\n if not Window:\n from kivy.core.window import Window\n\n self.touch_events = deque()\n self.touches = {}\n self.uid = 0\n\n # get window handle, and register to recive WM_TOUCH messages\n self.hwnd = windll.user32.GetActiveWindow()\n windll.user32.RegisterTouchWindow(self.hwnd, 1)\n\n # inject our own wndProc to handle messages\n # before window manager does\n self.new_windProc = WNDPROC(self._touch_wndProc)\n self.old_windProc = SetWindowLong_wrapper(\n self.hwnd, GWL_WNDPROC, self.new_windProc)\n\n if Window.borderless or Window.fullscreen:\n self.caption_size = 0\n else:\n self.caption_size = windll.user32.GetSystemMetrics(SM_CYCAPTION)\n\n def update(self, dispatch_fn):\n win_rect = RECT()\n windll.user32.GetWindowRect(self.hwnd, byref(win_rect))\n caption = self.caption_size\n\n while True:\n try:\n t = self.touch_events.pop()\n except:\n break\n\n # adjust x,y to window coordinates (0.0 to 1.0)\n x = (t.screen_x() - win_rect.x) / float(win_rect.w)\n y = 1.0 - (t.screen_y() - win_rect.y - caption\n ) / float(win_rect.h)\n\n # actually dispatch input\n if t.event_type == 'begin':\n self.uid += 1\n self.touches[t.id] = WM_MotionEvent(\n self.device, self.uid, [x, y, t.size()])\n dispatch_fn('begin', self.touches[t.id])\n\n if t.event_type == 'update' and t.id in self.touches:\n self.touches[t.id].move([x, y, t.size()])\n dispatch_fn('update', self.touches[t.id])\n\n if t.event_type == 'end' and t.id in self.touches:\n touch = self.touches[t.id]\n touch.move([x, y, t.size()])\n touch.update_time_end()\n dispatch_fn('end', touch)\n del self.touches[t.id]\n\n def stop(self):\n 
windll.user32.UnregisterTouchWindow(self.hwnd)\n self.new_windProc = SetWindowLong_wrapper(\n self.hwnd, GWL_WNDPROC, self.old_windProc)\n\n # we inject this wndProc into our main window, to process\n # WM_TOUCH and mouse messages before the window manager does\n def _touch_wndProc(self, hwnd, msg, wParam, lParam):\n done = False\n if msg == WM_TABLET_QUERYSYSTEMGESTURE:\n return QUERYSYSTEMGESTURE_WNDPROC\n\n if msg == WM_TOUCH:\n done = self._touch_handler(msg, wParam, lParam)\n\n if msg >= WM_MOUSEMOVE and msg <= WM_MOUSELAST:\n done = self._mouse_handler(msg, wParam, lParam)\n\n if not done:\n return windll.user32.CallWindowProcW(self.old_windProc,\n hwnd, msg, wParam,\n lParam)\n return 1\n\n # this on pushes WM_TOUCH messages onto our event stack\n def _touch_handler(self, msg, wParam, lParam):\n touches = (TOUCHINPUT * wParam)()\n windll.user32.GetTouchInputInfo(HANDLE(lParam),\n wParam,\n touches,\n sizeof(TOUCHINPUT))\n for i in range(wParam):\n self.touch_events.appendleft(touches[i])\n windll.user32.CloseTouchInputHandle(HANDLE(lParam))\n return True\n\n # filter fake mouse events, because touch and stylus\n # also make mouse events\n def _mouse_handler(self, msg, wparam, lParam):\n info = windll.user32.GetMessageExtraInfo()\n # its a touch or a pen\n if (info & PEN_OR_TOUCH_MASK) == PEN_OR_TOUCH_SIGNATURE:\n if info & PEN_EVENT_TOUCH_MASK:\n return True\n\n MotionEventFactory.register('wm_touch', WM_MotionEventProvider)\n", "path": "kivy/input/providers/wm_touch.py"}]} | 3,030 | 181 |
gh_patches_debug_16575 | rasdani/github-patches | git_diff | deepchecks__deepchecks-1050 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG][CV] deepchecks should either validate model.eval status or actively create it
**Describe the bug**
Checks can crash or give wrong results if a model is mistakenly set to training mode.
**To Reproduce**
Run metrics check with model.train() preceding it.
**Expected behavior**
Either we validate that the model is in the correct state, or we actively call model.eval()
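For reference, the state being discussed is the standard torch.nn.Module.training flag; a minimal, self-contained illustration of the two modes (added for illustration, not part of the original report):
```python
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.Dropout(0.5))

model.train()
print(model.training)  # True: BatchNorm/Dropout use their training behaviour here

model.eval()
print(model.training)  # False: the evaluation state expected before running checks
```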
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/vision/context.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module for base vision context."""
12 import logging
13 from typing import Mapping, Union, Iterable, Any, Tuple
14
15 import torch
16 from torch import nn
17 from ignite.metrics import Metric
18
19 from deepchecks.core import DatasetKind
20 from deepchecks.vision.vision_data import VisionData, TaskType
21 from deepchecks.vision.utils.validation import apply_to_tensor
22 from deepchecks.core.errors import (
23 DatasetValidationError, DeepchecksNotImplementedError, ModelValidationError,
24 DeepchecksNotSupportedError, DeepchecksValueError
25 )
26
27
28 __all__ = ['Context']
29
30
31 logger = logging.getLogger('deepchecks')
32
33
34 class Batch:
35 """Represents dataset batch returned by the dataloader during iteration."""
36
37 def __init__(
38 self,
39 batch: Tuple[Iterable[Any], Iterable[Any]],
40 context: 'Context',
41 dataset_kind: DatasetKind
42 ):
43 self._context = context
44 self._dataset_kind = dataset_kind
45 self._batch = apply_to_tensor(batch, lambda it: it.to(self._context.device))
46 self._labels = None
47 self._predictions = None
48 self._images = None
49
50 @property
51 def labels(self):
52 if self._labels is None:
53 dataset = self._context.get_data_by_kind(self._dataset_kind)
54 self._labels = dataset.batch_to_labels(self._batch)
55 return self._labels
56
57 @property
58 def predictions(self):
59 if self._predictions is None:
60 dataset = self._context.get_data_by_kind(self._dataset_kind)
61 self._predictions = dataset.infer_on_batch(self._batch, self._context.model, self._context.device)
62 return self._predictions
63
64 @property
65 def images(self):
66 if self._images is None:
67 dataset = self._context.get_data_by_kind(self._dataset_kind)
68 self._images = dataset.batch_to_images(self._batch)
69 return self._images
70
71 def __getitem__(self, index):
72 return self._batch[index]
73
74
75 class Context:
76 """Contains all the data + properties the user has passed to a check/suite, and validates it seamlessly.
77
78 Parameters
79 ----------
80 train : VisionData , default: None
81 Dataset or DataFrame object, representing data an estimator was fitted on
82 test : VisionData , default: None
83 Dataset or DataFrame object, representing data an estimator predicts on
84 model : BasicModel , default: None
85 A scikit-learn-compatible fitted estimator instance
86 model_name: str , default: ''
87 The name of the model
88 scorers : Mapping[str, Metric] , default: None
89 dict of scorers names to a Metric
90 scorers_per_class : Mapping[str, Metric] , default: None
91 dict of scorers for classification without averaging of the classes.
92 See <a href=
93 "https://scikit-learn.org/stable/modules/model_evaluation.html#from-binary-to-multiclass-and-multilabel">
94 scikit-learn docs</a>
95 device : Union[str, torch.device], default: 'cpu'
96 processing unit for use
97 random_state : int
98 A seed to set for pseudo-random functions
99 n_samples : int, default: None
100 """
101
102 def __init__(self,
103 train: VisionData = None,
104 test: VisionData = None,
105 model: nn.Module = None,
106 model_name: str = '',
107 scorers: Mapping[str, Metric] = None,
108 scorers_per_class: Mapping[str, Metric] = None,
109 device: Union[str, torch.device, None] = 'cpu',
110 random_state: int = 42,
111 n_samples: int = None
112 ):
113 # Validations
114 if train is None and test is None and model is None:
115 raise DeepchecksValueError('At least one dataset (or model) must be passed to the method!')
116 if test and not train:
117 raise DatasetValidationError('Can\'t initialize context with only test. if you have single dataset, '
118 'initialize it as train')
119 if train and test:
120 train.validate_shared_label(test)
121
122 self._device = torch.device(device) if isinstance(device, str) else (device if device else torch.device('cpu'))
123
124 if model is not None:
125 for dataset, dataset_type in zip([train, test], ['train', 'test']):
126 if dataset is not None:
127 try:
128 dataset.validate_prediction(next(iter(dataset.data_loader)), model, self._device)
129 except DeepchecksNotImplementedError:
130 logger.warning('validate_prediction() was not implemented in %s dataset, '
131 'some checks will not run', dataset_type)
132
133 # The copy does 2 things: Sample n_samples if parameter exists, and shuffle the data.
134 # we shuffle because the data in VisionData is set to be sampled in a fixed order (in the init), so if the user
135 # wants to run without random_state we need to forcefully shuffle (to have different results on different runs
136 # from the same VisionData object), and if there is a random_state the shuffle will always have same result
137 if train:
138 train = train.copy(shuffle=True, n_samples=n_samples, random_state=random_state)
139 if test:
140 test = test.copy(shuffle=True, n_samples=n_samples, random_state=random_state)
141
142 self._train = train
143 self._test = test
144 self._model = model
145 self._user_scorers = scorers
146 self._user_scorers_per_class = scorers_per_class
147 self._model_name = model_name
148 self.random_state = random_state
149
150 # Properties
151 # Validations note: We know train & test fit each other so all validations can be run only on train
152
153 @property
154 def train(self) -> VisionData:
155 """Return train if exists, otherwise raise error."""
156 if self._train is None:
157 raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without train dataset')
158 return self._train
159
160 @property
161 def test(self) -> VisionData:
162 """Return test if exists, otherwise raise error."""
163 if self._test is None:
164 raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without test dataset')
165 return self._test
166
167 @property
168 def model(self) -> nn.Module:
169 """Return & validate model if model exists, otherwise raise error."""
170 if self._model is None:
171 raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without model')
172 return self._model
173
174 @property
175 def model_name(self):
176 """Return model name."""
177 return self._model_name
178
179 @property
180 def device(self) -> torch.device:
181 """Return device specified by the user."""
182 return self._device
183
184 def have_test(self):
185 """Return whether there is test dataset defined."""
186 return self._test is not None
187
188 def assert_task_type(self, *expected_types: TaskType):
189 """Assert task_type matching given types."""
190 if self.train.task_type not in expected_types:
191 raise ModelValidationError(
192 f'Check is irrelevant for task of type {self.train.task_type}')
193 return True
194
195 def get_data_by_kind(self, kind: DatasetKind):
196 """Return the relevant VisionData by given kind."""
197 if kind == DatasetKind.TRAIN:
198 return self.train
199 elif kind == DatasetKind.TEST:
200 return self.test
201 else:
202 raise DeepchecksValueError(f'Unexpected dataset kind {kind}')
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deepchecks/vision/context.py b/deepchecks/vision/context.py
--- a/deepchecks/vision/context.py
+++ b/deepchecks/vision/context.py
@@ -122,6 +122,13 @@
self._device = torch.device(device) if isinstance(device, str) else (device if device else torch.device('cpu'))
if model is not None:
+ if not isinstance(model, nn.Module):
+ logger.warning('Model is not a torch.nn.Module. Deepchecks can\'t validate that model is in '
+ 'evaluation state.')
+ else:
+ if model.training:
+ raise DatasetValidationError('Model is not in evaluation state. Please set model training '
+ 'parameter to False or run model.eval() before passing it.')
for dataset, dataset_type in zip([train, test], ['train', 'test']):
if dataset is not None:
try:
| {"golden_diff": "diff --git a/deepchecks/vision/context.py b/deepchecks/vision/context.py\n--- a/deepchecks/vision/context.py\n+++ b/deepchecks/vision/context.py\n@@ -122,6 +122,13 @@\n self._device = torch.device(device) if isinstance(device, str) else (device if device else torch.device('cpu'))\n \n if model is not None:\n+ if not isinstance(model, nn.Module):\n+ logger.warning('Model is not a torch.nn.Module. Deepchecks can\\'t validate that model is in '\n+ 'evaluation state.')\n+ else:\n+ if model.training:\n+ raise DatasetValidationError('Model is not in evaluation state. Please set model training '\n+ 'parameter to False or run model.eval() before passing it.')\n for dataset, dataset_type in zip([train, test], ['train', 'test']):\n if dataset is not None:\n try:\n", "issue": "[BUG][CV] deepchecks should either validate model.eval status or actively create it \n**Describe the bug**\r\nChecks can crash or give wrong results if models are set by mistake to training mode. \r\n\r\n**To Reproduce**\r\nRun metrics check with model.train() preceding it.\r\n\r\n**Expected behavior**\r\nEither we validate that the model is in the correct state, or we actively call model.eval()\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module for base vision context.\"\"\"\nimport logging\nfrom typing import Mapping, Union, Iterable, Any, Tuple\n\nimport torch\nfrom torch import nn\nfrom ignite.metrics import Metric\n\nfrom deepchecks.core import DatasetKind\nfrom deepchecks.vision.vision_data import VisionData, TaskType\nfrom deepchecks.vision.utils.validation import apply_to_tensor\nfrom deepchecks.core.errors import (\n DatasetValidationError, DeepchecksNotImplementedError, ModelValidationError,\n DeepchecksNotSupportedError, DeepchecksValueError\n)\n\n\n__all__ = ['Context']\n\n\nlogger = logging.getLogger('deepchecks')\n\n\nclass Batch:\n \"\"\"Represents dataset batch returned by the dataloader during iteration.\"\"\"\n\n def __init__(\n self,\n batch: Tuple[Iterable[Any], Iterable[Any]],\n context: 'Context',\n dataset_kind: DatasetKind\n ):\n self._context = context\n self._dataset_kind = dataset_kind\n self._batch = apply_to_tensor(batch, lambda it: it.to(self._context.device))\n self._labels = None\n self._predictions = None\n self._images = None\n\n @property\n def labels(self):\n if self._labels is None:\n dataset = self._context.get_data_by_kind(self._dataset_kind)\n self._labels = dataset.batch_to_labels(self._batch)\n return self._labels\n\n @property\n def predictions(self):\n if self._predictions is None:\n dataset = self._context.get_data_by_kind(self._dataset_kind)\n self._predictions = dataset.infer_on_batch(self._batch, self._context.model, self._context.device)\n return self._predictions\n\n @property\n def images(self):\n if self._images is None:\n dataset = self._context.get_data_by_kind(self._dataset_kind)\n self._images = dataset.batch_to_images(self._batch)\n return self._images\n\n def __getitem__(self, index):\n return self._batch[index]\n\n\nclass Context:\n 
\"\"\"Contains all the data + properties the user has passed to a check/suite, and validates it seamlessly.\n\n Parameters\n ----------\n train : VisionData , default: None\n Dataset or DataFrame object, representing data an estimator was fitted on\n test : VisionData , default: None\n Dataset or DataFrame object, representing data an estimator predicts on\n model : BasicModel , default: None\n A scikit-learn-compatible fitted estimator instance\n model_name: str , default: ''\n The name of the model\n scorers : Mapping[str, Metric] , default: None\n dict of scorers names to a Metric\n scorers_per_class : Mapping[str, Metric] , default: None\n dict of scorers for classification without averaging of the classes.\n See <a href=\n \"https://scikit-learn.org/stable/modules/model_evaluation.html#from-binary-to-multiclass-and-multilabel\">\n scikit-learn docs</a>\n device : Union[str, torch.device], default: 'cpu'\n processing unit for use\n random_state : int\n A seed to set for pseudo-random functions\n n_samples : int, default: None\n \"\"\"\n\n def __init__(self,\n train: VisionData = None,\n test: VisionData = None,\n model: nn.Module = None,\n model_name: str = '',\n scorers: Mapping[str, Metric] = None,\n scorers_per_class: Mapping[str, Metric] = None,\n device: Union[str, torch.device, None] = 'cpu',\n random_state: int = 42,\n n_samples: int = None\n ):\n # Validations\n if train is None and test is None and model is None:\n raise DeepchecksValueError('At least one dataset (or model) must be passed to the method!')\n if test and not train:\n raise DatasetValidationError('Can\\'t initialize context with only test. if you have single dataset, '\n 'initialize it as train')\n if train and test:\n train.validate_shared_label(test)\n\n self._device = torch.device(device) if isinstance(device, str) else (device if device else torch.device('cpu'))\n\n if model is not None:\n for dataset, dataset_type in zip([train, test], ['train', 'test']):\n if dataset is not None:\n try:\n dataset.validate_prediction(next(iter(dataset.data_loader)), model, self._device)\n except DeepchecksNotImplementedError:\n logger.warning('validate_prediction() was not implemented in %s dataset, '\n 'some checks will not run', dataset_type)\n\n # The copy does 2 things: Sample n_samples if parameter exists, and shuffle the data.\n # we shuffle because the data in VisionData is set to be sampled in a fixed order (in the init), so if the user\n # wants to run without random_state we need to forcefully shuffle (to have different results on different runs\n # from the same VisionData object), and if there is a random_state the shuffle will always have same result\n if train:\n train = train.copy(shuffle=True, n_samples=n_samples, random_state=random_state)\n if test:\n test = test.copy(shuffle=True, n_samples=n_samples, random_state=random_state)\n\n self._train = train\n self._test = test\n self._model = model\n self._user_scorers = scorers\n self._user_scorers_per_class = scorers_per_class\n self._model_name = model_name\n self.random_state = random_state\n\n # Properties\n # Validations note: We know train & test fit each other so all validations can be run only on train\n\n @property\n def train(self) -> VisionData:\n \"\"\"Return train if exists, otherwise raise error.\"\"\"\n if self._train is None:\n raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without train dataset')\n return self._train\n\n @property\n def test(self) -> VisionData:\n \"\"\"Return test if exists, otherwise raise error.\"\"\"\n 
if self._test is None:\n raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without test dataset')\n return self._test\n\n @property\n def model(self) -> nn.Module:\n \"\"\"Return & validate model if model exists, otherwise raise error.\"\"\"\n if self._model is None:\n raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without model')\n return self._model\n\n @property\n def model_name(self):\n \"\"\"Return model name.\"\"\"\n return self._model_name\n\n @property\n def device(self) -> torch.device:\n \"\"\"Return device specified by the user.\"\"\"\n return self._device\n\n def have_test(self):\n \"\"\"Return whether there is test dataset defined.\"\"\"\n return self._test is not None\n\n def assert_task_type(self, *expected_types: TaskType):\n \"\"\"Assert task_type matching given types.\"\"\"\n if self.train.task_type not in expected_types:\n raise ModelValidationError(\n f'Check is irrelevant for task of type {self.train.task_type}')\n return True\n\n def get_data_by_kind(self, kind: DatasetKind):\n \"\"\"Return the relevant VisionData by given kind.\"\"\"\n if kind == DatasetKind.TRAIN:\n return self.train\n elif kind == DatasetKind.TEST:\n return self.test\n else:\n raise DeepchecksValueError(f'Unexpected dataset kind {kind}')\n", "path": "deepchecks/vision/context.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module for base vision context.\"\"\"\nimport logging\nfrom typing import Mapping, Union, Iterable, Any, Tuple\n\nimport torch\nfrom torch import nn\nfrom ignite.metrics import Metric\n\nfrom deepchecks.core import DatasetKind\nfrom deepchecks.vision.vision_data import VisionData, TaskType\nfrom deepchecks.vision.utils.validation import apply_to_tensor\nfrom deepchecks.core.errors import (\n DatasetValidationError, DeepchecksNotImplementedError, ModelValidationError,\n DeepchecksNotSupportedError, DeepchecksValueError\n)\n\n\n__all__ = ['Context']\n\n\nlogger = logging.getLogger('deepchecks')\n\n\nclass Batch:\n \"\"\"Represents dataset batch returned by the dataloader during iteration.\"\"\"\n\n def __init__(\n self,\n batch: Tuple[Iterable[Any], Iterable[Any]],\n context: 'Context',\n dataset_kind: DatasetKind\n ):\n self._context = context\n self._dataset_kind = dataset_kind\n self._batch = apply_to_tensor(batch, lambda it: it.to(self._context.device))\n self._labels = None\n self._predictions = None\n self._images = None\n\n @property\n def labels(self):\n if self._labels is None:\n dataset = self._context.get_data_by_kind(self._dataset_kind)\n self._labels = dataset.batch_to_labels(self._batch)\n return self._labels\n\n @property\n def predictions(self):\n if self._predictions is None:\n dataset = self._context.get_data_by_kind(self._dataset_kind)\n self._predictions = dataset.infer_on_batch(self._batch, self._context.model, self._context.device)\n return self._predictions\n\n @property\n def images(self):\n if self._images is None:\n dataset = self._context.get_data_by_kind(self._dataset_kind)\n self._images = 
dataset.batch_to_images(self._batch)\n return self._images\n\n def __getitem__(self, index):\n return self._batch[index]\n\n\nclass Context:\n \"\"\"Contains all the data + properties the user has passed to a check/suite, and validates it seamlessly.\n\n Parameters\n ----------\n train : VisionData , default: None\n Dataset or DataFrame object, representing data an estimator was fitted on\n test : VisionData , default: None\n Dataset or DataFrame object, representing data an estimator predicts on\n model : BasicModel , default: None\n A scikit-learn-compatible fitted estimator instance\n model_name: str , default: ''\n The name of the model\n scorers : Mapping[str, Metric] , default: None\n dict of scorers names to a Metric\n scorers_per_class : Mapping[str, Metric] , default: None\n dict of scorers for classification without averaging of the classes.\n See <a href=\n \"https://scikit-learn.org/stable/modules/model_evaluation.html#from-binary-to-multiclass-and-multilabel\">\n scikit-learn docs</a>\n device : Union[str, torch.device], default: 'cpu'\n processing unit for use\n random_state : int\n A seed to set for pseudo-random functions\n n_samples : int, default: None\n \"\"\"\n\n def __init__(self,\n train: VisionData = None,\n test: VisionData = None,\n model: nn.Module = None,\n model_name: str = '',\n scorers: Mapping[str, Metric] = None,\n scorers_per_class: Mapping[str, Metric] = None,\n device: Union[str, torch.device, None] = 'cpu',\n random_state: int = 42,\n n_samples: int = None\n ):\n # Validations\n if train is None and test is None and model is None:\n raise DeepchecksValueError('At least one dataset (or model) must be passed to the method!')\n if test and not train:\n raise DatasetValidationError('Can\\'t initialize context with only test. if you have single dataset, '\n 'initialize it as train')\n if train and test:\n train.validate_shared_label(test)\n\n self._device = torch.device(device) if isinstance(device, str) else (device if device else torch.device('cpu'))\n\n if model is not None:\n if not isinstance(model, nn.Module):\n logger.warning('Model is not a torch.nn.Module. Deepchecks can\\'t validate that model is in '\n 'evaluation state.')\n else:\n if model.training:\n raise DatasetValidationError('Model is not in evaluation state. 
Please set model training '\n 'parameter to False or run model.eval() before passing it.')\n for dataset, dataset_type in zip([train, test], ['train', 'test']):\n if dataset is not None:\n try:\n dataset.validate_prediction(next(iter(dataset.data_loader)), model, self._device)\n except DeepchecksNotImplementedError:\n logger.warning('validate_prediction() was not implemented in %s dataset, '\n 'some checks will not run', dataset_type)\n\n # The copy does 2 things: Sample n_samples if parameter exists, and shuffle the data.\n # we shuffle because the data in VisionData is set to be sampled in a fixed order (in the init), so if the user\n # wants to run without random_state we need to forcefully shuffle (to have different results on different runs\n # from the same VisionData object), and if there is a random_state the shuffle will always have same result\n if train:\n train = train.copy(shuffle=True, n_samples=n_samples, random_state=random_state)\n if test:\n test = test.copy(shuffle=True, n_samples=n_samples, random_state=random_state)\n\n self._train = train\n self._test = test\n self._model = model\n self._user_scorers = scorers\n self._user_scorers_per_class = scorers_per_class\n self._model_name = model_name\n self.random_state = random_state\n\n # Properties\n # Validations note: We know train & test fit each other so all validations can be run only on train\n\n @property\n def train(self) -> VisionData:\n \"\"\"Return train if exists, otherwise raise error.\"\"\"\n if self._train is None:\n raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without train dataset')\n return self._train\n\n @property\n def test(self) -> VisionData:\n \"\"\"Return test if exists, otherwise raise error.\"\"\"\n if self._test is None:\n raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without test dataset')\n return self._test\n\n @property\n def model(self) -> nn.Module:\n \"\"\"Return & validate model if model exists, otherwise raise error.\"\"\"\n if self._model is None:\n raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without model')\n return self._model\n\n @property\n def model_name(self):\n \"\"\"Return model name.\"\"\"\n return self._model_name\n\n @property\n def device(self) -> torch.device:\n \"\"\"Return device specified by the user.\"\"\"\n return self._device\n\n def have_test(self):\n \"\"\"Return whether there is test dataset defined.\"\"\"\n return self._test is not None\n\n def assert_task_type(self, *expected_types: TaskType):\n \"\"\"Assert task_type matching given types.\"\"\"\n if self.train.task_type not in expected_types:\n raise ModelValidationError(\n f'Check is irrelevant for task of type {self.train.task_type}')\n return True\n\n def get_data_by_kind(self, kind: DatasetKind):\n \"\"\"Return the relevant VisionData by given kind.\"\"\"\n if kind == DatasetKind.TRAIN:\n return self.train\n elif kind == DatasetKind.TEST:\n return self.test\n else:\n raise DeepchecksValueError(f'Unexpected dataset kind {kind}')\n", "path": "deepchecks/vision/context.py"}]} | 2,543 | 201 |
gh_patches_debug_18918 | rasdani/github-patches | git_diff | streamlit__streamlit-3501 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Setting `default` on multiselect widget that uses pandas.Series as `options` causes an error
### Summary
[`st.multiselect`](https://docs.streamlit.io/en/stable/api.html?highlight=options#streamlit.multiselect) is supposed to accept `pandas.Series` objects as labels for the select options. Setting a `default` value while using `options=pandas.Series` leads to:
> StreamlitAPIException : Every Multiselect default value must exist in options
### Steps to reproduce
Run the below code snippet.
Code snippet:
```python
import streamlit as st
import pandas as pd
names = pd.DataFrame({'labels':["Green","Yellow","Red","Blue"]})
nameSelect = st.multiselect(
"What are your favorite colors",
options=names['labels'],
default=["Yellow"]
)
```
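A possible user-side workaround (an assumption for illustration, not from the original report) is to pass the options as a plain list; in pandas, `x in series` tests the index rather than the values, which is a plausible source of the mismatch. Continuing the snippet above:
```python
nameSelect = st.multiselect(
    "What are your favorite colors",
    options=names['labels'].tolist(),  # plain list of labels instead of the Series
    default=["Yellow"],
)
```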
### Is this a regression?
Possibly a core regression.
### Debug info
- Streamlit version: 0.82.0
- Python version: 3.8.5
- OS version: Ubuntu 20.04.2 LTS
- Browser version: Firefox 89.0 (64-bit)
### Additional information
Original source: https://discuss.streamlit.io/t/setting-default-value-on-multiselect-that-uses-a-series-for-the-options/13630
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/elements/multiselect.py`
Content:
```
1 # Copyright 2018-2021 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import cast, List
16
17 import streamlit
18 from streamlit.errors import StreamlitAPIException
19 from streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto
20 from streamlit.state.widgets import register_widget
21 from streamlit.type_util import is_type, ensure_iterable
22 from .form import current_form_id
23 from .utils import check_callback_rules, check_session_state_rules
24
25
26 class MultiSelectMixin:
27 def multiselect(
28 self,
29 label,
30 options,
31 default=None,
32 format_func=str,
33 key=None,
34 help=None,
35 on_change=None,
36 args=None,
37 kwargs=None,
38 ):
39 """Display a multiselect widget.
40 The multiselect widget starts as empty.
41
42 Parameters
43 ----------
44 label : str
45 A short label explaining to the user what this select widget is for.
46 options : list, tuple, numpy.ndarray, pandas.Series, or pandas.DataFrame
47 Labels for the select options. This will be cast to str internally
48 by default. For pandas.DataFrame, the first column is selected.
49 default: [str] or None
50 List of default values.
51 format_func : function
52 Function to modify the display of selectbox options. It receives
53 the raw option as an argument and should output the label to be
54 shown for that option. This has no impact on the return value of
55 the selectbox.
56 key : str
57 An optional string to use as the unique key for the widget.
58 If this is omitted, a key will be generated for the widget
59 based on its content. Multiple widgets of the same type may
60 not share the same key.
61 help : str
62 An optional tooltip that gets displayed next to the multiselect.
63 on_change : callable
64 An optional callback invoked when this multiselect's value changes.
65 args : tuple
66 An optional tuple of args to pass to the callback.
67 kwargs : dict
68 An optional dict of kwargs to pass to the callback.
69
70 Returns
71 -------
72 list
73 A list with the selected options
74
75 Example
76 -------
77 >>> options = st.multiselect(
78 ... 'What are your favorite colors',
79 ... ['Green', 'Yellow', 'Red', 'Blue'],
80 ... ['Yellow', 'Red'])
81 >>>
82 >>> st.write('You selected:', options)
83
84 .. note::
85 User experience can be degraded for large lists of `options` (100+), as this widget
86 is not designed to handle arbitrary text search efficiently. See this
87 `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_
88 on the Streamlit community forum for more information and
89 `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue.
90
91 """
92 check_callback_rules(self.dg, on_change)
93 check_session_state_rules(default_value=default, key=key)
94
95 options = ensure_iterable(options)
96
97 # Perform validation checks and return indices base on the default values.
98 def _check_and_convert_to_indices(options, default_values):
99 if default_values is None and None not in options:
100 return None
101
102 if not isinstance(default_values, list):
103 # This if is done before others because calling if not x (done
104 # right below) when x is of type pd.Series() or np.array() throws a
105 # ValueError exception.
106 if is_type(default_values, "numpy.ndarray") or is_type(
107 default_values, "pandas.core.series.Series"
108 ):
109 default_values = list(default_values)
110 elif not default_values or default_values in options:
111 default_values = [default_values]
112 else:
113 default_values = list(default_values)
114
115 for value in default_values:
116 if value not in options:
117 raise StreamlitAPIException(
118 "Every Multiselect default value must exist in options"
119 )
120
121 return [options.index(value) for value in default_values]
122
123 indices = _check_and_convert_to_indices(options, default)
124 multiselect_proto = MultiSelectProto()
125 multiselect_proto.label = label
126 default_value = [] if indices is None else indices
127 multiselect_proto.default[:] = default_value
128 multiselect_proto.options[:] = [str(format_func(option)) for option in options]
129 multiselect_proto.form_id = current_form_id(self.dg)
130 if help is not None:
131 multiselect_proto.help = help
132
133 def deserialize_multiselect(ui_value, widget_id="") -> List[str]:
134 current_value = ui_value if ui_value is not None else default_value
135 return [options[i] for i in current_value]
136
137 def serialize_multiselect(value):
138 return _check_and_convert_to_indices(options, value)
139
140 current_value, set_frontend_value = register_widget(
141 "multiselect",
142 multiselect_proto,
143 user_key=key,
144 on_change_handler=on_change,
145 args=args,
146 kwargs=kwargs,
147 deserializer=deserialize_multiselect,
148 serializer=serialize_multiselect,
149 )
150
151 if set_frontend_value:
152 multiselect_proto.value[:] = _check_and_convert_to_indices(
153 options, current_value
154 )
155 multiselect_proto.set_value = True
156
157 self.dg._enqueue("multiselect", multiselect_proto)
158 return current_value
159
160 @property
161 def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
162 """Get our DeltaGenerator."""
163 return cast("streamlit.delta_generator.DeltaGenerator", self)
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/streamlit/elements/multiselect.py b/lib/streamlit/elements/multiselect.py
--- a/lib/streamlit/elements/multiselect.py
+++ b/lib/streamlit/elements/multiselect.py
@@ -19,6 +19,7 @@
from streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto
from streamlit.state.widgets import register_widget
from streamlit.type_util import is_type, ensure_iterable
+
from .form import current_form_id
from .utils import check_callback_rules, check_session_state_rules
@@ -112,6 +113,9 @@
else:
default_values = list(default_values)
+ if not isinstance(options, list):
+ options = list(options)
+
for value in default_values:
if value not in options:
raise StreamlitAPIException(
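The core of the fix is the `list(options)` conversion: membership tests (`in`) against a `pandas.Series` look at the index labels, not the values, so the default lookup fails even when the value is present. A minimal, self-contained sketch of that behaviour — illustrative only, not Streamlit's actual code:
```python
# Sketch of why converting `options` to a list makes the default lookup work.
import pandas as pd

def check_and_convert_to_indices(options, default_values):
    # "Yellow" in names["labels"] is False: Series membership checks the index.
    if not isinstance(options, list):
        options = list(options)  # the conversion added by the patch above
    for value in default_values:
        if value not in options:
            raise ValueError("Every Multiselect default value must exist in options")
    return [options.index(value) for value in default_values]

names = pd.DataFrame({"labels": ["Green", "Yellow", "Red", "Blue"]})
print(check_and_convert_to_indices(names["labels"], ["Yellow"]))  # -> [1]
```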
| {"golden_diff": "diff --git a/lib/streamlit/elements/multiselect.py b/lib/streamlit/elements/multiselect.py\n--- a/lib/streamlit/elements/multiselect.py\n+++ b/lib/streamlit/elements/multiselect.py\n@@ -19,6 +19,7 @@\n from streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto\n from streamlit.state.widgets import register_widget\n from streamlit.type_util import is_type, ensure_iterable\n+\n from .form import current_form_id\n from .utils import check_callback_rules, check_session_state_rules\n \n@@ -112,6 +113,9 @@\n else:\n default_values = list(default_values)\n \n+ if not isinstance(options, list):\n+ options = list(options)\n+\n for value in default_values:\n if value not in options:\n raise StreamlitAPIException(\n", "issue": "Setting `default` on multiselect widget that uses pandas.Series as `options` causes an error\n### Summary\r\n\r\n[`st.multiselect`](https://docs.streamlit.io/en/stable/api.html?highlight=options#streamlit.multiselect) is supposed to accept `pandas.Series` objects as labels for the select options. Setting a `default` value while using `options=pandas.Series` leads to:\r\n\r\n> StreamlitAPIException : Every Multiselect default value must exist in options \r\n\r\n### Steps to reproduce\r\nRun the below code snippet.\r\n\r\nCode snippet:\r\n\r\n```python\r\nimport streamlit as st\r\nimport pandas as pd\r\n\r\nnames = pd.DataFrame({'labels':[\"Green\",\"Yellow\",\"Red\",\"Blue\"]})\r\nnameSelect = st.multiselect(\r\n \"What are your favorite colors\",\r\n options=names['labels'],\r\n default=[\"Yellow\"]\r\n)\r\n```\r\n\r\n### Is this a regression?\r\n\r\nPossibly a core regression.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 0.82.0\r\n- Python version: 3.8.5\r\n- OS version: Ubuntu 20.04.2 LTS\r\n- Browser version: Firefox 89.0 (64-bit)\r\n\r\n### Additional information\r\n\r\nOriginal source: https://discuss.streamlit.io/t/setting-default-value-on-multiselect-that-uses-a-series-for-the-options/13630\r\n\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import cast, List\n\nimport streamlit\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto\nfrom streamlit.state.widgets import register_widget\nfrom streamlit.type_util import is_type, ensure_iterable\nfrom .form import current_form_id\nfrom .utils import check_callback_rules, check_session_state_rules\n\n\nclass MultiSelectMixin:\n def multiselect(\n self,\n label,\n options,\n default=None,\n format_func=str,\n key=None,\n help=None,\n on_change=None,\n args=None,\n kwargs=None,\n ):\n \"\"\"Display a multiselect widget.\n The multiselect widget starts as empty.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this select widget is for.\n options : list, tuple, numpy.ndarray, pandas.Series, or pandas.DataFrame\n Labels for the select options. This will be cast to str internally\n by default. 
For pandas.DataFrame, the first column is selected.\n default: [str] or None\n List of default values.\n format_func : function\n Function to modify the display of selectbox options. It receives\n the raw option as an argument and should output the label to be\n shown for that option. This has no impact on the return value of\n the selectbox.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n help : str\n An optional tooltip that gets displayed next to the multiselect.\n on_change : callable\n An optional callback invoked when this multiselect's value changes.\n args : tuple\n An optional tuple of args to pass to the callback.\n kwargs : dict\n An optional dict of kwargs to pass to the callback.\n\n Returns\n -------\n list\n A list with the selected options\n\n Example\n -------\n >>> options = st.multiselect(\n ... 'What are your favorite colors',\n ... ['Green', 'Yellow', 'Red', 'Blue'],\n ... ['Yellow', 'Red'])\n >>>\n >>> st.write('You selected:', options)\n\n .. note::\n User experience can be degraded for large lists of `options` (100+), as this widget\n is not designed to handle arbitrary text search efficiently. See this\n `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_\n on the Streamlit community forum for more information and\n `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue.\n\n \"\"\"\n check_callback_rules(self.dg, on_change)\n check_session_state_rules(default_value=default, key=key)\n\n options = ensure_iterable(options)\n\n # Perform validation checks and return indices base on the default values.\n def _check_and_convert_to_indices(options, default_values):\n if default_values is None and None not in options:\n return None\n\n if not isinstance(default_values, list):\n # This if is done before others because calling if not x (done\n # right below) when x is of type pd.Series() or np.array() throws a\n # ValueError exception.\n if is_type(default_values, \"numpy.ndarray\") or is_type(\n default_values, \"pandas.core.series.Series\"\n ):\n default_values = list(default_values)\n elif not default_values or default_values in options:\n default_values = [default_values]\n else:\n default_values = list(default_values)\n\n for value in default_values:\n if value not in options:\n raise StreamlitAPIException(\n \"Every Multiselect default value must exist in options\"\n )\n\n return [options.index(value) for value in default_values]\n\n indices = _check_and_convert_to_indices(options, default)\n multiselect_proto = MultiSelectProto()\n multiselect_proto.label = label\n default_value = [] if indices is None else indices\n multiselect_proto.default[:] = default_value\n multiselect_proto.options[:] = [str(format_func(option)) for option in options]\n multiselect_proto.form_id = current_form_id(self.dg)\n if help is not None:\n multiselect_proto.help = help\n\n def deserialize_multiselect(ui_value, widget_id=\"\") -> List[str]:\n current_value = ui_value if ui_value is not None else default_value\n return [options[i] for i in current_value]\n\n def serialize_multiselect(value):\n return _check_and_convert_to_indices(options, value)\n\n current_value, set_frontend_value = register_widget(\n \"multiselect\",\n multiselect_proto,\n user_key=key,\n on_change_handler=on_change,\n args=args,\n kwargs=kwargs,\n 
deserializer=deserialize_multiselect,\n serializer=serialize_multiselect,\n )\n\n if set_frontend_value:\n multiselect_proto.value[:] = _check_and_convert_to_indices(\n options, current_value\n )\n multiselect_proto.set_value = True\n\n self.dg._enqueue(\"multiselect\", multiselect_proto)\n return current_value\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/multiselect.py"}], "after_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import cast, List\n\nimport streamlit\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.MultiSelect_pb2 import MultiSelect as MultiSelectProto\nfrom streamlit.state.widgets import register_widget\nfrom streamlit.type_util import is_type, ensure_iterable\n\nfrom .form import current_form_id\nfrom .utils import check_callback_rules, check_session_state_rules\n\n\nclass MultiSelectMixin:\n def multiselect(\n self,\n label,\n options,\n default=None,\n format_func=str,\n key=None,\n help=None,\n on_change=None,\n args=None,\n kwargs=None,\n ):\n \"\"\"Display a multiselect widget.\n The multiselect widget starts as empty.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this select widget is for.\n options : list, tuple, numpy.ndarray, pandas.Series, or pandas.DataFrame\n Labels for the select options. This will be cast to str internally\n by default. For pandas.DataFrame, the first column is selected.\n default: [str] or None\n List of default values.\n format_func : function\n Function to modify the display of selectbox options. It receives\n the raw option as an argument and should output the label to be\n shown for that option. This has no impact on the return value of\n the selectbox.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n help : str\n An optional tooltip that gets displayed next to the multiselect.\n on_change : callable\n An optional callback invoked when this multiselect's value changes.\n args : tuple\n An optional tuple of args to pass to the callback.\n kwargs : dict\n An optional dict of kwargs to pass to the callback.\n\n Returns\n -------\n list\n A list with the selected options\n\n Example\n -------\n >>> options = st.multiselect(\n ... 'What are your favorite colors',\n ... ['Green', 'Yellow', 'Red', 'Blue'],\n ... ['Yellow', 'Red'])\n >>>\n >>> st.write('You selected:', options)\n\n .. note::\n User experience can be degraded for large lists of `options` (100+), as this widget\n is not designed to handle arbitrary text search efficiently. 
See this\n `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_\n on the Streamlit community forum for more information and\n `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue.\n\n \"\"\"\n check_callback_rules(self.dg, on_change)\n check_session_state_rules(default_value=default, key=key)\n\n options = ensure_iterable(options)\n\n # Perform validation checks and return indices base on the default values.\n def _check_and_convert_to_indices(options, default_values):\n if default_values is None and None not in options:\n return None\n\n if not isinstance(default_values, list):\n # This if is done before others because calling if not x (done\n # right below) when x is of type pd.Series() or np.array() throws a\n # ValueError exception.\n if is_type(default_values, \"numpy.ndarray\") or is_type(\n default_values, \"pandas.core.series.Series\"\n ):\n default_values = list(default_values)\n elif not default_values or default_values in options:\n default_values = [default_values]\n else:\n default_values = list(default_values)\n\n if not isinstance(options, list):\n options = list(options)\n\n for value in default_values:\n if value not in options:\n raise StreamlitAPIException(\n \"Every Multiselect default value must exist in options\"\n )\n\n return [options.index(value) for value in default_values]\n\n indices = _check_and_convert_to_indices(options, default)\n multiselect_proto = MultiSelectProto()\n multiselect_proto.label = label\n default_value = [] if indices is None else indices\n multiselect_proto.default[:] = default_value\n multiselect_proto.options[:] = [str(format_func(option)) for option in options]\n multiselect_proto.form_id = current_form_id(self.dg)\n if help is not None:\n multiselect_proto.help = help\n\n def deserialize_multiselect(ui_value, widget_id=\"\") -> List[str]:\n current_value = ui_value if ui_value is not None else default_value\n return [options[i] for i in current_value]\n\n def serialize_multiselect(value):\n return _check_and_convert_to_indices(options, value)\n\n current_value, set_frontend_value = register_widget(\n \"multiselect\",\n multiselect_proto,\n user_key=key,\n on_change_handler=on_change,\n args=args,\n kwargs=kwargs,\n deserializer=deserialize_multiselect,\n serializer=serialize_multiselect,\n )\n\n if set_frontend_value:\n multiselect_proto.value[:] = _check_and_convert_to_indices(\n options, current_value\n )\n multiselect_proto.set_value = True\n\n self.dg._enqueue(\"multiselect\", multiselect_proto)\n return current_value\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/multiselect.py"}]} | 2,251 | 184 |
gh_patches_debug_11722 | rasdani/github-patches | git_diff | kymatio__kymatio-184 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Warning in `mnist.py`
Specifically, https://github.com/kymatio/kymatio/blob/289bc26551e92456ef7a48fbe83d48e157f7632c/examples/2d/mnist.py#L50 generates a warning saying that `size_average` will be deprecated and says to use `reduction='sum'` instead. Is this ok for us to do?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/2d/mnist.py`
Content:
```
1 """
2 Classification of handwritten digits
3 ====================================
4
5 Based on pytorch example for MNIST
6 """
7
8
9 import torch.nn as nn
10 import torch.optim
11 from torchvision import datasets, transforms
12 import torch.nn.functional as F
13 from kymatio import Scattering2D
14 import kymatio.datasets as scattering_datasets
15 import kymatio
16 import torch
17 import argparse
18 import math
19
20 class View(nn.Module):
21 def __init__(self, *args):
22 super(View, self).__init__()
23 self.shape = args
24
25 def forward(self, x):
26 return x.view(-1,*self.shape)
27
28 def train(model, device, train_loader, optimizer, epoch, scattering):
29 model.train()
30 for batch_idx, (data, target) in enumerate(train_loader):
31 data, target = data.to(device), target.to(device)
32 optimizer.zero_grad()
33 output = model(scattering(data))
34 loss = F.cross_entropy(output, target)
35 loss.backward()
36 optimizer.step()
37 if batch_idx % 50 == 0:
38 print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
39 epoch, batch_idx * len(data), len(train_loader.dataset),
40 100. * batch_idx / len(train_loader), loss.item()))
41
42 def test(model, device, test_loader, scattering):
43 model.eval()
44 test_loss = 0
45 correct = 0
46 with torch.no_grad():
47 for data, target in test_loader:
48 data, target = data.to(device), target.to(device)
49 output = model(scattering(data))
50 test_loss += F.cross_entropy(output, target, size_average=False).item() # sum up batch loss
51 pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
52 correct += pred.eq(target.view_as(pred)).sum().item()
53
54 test_loss /= len(test_loader.dataset)
55 print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
56 test_loss, correct, len(test_loader.dataset),
57 100. * correct / len(test_loader.dataset)))
58
59 def main():
60 """Train a simple Hybrid Scattering + CNN model on MNIST.
61
62 Three models are demoed:
63 'linear' - scattering + linear model
64 'mlp' - scattering + MLP
65 'cnn' - scattering + CNN
66
67 scattering 1st order can also be set by the mode
68 Scattering features are normalized by batch normalization.
69
70 scatter + linear achieves 99.15% in 15 epochs
71 scatter + cnn achieves 99.3% in 15 epochs
72
73 """
74 parser = argparse.ArgumentParser(description='MNIST scattering + hybrid examples')
75 parser.add_argument('--mode', type=int, default=2,help='scattering 1st or 2nd order')
76 parser.add_argument('--classifier', type=str, default='linear',help='classifier model')
77 args = parser.parse_args()
78 assert(args.classifier in ['linear','mlp','cnn'])
79
80 use_cuda = torch.cuda.is_available()
81 device = torch.device("cuda" if use_cuda else "cpu")
82
83 if args.mode == 1:
84 scattering = Scattering2D(M=28, N=28, J=2,order2=False)
85 K = 17
86 else:
87 scattering = Scattering2D(M=28, N=28, J=2)
88 K = 81
89 if use_cuda:
90 scattering = scattering.cuda()
91
92
93
94
95 if args.classifier == 'cnn':
96 model = nn.Sequential(
97 View(K, 7, 7),
98 nn.BatchNorm2d(K),
99 nn.Conv2d(K, 64, 3,padding=1), nn.ReLU(),
100 View(64*7*7),
101 nn.Linear(64 * 7 * 7, 512), nn.ReLU(),
102 nn.Linear(512, 10)
103 ).to(device)
104
105 elif args.classifier == 'mlp':
106 model = nn.Sequential(
107 View(K, 7, 7),
108 nn.BatchNorm2d(K),
109 View(K*7*7),
110 nn.Linear(K*7*7, 512), nn.ReLU(),
111 nn.Linear(512, 512), nn.ReLU(),
112 nn.Linear(512, 10)
113 )
114
115 elif args.classifier == 'linear':
116 model = nn.Sequential(
117 View(K, 7, 7),
118 nn.BatchNorm2d(K),
119 View(K * 7 * 7),
120 nn.Linear(K * 7 * 7, 10)
121 )
122 else:
123 raise ValueError('Classifier should be cnn/mlp/linear')
124
125 model.to(device)
126
127 #initialize
128 for m in model.modules():
129 if isinstance(m, nn.Conv2d):
130 n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
131 m.weight.data.normal_(0, 2./math.sqrt(n))
132 m.bias.data.zero_()
133 if isinstance(m, nn.Linear):
134 m.weight.data.normal_(0, 2./math.sqrt(m.in_features))
135 m.bias.data.zero_()
136
137 # DataLoaders
138 if use_cuda:
139 num_workers = 4
140 pin_memory = True
141 else:
142 num_workers = None
143 pin_memory = False
144
145 train_loader = torch.utils.data.DataLoader(
146 datasets.MNIST(scattering_datasets.get_dataset_dir('MNIST'), train=True, download=True,
147 transform=transforms.Compose([
148 transforms.ToTensor(),
149 transforms.Normalize((0.1307,), (0.3081,))
150 ])),
151 batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
152 test_loader = torch.utils.data.DataLoader(
153 datasets.MNIST(scattering_datasets.get_dataset_dir('MNIST'), train=False, transform=transforms.Compose([
154 transforms.ToTensor(),
155 transforms.Normalize((0.1307,), (0.3081,))
156 ])),
157 batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
158
159 # Optimizer
160 optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9,
161 weight_decay=0.0005)
162
163 for epoch in range(1, 16):
164 train( model, device, train_loader, optimizer, epoch, scattering)
165 test(model, device, test_loader, scattering)
166
167
168 if __name__ == '__main__':
169 main()
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/2d/mnist.py b/examples/2d/mnist.py
--- a/examples/2d/mnist.py
+++ b/examples/2d/mnist.py
@@ -47,7 +47,7 @@
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(scattering(data))
- test_loss += F.cross_entropy(output, target, size_average=False).item() # sum up batch loss
+ test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
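For context, `reduction='sum'` preserves what `size_average=False` did — summing the per-sample losses over the batch — while avoiding the deprecation warning (assuming a reasonably recent PyTorch). A small standalone check of that equivalence:
```python
# Sketch: reduction='sum' equals the mean loss scaled by the batch size.
import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)          # toy batch of 4 samples, 10 classes
target = torch.randint(0, 10, (4,))  # toy labels

summed = F.cross_entropy(logits, target, reduction="sum")
mean = F.cross_entropy(logits, target, reduction="mean")
assert torch.allclose(summed, mean * logits.shape[0])
```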
| {"golden_diff": "diff --git a/examples/2d/mnist.py b/examples/2d/mnist.py\n--- a/examples/2d/mnist.py\n+++ b/examples/2d/mnist.py\n@@ -47,7 +47,7 @@\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(scattering(data))\n- test_loss += F.cross_entropy(output, target, size_average=False).item() # sum up batch loss\n+ test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n", "issue": "Warning in `mnist.py`\nSpecifically, https://github.com/kymatio/kymatio/blob/289bc26551e92456ef7a48fbe83d48e157f7632c/examples/2d/mnist.py#L50 generates a warning saying that `size_average` will be deprecated and says to use `reduction='sum'` instead. Is this ok for us to do?\n", "before_files": [{"content": "\"\"\"\nClassification of handwritten digits\n====================================\n\nBased on pytorch example for MNIST\n\"\"\"\n\n\nimport torch.nn as nn\nimport torch.optim\nfrom torchvision import datasets, transforms\nimport torch.nn.functional as F\nfrom kymatio import Scattering2D\nimport kymatio.datasets as scattering_datasets\nimport kymatio\nimport torch\nimport argparse\nimport math\n\nclass View(nn.Module):\n def __init__(self, *args):\n super(View, self).__init__()\n self.shape = args\n\n def forward(self, x):\n return x.view(-1,*self.shape)\n\ndef train(model, device, train_loader, optimizer, epoch, scattering):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(scattering(data))\n loss = F.cross_entropy(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % 50 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\ndef test(model, device, test_loader, scattering):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(scattering(data))\n test_loss += F.cross_entropy(output, target, size_average=False).item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n\ndef main():\n \"\"\"Train a simple Hybrid Scattering + CNN model on MNIST.\n\n Three models are demoed:\n 'linear' - scattering + linear model\n 'mlp' - scattering + MLP\n 'cnn' - scattering + CNN\n\n scattering 1st order can also be set by the mode\n Scattering features are normalized by batch normalization.\n\n scatter + linear achieves 99.15% in 15 epochs\n scatter + cnn achieves 99.3% in 15 epochs\n\n \"\"\"\n parser = argparse.ArgumentParser(description='MNIST scattering + hybrid examples')\n parser.add_argument('--mode', type=int, default=2,help='scattering 1st or 2nd order')\n parser.add_argument('--classifier', type=str, default='linear',help='classifier model')\n args = parser.parse_args()\n assert(args.classifier in ['linear','mlp','cnn'])\n\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n if args.mode == 1:\n scattering = Scattering2D(M=28, N=28, J=2,order2=False)\n K = 17\n else:\n scattering = Scattering2D(M=28, N=28, J=2)\n K = 81\n if use_cuda:\n scattering = scattering.cuda()\n\n\n\n\n if args.classifier == 'cnn':\n model = nn.Sequential(\n View(K, 7, 7),\n nn.BatchNorm2d(K),\n nn.Conv2d(K, 64, 3,padding=1), nn.ReLU(),\n View(64*7*7),\n nn.Linear(64 * 7 * 7, 512), nn.ReLU(),\n nn.Linear(512, 10)\n ).to(device)\n\n elif args.classifier == 'mlp':\n model = nn.Sequential(\n View(K, 7, 7),\n nn.BatchNorm2d(K),\n View(K*7*7),\n nn.Linear(K*7*7, 512), nn.ReLU(),\n nn.Linear(512, 512), nn.ReLU(),\n nn.Linear(512, 10)\n )\n\n elif args.classifier == 'linear':\n model = nn.Sequential(\n View(K, 7, 7),\n nn.BatchNorm2d(K),\n View(K * 7 * 7),\n nn.Linear(K * 7 * 7, 10)\n )\n else:\n raise ValueError('Classifier should be cnn/mlp/linear')\n\n model.to(device)\n\n #initialize\n for m in model.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels\n m.weight.data.normal_(0, 2./math.sqrt(n))\n m.bias.data.zero_()\n if isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 2./math.sqrt(m.in_features))\n m.bias.data.zero_()\n\n # DataLoaders\n if use_cuda:\n num_workers = 4\n pin_memory = True\n else:\n num_workers = None\n pin_memory = False\n\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(scattering_datasets.get_dataset_dir('MNIST'), train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(scattering_datasets.get_dataset_dir('MNIST'), train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)\n\n # Optimizer\n optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9,\n weight_decay=0.0005)\n\n for epoch in range(1, 16):\n train( model, device, train_loader, optimizer, epoch, scattering)\n test(model, device, test_loader, scattering)\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/2d/mnist.py"}], "after_files": [{"content": "\"\"\"\nClassification of handwritten digits\n====================================\n\nBased on pytorch example for MNIST\n\"\"\"\n\n\nimport torch.nn as nn\nimport torch.optim\nfrom torchvision import datasets, transforms\nimport torch.nn.functional as F\nfrom kymatio import Scattering2D\nimport kymatio.datasets as 
scattering_datasets\nimport kymatio\nimport torch\nimport argparse\nimport math\n\nclass View(nn.Module):\n def __init__(self, *args):\n super(View, self).__init__()\n self.shape = args\n\n def forward(self, x):\n return x.view(-1,*self.shape)\n\ndef train(model, device, train_loader, optimizer, epoch, scattering):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(scattering(data))\n loss = F.cross_entropy(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % 50 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\ndef test(model, device, test_loader, scattering):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(scattering(data))\n test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\ndef main():\n \"\"\"Train a simple Hybrid Scattering + CNN model on MNIST.\n\n Three models are demoed:\n 'linear' - scattering + linear model\n 'mlp' - scattering + MLP\n 'cnn' - scattering + CNN\n\n scattering 1st order can also be set by the mode\n Scattering features are normalized by batch normalization.\n\n scatter + linear achieves 99.15% in 15 epochs\n scatter + cnn achieves 99.3% in 15 epochs\n\n \"\"\"\n parser = argparse.ArgumentParser(description='MNIST scattering + hybrid examples')\n parser.add_argument('--mode', type=int, default=2,help='scattering 1st or 2nd order')\n parser.add_argument('--classifier', type=str, default='linear',help='classifier model')\n args = parser.parse_args()\n assert(args.classifier in ['linear','mlp','cnn'])\n\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n if args.mode == 1:\n scattering = Scattering2D(M=28, N=28, J=2,order2=False)\n K = 17\n else:\n scattering = Scattering2D(M=28, N=28, J=2)\n K = 81\n if use_cuda:\n scattering = scattering.cuda()\n\n\n\n\n if args.classifier == 'cnn':\n model = nn.Sequential(\n View(K, 7, 7),\n nn.BatchNorm2d(K),\n nn.Conv2d(K, 64, 3,padding=1), nn.ReLU(),\n View(64*7*7),\n nn.Linear(64 * 7 * 7, 512), nn.ReLU(),\n nn.Linear(512, 10)\n ).to(device)\n\n elif args.classifier == 'mlp':\n model = nn.Sequential(\n View(K, 7, 7),\n nn.BatchNorm2d(K),\n View(K*7*7),\n nn.Linear(K*7*7, 512), nn.ReLU(),\n nn.Linear(512, 512), nn.ReLU(),\n nn.Linear(512, 10)\n )\n\n elif args.classifier == 'linear':\n model = nn.Sequential(\n View(K, 7, 7),\n nn.BatchNorm2d(K),\n View(K * 7 * 7),\n nn.Linear(K * 7 * 7, 10)\n )\n else:\n raise ValueError('Classifier should be cnn/mlp/linear')\n\n model.to(device)\n\n #initialize\n for m in model.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels\n m.weight.data.normal_(0, 2./math.sqrt(n))\n m.bias.data.zero_()\n if isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 2./math.sqrt(m.in_features))\n m.bias.data.zero_()\n\n # 
DataLoaders\n if use_cuda:\n num_workers = 4\n pin_memory = True\n else:\n num_workers = None\n pin_memory = False\n\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(scattering_datasets.get_dataset_dir('MNIST'), train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(scattering_datasets.get_dataset_dir('MNIST'), train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)\n\n # Optimizer\n optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9,\n weight_decay=0.0005)\n\n for epoch in range(1, 16):\n train( model, device, train_loader, optimizer, epoch, scattering)\n test(model, device, test_loader, scattering)\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/2d/mnist.py"}]} | 2,217 | 170 |
gh_patches_debug_9130 | rasdani/github-patches | git_diff | opsdroid__opsdroid-615 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Python 3.7 support
We need to update opsdroid to be fully supported in 3.7.
- [x] Test against Python 3.7.
- [x] Travis
- [x] AppVeyor
- [x] Fix any bugs highlighted.
- [x] Add 3.7 to supported versions in `setup.py`.
- [ ] ~Update docker base image to be latest supported version~.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2 import os
3 from setuptools import setup, find_packages
4 from setuptools.command.build_py import build_py
5 from setuptools.command.sdist import sdist
6 from setuptools.command.develop import develop
7 from opsdroid import __version__
8
9 PACKAGE_NAME = 'opsdroid'
10 HERE = os.path.abspath(os.path.dirname(__file__))
11 README = open(os.path.join(HERE, 'README.md'), encoding="utf8").read()
12
13 PACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',
14 'modules.*', 'docs', 'docs.*'])
15
16
17 # For now we simply define the install_requires based on the contents
18 # of requirements.txt. In the future, install_requires may become much
19 # looser than the (automatically) resolved requirements.txt.
20 with open(os.path.join(HERE, 'requirements.txt'), 'r') as fh:
21 REQUIRES = [line.strip() for line in fh]
22
23
24 class Develop(develop):
25 """Custom `develop` command to always build mo files on install -e."""
26
27 def run(self):
28 self.run_command('compile_catalog')
29 develop.run(self) # old style class
30
31
32 class BuildPy(build_py):
33 """Custom `build_py` command to always build mo files for wheels."""
34
35 def run(self):
36 self.run_command('compile_catalog')
37 build_py.run(self) # old style class
38
39
40 class Sdist(sdist):
41 """Custom `sdist` command to ensure that mo files are always created."""
42
43 def run(self):
44 self.run_command('compile_catalog')
45 sdist.run(self) # old style class
46
47
48 setup(
49 name=PACKAGE_NAME,
50 version=__version__,
51 license='Apache License 2.0',
52 url='https://opsdroid.github.io/',
53 download_url='https://github.com/opsdroid/opsdroid/releases',
54 author='Jacob Tomlinson',
55 author_email='[email protected]',
56 description='An open source ChatOps bot framework.',
57 long_description=README,
58 packages=PACKAGES,
59 include_package_data=True,
60 zip_safe=False,
61 platforms='any',
62 classifiers=[
63 'Development Status :: 4 - Beta',
64 'Environment :: Console',
65 'Framework :: AsyncIO',
66 'Intended Audience :: Developers',
67 'Intended Audience :: System Administrators',
68 'Intended Audience :: Information Technology',
69 'License :: OSI Approved :: Apache Software License',
70 'Programming Language :: Python',
71 'Programming Language :: Python :: 3',
72 'Programming Language :: Python :: 3 :: Only',
73 'Programming Language :: Python :: 3.5',
74 'Programming Language :: Python :: 3.6',
75 'Topic :: Communications :: Chat',
76 'Topic :: Scientific/Engineering :: Artificial Intelligence',
77 'Topic :: Software Development :: Libraries :: Python Modules'
78 ],
79 install_requires=REQUIRES,
80 test_suite='tests',
81 keywords=[
82 'bot',
83 'bot-framework',
84 'opsdroid',
85 'botkit',
86 'python3',
87 'asyncio',
88 'chatops',
89 'devops',
90 'nlu'
91 ],
92 setup_requires=['Babel'],
93 cmdclass={'sdist': Sdist, 'build_py': BuildPy, 'develop': Develop},
94 entry_points={
95 'console_scripts': [
96 'opsdroid = opsdroid.__main__:main'
97 ]
98 },
99 )
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -72,6 +72,7 @@
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
'Topic :: Communications :: Chat',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries :: Python Modules'
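The patch itself only adds the trove classifier; projects often pair that with a `python_requires` bound and a matching CI matrix. A hedged sketch of the resulting metadata — the `python_requires` entry is an assumption, not part of the actual change:
```python
# Illustrative metadata only; python_requires is an optional companion change.
SETUP_METADATA = {
    "python_requires": ">=3.5",
    "classifiers": [
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",  # newly declared support
    ],
}

if __name__ == "__main__":
    print("\n".join(SETUP_METADATA["classifiers"]))
```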
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -72,6 +72,7 @@\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n+ 'Programming Language :: Python :: 3.7',\n 'Topic :: Communications :: Chat',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n", "issue": "Add Python 3.7 support\nWe need to update opsdroid to be fully supported in 3.7.\r\n\r\n- [x] Test against Python 3.7.\r\n - [x] Travis\r\n - [x] AppVeyor\r\n- [x] Fix any bugs highlighted.\r\n- [x] Add 3.7 to supported versions in `setup.py`.\r\n- [ ] ~Update docker base image to be latest supported version~.\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nfrom opsdroid import __version__\n\nPACKAGE_NAME = 'opsdroid'\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, 'README.md'), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',\n 'modules.*', 'docs', 'docs.*'])\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, 'requirements.txt'), 'r') as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=__version__,\n license='Apache License 2.0',\n url='https://opsdroid.github.io/',\n download_url='https://github.com/opsdroid/opsdroid/releases',\n author='Jacob Tomlinson',\n author_email='[email protected]',\n description='An open source ChatOps bot framework.',\n long_description=README,\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Framework :: AsyncIO',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Communications :: Chat',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n install_requires=REQUIRES,\n test_suite='tests',\n keywords=[\n 'bot',\n 'bot-framework',\n 'opsdroid',\n 'botkit',\n 'python3',\n 'asyncio',\n 'chatops',\n 'devops',\n 'nlu'\n ],\n setup_requires=['Babel'],\n 
cmdclass={'sdist': Sdist, 'build_py': BuildPy, 'develop': Develop},\n entry_points={\n 'console_scripts': [\n 'opsdroid = opsdroid.__main__:main'\n ]\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nfrom opsdroid import __version__\n\nPACKAGE_NAME = 'opsdroid'\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, 'README.md'), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',\n 'modules.*', 'docs', 'docs.*'])\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, 'requirements.txt'), 'r') as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=__version__,\n license='Apache License 2.0',\n url='https://opsdroid.github.io/',\n download_url='https://github.com/opsdroid/opsdroid/releases',\n author='Jacob Tomlinson',\n author_email='[email protected]',\n description='An open source ChatOps bot framework.',\n long_description=README,\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Framework :: AsyncIO',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Communications :: Chat',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n install_requires=REQUIRES,\n test_suite='tests',\n keywords=[\n 'bot',\n 'bot-framework',\n 'opsdroid',\n 'botkit',\n 'python3',\n 'asyncio',\n 'chatops',\n 'devops',\n 'nlu'\n ],\n setup_requires=['Babel'],\n cmdclass={'sdist': Sdist, 'build_py': BuildPy, 'develop': Develop},\n entry_points={\n 'console_scripts': [\n 'opsdroid = opsdroid.__main__:main'\n ]\n },\n)\n", "path": "setup.py"}]} | 1,276 | 112 |
gh_patches_debug_43223 | rasdani/github-patches | git_diff | ephios-dev__ephios-80 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Edit users
As a manager, I want to edit a user. When doing so, I want to have the same options as when creating the user. The user list should offer a corresponding button for each user. The user should be notified of any changes by email.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `user_management/mail.py`
Content:
```
1 from django.contrib.auth.tokens import default_token_generator
2 from django.core import mail
3 from django.core.mail import EmailMultiAlternatives
4 from django.template.loader import render_to_string
5 from django.urls import reverse
6 from django.utils.encoding import force_bytes
7 from django.utils.http import urlsafe_base64_encode
8 from django.utils.translation import gettext as _
9
10 from jep.settings import SITE_URL
11
12
13 def send_account_creation_info(userprofile):
14 messages = []
15 subject = _("Welcome to JEP!")
16 uid = urlsafe_base64_encode(force_bytes(userprofile.id))
17 token = default_token_generator.make_token(userprofile)
18 reset_link = reverse("password_reset_confirm", kwargs={"uidb64": uid, "token": token})
19 text_content = _(
20 "You're receiving this email because a new account has been created for you at JEP.\n"
21 "Please go to the following page and choose a password: {url}{reset_link}\n"
22 "Your username is your email address: {email}\n"
23 ).format(url=SITE_URL, reset_link=reset_link, email=userprofile.email)
24
25 html_content = render_to_string(
26 "user_management/new_account_email.html",
27 {"uid": uid, "token": token, "site_url": SITE_URL, "email": userprofile.email,},
28 )
29 message = EmailMultiAlternatives(to=[userprofile.email], subject=subject, body=text_content)
30 message.attach_alternative(html_content, "text/html")
31 messages.append(message)
32
33 mail.get_connection().send_messages(messages)
34
```
Path: `user_management/urls.py`
Content:
```
1 from django.urls import path
2
3 from user_management import views
4
5 app_name = "user_management"
6 urlpatterns = [
7 path("profile/", views.ProfileView.as_view(), name="profile"),
8 path("groups/", views.GroupListView.as_view(), name="group_list"),
9 path("groups/<int:pk>/edit", views.GroupUpdateView.as_view(), name="group_edit"),
10 path("groups/<int:pk>/delete", views.GroupDeleteView.as_view(), name="group_delete"),
11 path("groups/create", views.GroupCreateView.as_view(), name="group_add"),
12 path("users/", views.UserProfileListView.as_view(), name="user_list",),
13 path("users/create/", views.UserProfileCreateView.as_view(), name="userprofile_create",),
14 ]
15
```
Path: `user_management/views.py`
Content:
```
1 from django.contrib import messages
2 from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
3 from django.contrib.auth.models import Group
4 from django.urls import reverse
5 from django.views.generic import DetailView, ListView, UpdateView, CreateView, DeleteView
6 from guardian.shortcuts import get_objects_for_group
7
8 from user_management import mail
9 from user_management.forms import GroupForm, UserProfileForm
10 from django.utils.translation import gettext as _
11
12 from user_management.models import UserProfile
13
14
15 class ProfileView(LoginRequiredMixin, DetailView):
16 def get_object(self, queryset=None):
17 return self.request.user
18
19
20 class UserProfileListView(PermissionRequiredMixin, ListView):
21 model = UserProfile
22 permission_required = "user_management.view_userprofile"
23
24
25 class UserProfileCreateView(PermissionRequiredMixin, CreateView):
26 template_name = "user_management/userprofile_form.html"
27 permission_required = "user_management.add_userprofile"
28 model = UserProfile
29 form_class = UserProfileForm
30
31 def get_success_url(self):
32 messages.success(self.request, _("User added successfully."))
33 return reverse("user_management:user_list")
34
35 def form_valid(self, form):
36 response = super().form_valid(form)
37 userprofile = self.object
38 if userprofile.is_active:
39 mail.send_account_creation_info(userprofile)
40 return response
41
42
43 class GroupListView(PermissionRequiredMixin, ListView):
44 model = Group
45 permission_required = "auth.view_group"
46 template_name = "user_management/group_list.html"
47
48
49 class GroupCreateView(PermissionRequiredMixin, CreateView):
50 model = Group
51 permission_required = "auth.add_group"
52 template_name = "user_management/group_form.html"
53 form_class = GroupForm
54
55 def get_form_kwargs(self):
56 kwargs = super().get_form_kwargs()
57 kwargs["initial"] = {
58 "users": UserProfile.objects.none(),
59 "can_add_event": False,
60 "publish_event_for_group": Group.objects.none(),
61 }
62 return kwargs
63
64 def get_success_url(self):
65 messages.success(self.request, _("Group created successfully."))
66 return reverse("user_management:group_list")
67
68
69 class GroupUpdateView(PermissionRequiredMixin, UpdateView):
70 model = Group
71 permission_required = "auth.change_group"
72 template_name = "user_management/group_form.html"
73 form_class = GroupForm
74
75 def get_form_kwargs(self):
76 kwargs = super().get_form_kwargs()
77 kwargs["initial"] = {
78 "users": self.object.user_set.all(),
79 "can_view_past_event": self.object.permissions.filter(
80 codename="view_past_event"
81 ).exists(),
82 "can_add_event": self.object.permissions.filter(codename="add_event").exists(),
83 "publish_event_for_group": get_objects_for_group(
84 self.object, "publish_event_for_group", klass=Group
85 ),
86 }
87 return kwargs
88
89 def get_success_url(self):
90 return reverse("user_management:group_list")
91
92
93 class GroupDeleteView(PermissionRequiredMixin, DeleteView):
94 model = Group
95 permission_required = "auth.delete_group"
96 template_name = "user_management/group_confirm_delete.html"
97
98 def get_success_url(self):
99 return reverse("user_management:group_list")
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/user_management/mail.py b/user_management/mail.py
--- a/user_management/mail.py
+++ b/user_management/mail.py
@@ -11,7 +11,6 @@
def send_account_creation_info(userprofile):
- messages = []
subject = _("Welcome to JEP!")
uid = urlsafe_base64_encode(force_bytes(userprofile.id))
token = default_token_generator.make_token(userprofile)
@@ -24,10 +23,26 @@
html_content = render_to_string(
"user_management/new_account_email.html",
- {"uid": uid, "token": token, "site_url": SITE_URL, "email": userprofile.email,},
+ {"uid": uid, "token": token, "site_url": SITE_URL, "email": userprofile.email},
)
message = EmailMultiAlternatives(to=[userprofile.email], subject=subject, body=text_content)
message.attach_alternative(html_content, "text/html")
- messages.append(message)
+ message.send()
- mail.get_connection().send_messages(messages)
+
+def send_account_update_info(userprofile):
+ subject = _("JEP account updated")
+ url = reverse("user_management:profile")
+ text_content = _(
+ "You're receiving this email because your account at JEP has been updated.\n"
+ "You can see the changes in your profile: {site_url}{url}\n"
+ "Your username is your email address: {email}\n"
+ ).format(site_url=SITE_URL, url=url, email=userprofile.email)
+
+ html_content = render_to_string(
+ "user_management/account_updated_email.html",
+ {"site_url": SITE_URL, "url": url, "email": userprofile.email},
+ )
+ message = EmailMultiAlternatives(to=[userprofile.email], subject=subject, body=text_content)
+ message.attach_alternative(html_content, "text/html")
+ message.send()
diff --git a/user_management/urls.py b/user_management/urls.py
--- a/user_management/urls.py
+++ b/user_management/urls.py
@@ -9,6 +9,7 @@
path("groups/<int:pk>/edit", views.GroupUpdateView.as_view(), name="group_edit"),
path("groups/<int:pk>/delete", views.GroupDeleteView.as_view(), name="group_delete"),
path("groups/create", views.GroupCreateView.as_view(), name="group_add"),
- path("users/", views.UserProfileListView.as_view(), name="user_list",),
+ path("users/", views.UserProfileListView.as_view(), name="userprofile_list",),
+ path("users/<int:pk>/edit", views.UserProfileUpdateView.as_view(), name="userprofile_edit",),
path("users/create/", views.UserProfileCreateView.as_view(), name="userprofile_create",),
]
diff --git a/user_management/views.py b/user_management/views.py
--- a/user_management/views.py
+++ b/user_management/views.py
@@ -30,7 +30,7 @@
def get_success_url(self):
messages.success(self.request, _("User added successfully."))
- return reverse("user_management:user_list")
+ return reverse("user_management:userprofile_list")
def form_valid(self, form):
response = super().form_valid(form)
@@ -40,6 +40,31 @@
return response
+class UserProfileUpdateView(PermissionRequiredMixin, UpdateView):
+ model = UserProfile
+ permission_required = "user_management.change_userprofile"
+ template_name = "user_management/userprofile_form.html"
+ form_class = UserProfileForm
+
+ def get_success_url(self):
+ messages.success(self.request, _("User updated successfully."))
+ return reverse("user_management:userprofile_list")
+
+ def form_valid(self, form):
+ response = super().form_valid(form)
+ userprofile = self.object
+ if userprofile.is_active:
+ mail.send_account_update_info(userprofile)
+ return response
+
+ def get_form_kwargs(self):
+ kwargs = super().get_form_kwargs()
+ kwargs["initial"] = {
+ "groups": self.object.groups.all(),
+ }
+ return kwargs
+
+
class GroupListView(PermissionRequiredMixin, ListView):
model = Group
permission_required = "auth.view_group"
@@ -87,6 +112,7 @@
return kwargs
def get_success_url(self):
+ messages.success(self.request, _("Group updated successfully."))
return reverse("user_management:group_list")
| {"golden_diff": "diff --git a/user_management/mail.py b/user_management/mail.py\n--- a/user_management/mail.py\n+++ b/user_management/mail.py\n@@ -11,7 +11,6 @@\n \n \n def send_account_creation_info(userprofile):\n- messages = []\n subject = _(\"Welcome to JEP!\")\n uid = urlsafe_base64_encode(force_bytes(userprofile.id))\n token = default_token_generator.make_token(userprofile)\n@@ -24,10 +23,26 @@\n \n html_content = render_to_string(\n \"user_management/new_account_email.html\",\n- {\"uid\": uid, \"token\": token, \"site_url\": SITE_URL, \"email\": userprofile.email,},\n+ {\"uid\": uid, \"token\": token, \"site_url\": SITE_URL, \"email\": userprofile.email},\n )\n message = EmailMultiAlternatives(to=[userprofile.email], subject=subject, body=text_content)\n message.attach_alternative(html_content, \"text/html\")\n- messages.append(message)\n+ message.send()\n \n- mail.get_connection().send_messages(messages)\n+\n+def send_account_update_info(userprofile):\n+ subject = _(\"JEP account updated\")\n+ url = reverse(\"user_management:profile\")\n+ text_content = _(\n+ \"You're receiving this email because your account at JEP has been updated.\\n\"\n+ \"You can see the changes in your profile: {site_url}{url}\\n\"\n+ \"Your username is your email address: {email}\\n\"\n+ ).format(site_url=SITE_URL, url=url, email=userprofile.email)\n+\n+ html_content = render_to_string(\n+ \"user_management/account_updated_email.html\",\n+ {\"site_url\": SITE_URL, \"url\": url, \"email\": userprofile.email},\n+ )\n+ message = EmailMultiAlternatives(to=[userprofile.email], subject=subject, body=text_content)\n+ message.attach_alternative(html_content, \"text/html\")\n+ message.send()\ndiff --git a/user_management/urls.py b/user_management/urls.py\n--- a/user_management/urls.py\n+++ b/user_management/urls.py\n@@ -9,6 +9,7 @@\n path(\"groups/<int:pk>/edit\", views.GroupUpdateView.as_view(), name=\"group_edit\"),\n path(\"groups/<int:pk>/delete\", views.GroupDeleteView.as_view(), name=\"group_delete\"),\n path(\"groups/create\", views.GroupCreateView.as_view(), name=\"group_add\"),\n- path(\"users/\", views.UserProfileListView.as_view(), name=\"user_list\",),\n+ path(\"users/\", views.UserProfileListView.as_view(), name=\"userprofile_list\",),\n+ path(\"users/<int:pk>/edit\", views.UserProfileUpdateView.as_view(), name=\"userprofile_edit\",),\n path(\"users/create/\", views.UserProfileCreateView.as_view(), name=\"userprofile_create\",),\n ]\ndiff --git a/user_management/views.py b/user_management/views.py\n--- a/user_management/views.py\n+++ b/user_management/views.py\n@@ -30,7 +30,7 @@\n \n def get_success_url(self):\n messages.success(self.request, _(\"User added successfully.\"))\n- return reverse(\"user_management:user_list\")\n+ return reverse(\"user_management:userprofile_list\")\n \n def form_valid(self, form):\n response = super().form_valid(form)\n@@ -40,6 +40,31 @@\n return response\n \n \n+class UserProfileUpdateView(PermissionRequiredMixin, UpdateView):\n+ model = UserProfile\n+ permission_required = \"user_management.change_userprofile\"\n+ template_name = \"user_management/userprofile_form.html\"\n+ form_class = UserProfileForm\n+\n+ def get_success_url(self):\n+ messages.success(self.request, _(\"User updated successfully.\"))\n+ return reverse(\"user_management:userprofile_list\")\n+\n+ def form_valid(self, form):\n+ response = super().form_valid(form)\n+ userprofile = self.object\n+ if userprofile.is_active:\n+ mail.send_account_update_info(userprofile)\n+ return response\n+\n+ def 
get_form_kwargs(self):\n+ kwargs = super().get_form_kwargs()\n+ kwargs[\"initial\"] = {\n+ \"groups\": self.object.groups.all(),\n+ }\n+ return kwargs\n+\n+\n class GroupListView(PermissionRequiredMixin, ListView):\n model = Group\n permission_required = \"auth.view_group\"\n@@ -87,6 +112,7 @@\n return kwargs\n \n def get_success_url(self):\n+ messages.success(self.request, _(\"Group updated successfully.\"))\n return reverse(\"user_management:group_list\")\n", "issue": "Benutzer bearbeiten\nAls Manager m\u00f6chte ich einen Nutzer bearbeiten. Dabei m\u00f6chte ich die selben Optionen haben wie beim Anlegen des Nutzers. In der Liste der Nutzer soll es f\u00fcr jeden Nutzer eine entsprechende Schaltfl\u00e4che geben. Der Nutzer soll \u00fcber \u00c4nderungen per Mail informiert werden.\n", "before_files": [{"content": "from django.contrib.auth.tokens import default_token_generator\nfrom django.core import mail\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.utils.translation import gettext as _\n\nfrom jep.settings import SITE_URL\n\n\ndef send_account_creation_info(userprofile):\n messages = []\n subject = _(\"Welcome to JEP!\")\n uid = urlsafe_base64_encode(force_bytes(userprofile.id))\n token = default_token_generator.make_token(userprofile)\n reset_link = reverse(\"password_reset_confirm\", kwargs={\"uidb64\": uid, \"token\": token})\n text_content = _(\n \"You're receiving this email because a new account has been created for you at JEP.\\n\"\n \"Please go to the following page and choose a password: {url}{reset_link}\\n\"\n \"Your username is your email address: {email}\\n\"\n ).format(url=SITE_URL, reset_link=reset_link, email=userprofile.email)\n\n html_content = render_to_string(\n \"user_management/new_account_email.html\",\n {\"uid\": uid, \"token\": token, \"site_url\": SITE_URL, \"email\": userprofile.email,},\n )\n message = EmailMultiAlternatives(to=[userprofile.email], subject=subject, body=text_content)\n message.attach_alternative(html_content, \"text/html\")\n messages.append(message)\n\n mail.get_connection().send_messages(messages)\n", "path": "user_management/mail.py"}, {"content": "from django.urls import path\n\nfrom user_management import views\n\napp_name = \"user_management\"\nurlpatterns = [\n path(\"profile/\", views.ProfileView.as_view(), name=\"profile\"),\n path(\"groups/\", views.GroupListView.as_view(), name=\"group_list\"),\n path(\"groups/<int:pk>/edit\", views.GroupUpdateView.as_view(), name=\"group_edit\"),\n path(\"groups/<int:pk>/delete\", views.GroupDeleteView.as_view(), name=\"group_delete\"),\n path(\"groups/create\", views.GroupCreateView.as_view(), name=\"group_add\"),\n path(\"users/\", views.UserProfileListView.as_view(), name=\"user_list\",),\n path(\"users/create/\", views.UserProfileCreateView.as_view(), name=\"userprofile_create\",),\n]\n", "path": "user_management/urls.py"}, {"content": "from django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin\nfrom django.contrib.auth.models import Group\nfrom django.urls import reverse\nfrom django.views.generic import DetailView, ListView, UpdateView, CreateView, DeleteView\nfrom guardian.shortcuts import get_objects_for_group\n\nfrom user_management import mail\nfrom user_management.forms import GroupForm, UserProfileForm\nfrom 
django.utils.translation import gettext as _\n\nfrom user_management.models import UserProfile\n\n\nclass ProfileView(LoginRequiredMixin, DetailView):\n def get_object(self, queryset=None):\n return self.request.user\n\n\nclass UserProfileListView(PermissionRequiredMixin, ListView):\n model = UserProfile\n permission_required = \"user_management.view_userprofile\"\n\n\nclass UserProfileCreateView(PermissionRequiredMixin, CreateView):\n template_name = \"user_management/userprofile_form.html\"\n permission_required = \"user_management.add_userprofile\"\n model = UserProfile\n form_class = UserProfileForm\n\n def get_success_url(self):\n messages.success(self.request, _(\"User added successfully.\"))\n return reverse(\"user_management:user_list\")\n\n def form_valid(self, form):\n response = super().form_valid(form)\n userprofile = self.object\n if userprofile.is_active:\n mail.send_account_creation_info(userprofile)\n return response\n\n\nclass GroupListView(PermissionRequiredMixin, ListView):\n model = Group\n permission_required = \"auth.view_group\"\n template_name = \"user_management/group_list.html\"\n\n\nclass GroupCreateView(PermissionRequiredMixin, CreateView):\n model = Group\n permission_required = \"auth.add_group\"\n template_name = \"user_management/group_form.html\"\n form_class = GroupForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"initial\"] = {\n \"users\": UserProfile.objects.none(),\n \"can_add_event\": False,\n \"publish_event_for_group\": Group.objects.none(),\n }\n return kwargs\n\n def get_success_url(self):\n messages.success(self.request, _(\"Group created successfully.\"))\n return reverse(\"user_management:group_list\")\n\n\nclass GroupUpdateView(PermissionRequiredMixin, UpdateView):\n model = Group\n permission_required = \"auth.change_group\"\n template_name = \"user_management/group_form.html\"\n form_class = GroupForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"initial\"] = {\n \"users\": self.object.user_set.all(),\n \"can_view_past_event\": self.object.permissions.filter(\n codename=\"view_past_event\"\n ).exists(),\n \"can_add_event\": self.object.permissions.filter(codename=\"add_event\").exists(),\n \"publish_event_for_group\": get_objects_for_group(\n self.object, \"publish_event_for_group\", klass=Group\n ),\n }\n return kwargs\n\n def get_success_url(self):\n return reverse(\"user_management:group_list\")\n\n\nclass GroupDeleteView(PermissionRequiredMixin, DeleteView):\n model = Group\n permission_required = \"auth.delete_group\"\n template_name = \"user_management/group_confirm_delete.html\"\n\n def get_success_url(self):\n return reverse(\"user_management:group_list\")\n", "path": "user_management/views.py"}], "after_files": [{"content": "from django.contrib.auth.tokens import default_token_generator\nfrom django.core import mail\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.utils.translation import gettext as _\n\nfrom jep.settings import SITE_URL\n\n\ndef send_account_creation_info(userprofile):\n subject = _(\"Welcome to JEP!\")\n uid = urlsafe_base64_encode(force_bytes(userprofile.id))\n token = default_token_generator.make_token(userprofile)\n reset_link = reverse(\"password_reset_confirm\", kwargs={\"uidb64\": uid, \"token\": token})\n text_content = _(\n \"You're 
receiving this email because a new account has been created for you at JEP.\\n\"\n \"Please go to the following page and choose a password: {url}{reset_link}\\n\"\n \"Your username is your email address: {email}\\n\"\n ).format(url=SITE_URL, reset_link=reset_link, email=userprofile.email)\n\n html_content = render_to_string(\n \"user_management/new_account_email.html\",\n {\"uid\": uid, \"token\": token, \"site_url\": SITE_URL, \"email\": userprofile.email},\n )\n message = EmailMultiAlternatives(to=[userprofile.email], subject=subject, body=text_content)\n message.attach_alternative(html_content, \"text/html\")\n message.send()\n\n\ndef send_account_update_info(userprofile):\n subject = _(\"JEP account updated\")\n url = reverse(\"user_management:profile\")\n text_content = _(\n \"You're receiving this email because your account at JEP has been updated.\\n\"\n \"You can see the changes in your profile: {site_url}{url}\\n\"\n \"Your username is your email address: {email}\\n\"\n ).format(site_url=SITE_URL, url=url, email=userprofile.email)\n\n html_content = render_to_string(\n \"user_management/account_updated_email.html\",\n {\"site_url\": SITE_URL, \"url\": url, \"email\": userprofile.email},\n )\n message = EmailMultiAlternatives(to=[userprofile.email], subject=subject, body=text_content)\n message.attach_alternative(html_content, \"text/html\")\n message.send()\n", "path": "user_management/mail.py"}, {"content": "from django.urls import path\n\nfrom user_management import views\n\napp_name = \"user_management\"\nurlpatterns = [\n path(\"profile/\", views.ProfileView.as_view(), name=\"profile\"),\n path(\"groups/\", views.GroupListView.as_view(), name=\"group_list\"),\n path(\"groups/<int:pk>/edit\", views.GroupUpdateView.as_view(), name=\"group_edit\"),\n path(\"groups/<int:pk>/delete\", views.GroupDeleteView.as_view(), name=\"group_delete\"),\n path(\"groups/create\", views.GroupCreateView.as_view(), name=\"group_add\"),\n path(\"users/\", views.UserProfileListView.as_view(), name=\"userprofile_list\",),\n path(\"users/<int:pk>/edit\", views.UserProfileUpdateView.as_view(), name=\"userprofile_edit\",),\n path(\"users/create/\", views.UserProfileCreateView.as_view(), name=\"userprofile_create\",),\n]\n", "path": "user_management/urls.py"}, {"content": "from django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin\nfrom django.contrib.auth.models import Group\nfrom django.urls import reverse\nfrom django.views.generic import DetailView, ListView, UpdateView, CreateView, DeleteView\nfrom guardian.shortcuts import get_objects_for_group\n\nfrom user_management import mail\nfrom user_management.forms import GroupForm, UserProfileForm\nfrom django.utils.translation import gettext as _\n\nfrom user_management.models import UserProfile\n\n\nclass ProfileView(LoginRequiredMixin, DetailView):\n def get_object(self, queryset=None):\n return self.request.user\n\n\nclass UserProfileListView(PermissionRequiredMixin, ListView):\n model = UserProfile\n permission_required = \"user_management.view_userprofile\"\n\n\nclass UserProfileCreateView(PermissionRequiredMixin, CreateView):\n template_name = \"user_management/userprofile_form.html\"\n permission_required = \"user_management.add_userprofile\"\n model = UserProfile\n form_class = UserProfileForm\n\n def get_success_url(self):\n messages.success(self.request, _(\"User added successfully.\"))\n return reverse(\"user_management:userprofile_list\")\n\n def form_valid(self, form):\n response = 
super().form_valid(form)\n userprofile = self.object\n if userprofile.is_active:\n mail.send_account_creation_info(userprofile)\n return response\n\n\nclass UserProfileUpdateView(PermissionRequiredMixin, UpdateView):\n model = UserProfile\n permission_required = \"user_management.change_userprofile\"\n template_name = \"user_management/userprofile_form.html\"\n form_class = UserProfileForm\n\n def get_success_url(self):\n messages.success(self.request, _(\"User updated successfully.\"))\n return reverse(\"user_management:userprofile_list\")\n\n def form_valid(self, form):\n response = super().form_valid(form)\n userprofile = self.object\n if userprofile.is_active:\n mail.send_account_update_info(userprofile)\n return response\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"initial\"] = {\n \"groups\": self.object.groups.all(),\n }\n return kwargs\n\n\nclass GroupListView(PermissionRequiredMixin, ListView):\n model = Group\n permission_required = \"auth.view_group\"\n template_name = \"user_management/group_list.html\"\n\n\nclass GroupCreateView(PermissionRequiredMixin, CreateView):\n model = Group\n permission_required = \"auth.add_group\"\n template_name = \"user_management/group_form.html\"\n form_class = GroupForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"initial\"] = {\n \"users\": UserProfile.objects.none(),\n \"can_add_event\": False,\n \"publish_event_for_group\": Group.objects.none(),\n }\n return kwargs\n\n def get_success_url(self):\n messages.success(self.request, _(\"Group created successfully.\"))\n return reverse(\"user_management:group_list\")\n\n\nclass GroupUpdateView(PermissionRequiredMixin, UpdateView):\n model = Group\n permission_required = \"auth.change_group\"\n template_name = \"user_management/group_form.html\"\n form_class = GroupForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"initial\"] = {\n \"users\": self.object.user_set.all(),\n \"can_view_past_event\": self.object.permissions.filter(\n codename=\"view_past_event\"\n ).exists(),\n \"can_add_event\": self.object.permissions.filter(codename=\"add_event\").exists(),\n \"publish_event_for_group\": get_objects_for_group(\n self.object, \"publish_event_for_group\", klass=Group\n ),\n }\n return kwargs\n\n def get_success_url(self):\n messages.success(self.request, _(\"Group updated successfully.\"))\n return reverse(\"user_management:group_list\")\n\n\nclass GroupDeleteView(PermissionRequiredMixin, DeleteView):\n model = Group\n permission_required = \"auth.delete_group\"\n template_name = \"user_management/group_confirm_delete.html\"\n\n def get_success_url(self):\n return reverse(\"user_management:group_list\")\n", "path": "user_management/views.py"}]} | 1,779 | 985 |
gh_patches_debug_27185 | rasdani/github-patches | git_diff | kubeflow__pipelines-5782 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sdk/client/auth - KeyError: 'id_token' from _auth.py in id_token_from_refresh_token
If you are getting “KeyError: 'id_token'” from “File "/Users/gongyuan/miniconda3/envs/mlpipeline/lib/python3.7/site-packages/kfp/_auth.py", line 192, in id_token_from_refresh_token
return (str(json.loads(res.text)[u"id_token"]))”,
it means the request to exchange the refresh token for an ID token failed, but the client didn’t surface the underlying error message. The HTTP response I got was:
```
{
"error": "invalid_grant",
"error_description": "Bad Request"
}
```
The root cause: `~/.config/kfp/credentials.json` had expired. I deleted it (`rm ~/.config/kfp/credentials.json`) and a fresh token was obtained afterwards.
## Solution
In https://github.com/kubeflow/pipelines/blob/2a65eec1fa265ebbda69d5b8b1875e3e4b54ac82/sdk/python/kfp/_auth.py#L184-L185 and https://github.com/kubeflow/pipelines/blob/2a65eec1fa265ebbda69d5b8b1875e3e4b54ac82/sdk/python/kfp/_auth.py#L191-L192, we should first check the response status code with `Response.raise_for_status()`; the underlying failure could be a `401 Unauthorized`.
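
To make the proposal concrete, here is a minimal sketch (an illustration only, not the exact patch) of the `id_token_from_refresh_token` helper with the status check added. The payload and `OAUTH_TOKEN_URI` are taken from `_auth.py` below; `requests.Response.raise_for_status()` turns a 400/401 response into an `HTTPError` instead of letting the later `KeyError: 'id_token'` mask it:

```python
import json
import requests

OAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'

def id_token_from_refresh_token(client_id, client_secret, refresh_token, audience):
    payload = {"client_id": client_id, "client_secret": client_secret,
               "refresh_token": refresh_token, "grant_type": "refresh_token",
               "audience": audience}
    res = requests.post(OAUTH_TOKEN_URI, data=payload)
    # Surface invalid_grant / 401 responses (e.g. an expired refresh token)
    # as an HTTPError instead of failing later with KeyError: 'id_token'.
    res.raise_for_status()
    return str(json.loads(res.text)["id_token"])
```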
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/kfp/_auth.py`
Content:
```
1 # Copyright 2018 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 import os
17 import google.auth
18 import google.auth.app_engine
19 import google.auth.compute_engine.credentials
20 import google.auth.iam
21 from google.auth.transport.requests import Request
22 import google.oauth2.credentials
23 import google.oauth2.service_account
24 import requests_toolbelt.adapters.appengine
25 from webbrowser import open_new_tab
26 import requests
27 import json
28
29 IAM_SCOPE = 'https://www.googleapis.com/auth/iam'
30 OAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'
31 LOCAL_KFP_CREDENTIAL = os.path.expanduser('~/.config/kfp/credentials.json')
32
33 def get_gcp_access_token():
34 """Get and return GCP access token for the current Application Default
35 Credentials. If not set, returns None. For more information, see
36 https://cloud.google.com/sdk/gcloud/reference/auth/application-default/print-access-token
37 """
38 token = None
39 try:
40 creds, project = google.auth.default(scopes=["https://www.googleapis.com/auth/cloud-platform"])
41 if not creds.valid:
42 auth_req = Request()
43 creds.refresh(auth_req)
44 if creds.valid:
45 token = creds.token
46 except Exception as e:
47 logging.warning('Failed to get GCP access token: %s', e)
48 return token
49
50 def get_auth_token(client_id, other_client_id, other_client_secret):
51 """Gets auth token from default service account or user account."""
52 if os.path.exists(LOCAL_KFP_CREDENTIAL):
53 # fetch IAP auth token using the locally stored credentials.
54 with open(LOCAL_KFP_CREDENTIAL, 'r') as f:
55 credentials = json.load(f)
56 if client_id in credentials:
57 return id_token_from_refresh_token(credentials[client_id]['other_client_id'],
58 credentials[client_id]['other_client_secret'],
59 credentials[client_id]['refresh_token'],
60 client_id)
61 if other_client_id is None or other_client_secret is None:
62 # fetch IAP auth token: service accounts
63 token = get_auth_token_from_sa(client_id)
64 else:
65 # fetch IAP auth token: user account
66 # Obtain the ID token for provided Client ID with user accounts.
67 # Flow: get authorization code -> exchange for refresh token -> obtain and return ID token
68 refresh_token = get_refresh_token_from_client_id(other_client_id, other_client_secret)
69 credentials = {}
70 if os.path.exists(LOCAL_KFP_CREDENTIAL):
71 with open(LOCAL_KFP_CREDENTIAL, 'r') as f:
72 credentials = json.load(f)
73 credentials[client_id] = {}
74 credentials[client_id]['other_client_id'] = other_client_id
75 credentials[client_id]['other_client_secret'] = other_client_secret
76 credentials[client_id]['refresh_token'] = refresh_token
77 #TODO: handle the case when the refresh_token expires.
78 # which only happens if the refresh_token is not used once for six months.
79 if not os.path.exists(os.path.dirname(LOCAL_KFP_CREDENTIAL)):
80 os.makedirs(os.path.dirname(LOCAL_KFP_CREDENTIAL))
81 with open(LOCAL_KFP_CREDENTIAL, 'w') as f:
82 json.dump(credentials, f)
83 token = id_token_from_refresh_token(other_client_id, other_client_secret, refresh_token, client_id)
84 return token
85
86 def get_auth_token_from_sa(client_id):
87 """Gets auth token from default service account.
88
89 If no service account credential is found, returns None.
90 """
91 service_account_credentials = get_service_account_credentials(client_id)
92 if service_account_credentials:
93 return get_google_open_id_connect_token(service_account_credentials)
94 return None
95
96 def get_service_account_credentials(client_id):
97 # Figure out what environment we're running in and get some preliminary
98 # information about the service account.
99 bootstrap_credentials, _ = google.auth.default(
100 scopes=[IAM_SCOPE])
101 if isinstance(bootstrap_credentials,
102 google.oauth2.credentials.Credentials):
103 logging.info('Found OAuth2 credentials and skip SA auth.')
104 return None
105 elif isinstance(bootstrap_credentials,
106 google.auth.app_engine.Credentials):
107 requests_toolbelt.adapters.appengine.monkeypatch()
108
109 # For service account's using the Compute Engine metadata service,
110 # service_account_email isn't available until refresh is called.
111 bootstrap_credentials.refresh(Request())
112 signer_email = bootstrap_credentials.service_account_email
113 if isinstance(bootstrap_credentials,
114 google.auth.compute_engine.credentials.Credentials):
115 # Since the Compute Engine metadata service doesn't expose the service
116 # account key, we use the IAM signBlob API to sign instead.
117 # In order for this to work:
118 #
119 # 1. Your VM needs the https://www.googleapis.com/auth/iam scope.
120 # You can specify this specific scope when creating a VM
121 # through the API or gcloud. When using Cloud Console,
122 # you'll need to specify the "full access to all Cloud APIs"
123 # scope. A VM's scopes can only be specified at creation time.
124 #
125 # 2. The VM's default service account needs the "Service Account Actor"
126 # role. This can be found under the "Project" category in Cloud
127 # Console, or roles/iam.serviceAccountActor in gcloud.
128 signer = google.auth.iam.Signer(
129 Request(), bootstrap_credentials, signer_email)
130 else:
131 # A Signer object can sign a JWT using the service account's key.
132 signer = bootstrap_credentials.signer
133
134 # Construct OAuth 2.0 service account credentials using the signer
135 # and email acquired from the bootstrap credentials.
136 return google.oauth2.service_account.Credentials(
137 signer, signer_email, token_uri=OAUTH_TOKEN_URI, additional_claims={
138 'target_audience': client_id
139 })
140
141 def get_google_open_id_connect_token(service_account_credentials):
142 """Get an OpenID Connect token issued by Google for the service account.
143 This function:
144 1. Generates a JWT signed with the service account's private key
145 containing a special "target_audience" claim.
146 2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1
147 has a target_audience claim, that endpoint will respond with
148 an OpenID Connect token for the service account -- in other words,
149 a JWT signed by *Google*. The aud claim in this JWT will be
150 set to the value from the target_audience claim in #1.
151 For more information, see
152 https://developers.google.com/identity/protocols/OAuth2ServiceAccount .
153 The HTTP/REST example on that page describes the JWT structure and
154 demonstrates how to call the token endpoint. (The example on that page
155 shows how to get an OAuth2 access token; this code is using a
156 modified version of it to get an OpenID Connect token.)
157 """
158
159 service_account_jwt = (
160 service_account_credentials._make_authorization_grant_assertion())
161 request = google.auth.transport.requests.Request()
162 body = {
163 'assertion': service_account_jwt,
164 'grant_type': google.oauth2._client._JWT_GRANT_TYPE,
165 }
166 token_response = google.oauth2._client._token_endpoint_request(
167 request, OAUTH_TOKEN_URI, body)
168 return token_response['id_token']
169
170 def get_refresh_token_from_client_id(client_id, client_secret):
171 """Obtain the ID token for provided Client ID with user accounts.
172 Flow: get authorization code -> exchange for refresh token -> obtain and return ID token
173 """
174 auth_code = get_auth_code(client_id)
175 return get_refresh_token_from_code(auth_code, client_id, client_secret)
176
177 def get_auth_code(client_id):
178 auth_url = "https://accounts.google.com/o/oauth2/v2/auth?client_id=%s&response_type=code&scope=openid%%20email&access_type=offline&redirect_uri=urn:ietf:wg:oauth:2.0:oob"%client_id
179 print(auth_url)
180 open_new_tab(auth_url)
181 return input("If there's no browser window prompt, please direct to the URL above, then copy and paste the authorization code here: ")
182
183 def get_refresh_token_from_code(auth_code, client_id, client_secret):
184 payload = {"code": auth_code, "client_id": client_id, "client_secret": client_secret,
185 "redirect_uri": "urn:ietf:wg:oauth:2.0:oob", "grant_type": "authorization_code"}
186 res = requests.post(OAUTH_TOKEN_URI, data=payload)
187 return (str(json.loads(res.text)[u"refresh_token"]))
188
189 def id_token_from_refresh_token(client_id, client_secret, refresh_token, audience):
190 payload = {"client_id": client_id, "client_secret": client_secret,
191 "refresh_token": refresh_token, "grant_type": "refresh_token",
192 "audience": audience}
193 res = requests.post(OAUTH_TOKEN_URI, data=payload)
194 return (str(json.loads(res.text)[u"id_token"]))
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/kfp/_auth.py b/sdk/python/kfp/_auth.py
--- a/sdk/python/kfp/_auth.py
+++ b/sdk/python/kfp/_auth.py
@@ -180,15 +180,19 @@
open_new_tab(auth_url)
return input("If there's no browser window prompt, please direct to the URL above, then copy and paste the authorization code here: ")
+
def get_refresh_token_from_code(auth_code, client_id, client_secret):
payload = {"code": auth_code, "client_id": client_id, "client_secret": client_secret,
"redirect_uri": "urn:ietf:wg:oauth:2.0:oob", "grant_type": "authorization_code"}
res = requests.post(OAUTH_TOKEN_URI, data=payload)
- return (str(json.loads(res.text)[u"refresh_token"]))
+ res.raise_for_status()
+ return str(json.loads(res.text)[u"refresh_token"])
+
def id_token_from_refresh_token(client_id, client_secret, refresh_token, audience):
payload = {"client_id": client_id, "client_secret": client_secret,
"refresh_token": refresh_token, "grant_type": "refresh_token",
"audience": audience}
res = requests.post(OAUTH_TOKEN_URI, data=payload)
- return (str(json.loads(res.text)[u"id_token"]))
+ res.raise_for_status()
+ return str(json.loads(res.text)[u"id_token"])
| {"golden_diff": "diff --git a/sdk/python/kfp/_auth.py b/sdk/python/kfp/_auth.py\n--- a/sdk/python/kfp/_auth.py\n+++ b/sdk/python/kfp/_auth.py\n@@ -180,15 +180,19 @@\n open_new_tab(auth_url)\n return input(\"If there's no browser window prompt, please direct to the URL above, then copy and paste the authorization code here: \")\n \n+\n def get_refresh_token_from_code(auth_code, client_id, client_secret):\n payload = {\"code\": auth_code, \"client_id\": client_id, \"client_secret\": client_secret,\n \"redirect_uri\": \"urn:ietf:wg:oauth:2.0:oob\", \"grant_type\": \"authorization_code\"}\n res = requests.post(OAUTH_TOKEN_URI, data=payload)\n- return (str(json.loads(res.text)[u\"refresh_token\"]))\n+ res.raise_for_status()\n+ return str(json.loads(res.text)[u\"refresh_token\"])\n+\n \n def id_token_from_refresh_token(client_id, client_secret, refresh_token, audience):\n payload = {\"client_id\": client_id, \"client_secret\": client_secret,\n \"refresh_token\": refresh_token, \"grant_type\": \"refresh_token\",\n \"audience\": audience}\n res = requests.post(OAUTH_TOKEN_URI, data=payload)\n- return (str(json.loads(res.text)[u\"id_token\"]))\n+ res.raise_for_status()\n+ return str(json.loads(res.text)[u\"id_token\"])\n", "issue": "sdk/client/auth - KeyError: 'id_token' from _auth.py in id_token_from_refresh_token\nIf getting \u201cKeyError: 'id_token'\u201d from \u201cFile \"/Users/gongyuan/miniconda3/envs/mlpipeline/lib/python3.7/site-packages/kfp/_auth.py\", line 192, in id_token_from_refresh_token\r\n return (str(json.loads(res.text)[u\"id_token\"]))\r\n\r\nThe request to get id token from refresh token failed, but the client didn\u2019t surface the underlying error message. The http response I got was\r\n```\r\n{\r\n \"error\": \"invalid_grant\",\r\n \"error_description\": \"Bad Request\"\r\n}\r\n```\r\n\r\nAnd root cause: ~/.config/kfp/credentials.json has expired. 
I deleted it and got a new token: `rm ~/.config/kfp/credentials.json`.\r\n\r\n## Solution\r\n\r\nhttps://github.com/kubeflow/pipelines/blob/2a65eec1fa265ebbda69d5b8b1875e3e4b54ac82/sdk/python/kfp/_auth.py#L184-L185 and https://github.com/kubeflow/pipelines/blob/2a65eec1fa265ebbda69d5b8b1875e3e4b54ac82/sdk/python/kfp/_auth.py#L191-L192, we should first check request response status code by `Response.raise_for_exception`, it could be `401 unauthorized`.\n", "before_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport google.auth\nimport google.auth.app_engine\nimport google.auth.compute_engine.credentials\nimport google.auth.iam\nfrom google.auth.transport.requests import Request\nimport google.oauth2.credentials\nimport google.oauth2.service_account\nimport requests_toolbelt.adapters.appengine\nfrom webbrowser import open_new_tab\nimport requests\nimport json\n\nIAM_SCOPE = 'https://www.googleapis.com/auth/iam'\nOAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'\nLOCAL_KFP_CREDENTIAL = os.path.expanduser('~/.config/kfp/credentials.json')\n\ndef get_gcp_access_token():\n \"\"\"Get and return GCP access token for the current Application Default\n Credentials. If not set, returns None. 
For more information, see\n https://cloud.google.com/sdk/gcloud/reference/auth/application-default/print-access-token\n \"\"\"\n token = None\n try:\n creds, project = google.auth.default(scopes=[\"https://www.googleapis.com/auth/cloud-platform\"])\n if not creds.valid:\n auth_req = Request()\n creds.refresh(auth_req)\n if creds.valid:\n token = creds.token\n except Exception as e:\n logging.warning('Failed to get GCP access token: %s', e)\n return token\n\ndef get_auth_token(client_id, other_client_id, other_client_secret):\n \"\"\"Gets auth token from default service account or user account.\"\"\"\n if os.path.exists(LOCAL_KFP_CREDENTIAL):\n # fetch IAP auth token using the locally stored credentials.\n with open(LOCAL_KFP_CREDENTIAL, 'r') as f:\n credentials = json.load(f)\n if client_id in credentials:\n return id_token_from_refresh_token(credentials[client_id]['other_client_id'],\n credentials[client_id]['other_client_secret'],\n credentials[client_id]['refresh_token'],\n client_id)\n if other_client_id is None or other_client_secret is None:\n # fetch IAP auth token: service accounts\n token = get_auth_token_from_sa(client_id)\n else:\n # fetch IAP auth token: user account\n # Obtain the ID token for provided Client ID with user accounts.\n # Flow: get authorization code -> exchange for refresh token -> obtain and return ID token\n refresh_token = get_refresh_token_from_client_id(other_client_id, other_client_secret)\n credentials = {}\n if os.path.exists(LOCAL_KFP_CREDENTIAL):\n with open(LOCAL_KFP_CREDENTIAL, 'r') as f:\n credentials = json.load(f)\n credentials[client_id] = {}\n credentials[client_id]['other_client_id'] = other_client_id\n credentials[client_id]['other_client_secret'] = other_client_secret\n credentials[client_id]['refresh_token'] = refresh_token\n #TODO: handle the case when the refresh_token expires.\n # which only happens if the refresh_token is not used once for six months.\n if not os.path.exists(os.path.dirname(LOCAL_KFP_CREDENTIAL)):\n os.makedirs(os.path.dirname(LOCAL_KFP_CREDENTIAL))\n with open(LOCAL_KFP_CREDENTIAL, 'w') as f:\n json.dump(credentials, f)\n token = id_token_from_refresh_token(other_client_id, other_client_secret, refresh_token, client_id)\n return token\n\ndef get_auth_token_from_sa(client_id):\n \"\"\"Gets auth token from default service account.\n\n If no service account credential is found, returns None.\n \"\"\"\n service_account_credentials = get_service_account_credentials(client_id)\n if service_account_credentials:\n return get_google_open_id_connect_token(service_account_credentials)\n return None\n\ndef get_service_account_credentials(client_id):\n # Figure out what environment we're running in and get some preliminary\n # information about the service account.\n bootstrap_credentials, _ = google.auth.default(\n scopes=[IAM_SCOPE])\n if isinstance(bootstrap_credentials,\n google.oauth2.credentials.Credentials):\n logging.info('Found OAuth2 credentials and skip SA auth.')\n return None\n elif isinstance(bootstrap_credentials,\n google.auth.app_engine.Credentials):\n requests_toolbelt.adapters.appengine.monkeypatch()\n\n # For service account's using the Compute Engine metadata service,\n # service_account_email isn't available until refresh is called.\n bootstrap_credentials.refresh(Request())\n signer_email = bootstrap_credentials.service_account_email\n if isinstance(bootstrap_credentials,\n google.auth.compute_engine.credentials.Credentials):\n # Since the Compute Engine metadata service doesn't expose the service\n # account 
key, we use the IAM signBlob API to sign instead.\n # In order for this to work:\n #\n # 1. Your VM needs the https://www.googleapis.com/auth/iam scope.\n # You can specify this specific scope when creating a VM\n # through the API or gcloud. When using Cloud Console,\n # you'll need to specify the \"full access to all Cloud APIs\"\n # scope. A VM's scopes can only be specified at creation time.\n #\n # 2. The VM's default service account needs the \"Service Account Actor\"\n # role. This can be found under the \"Project\" category in Cloud\n # Console, or roles/iam.serviceAccountActor in gcloud.\n signer = google.auth.iam.Signer(\n Request(), bootstrap_credentials, signer_email)\n else:\n # A Signer object can sign a JWT using the service account's key.\n signer = bootstrap_credentials.signer\n\n # Construct OAuth 2.0 service account credentials using the signer\n # and email acquired from the bootstrap credentials.\n return google.oauth2.service_account.Credentials(\n signer, signer_email, token_uri=OAUTH_TOKEN_URI, additional_claims={\n 'target_audience': client_id\n })\n\ndef get_google_open_id_connect_token(service_account_credentials):\n \"\"\"Get an OpenID Connect token issued by Google for the service account.\n This function:\n 1. Generates a JWT signed with the service account's private key\n containing a special \"target_audience\" claim.\n 2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1\n has a target_audience claim, that endpoint will respond with\n an OpenID Connect token for the service account -- in other words,\n a JWT signed by *Google*. The aud claim in this JWT will be\n set to the value from the target_audience claim in #1.\n For more information, see\n https://developers.google.com/identity/protocols/OAuth2ServiceAccount .\n The HTTP/REST example on that page describes the JWT structure and\n demonstrates how to call the token endpoint. 
(The example on that page\n shows how to get an OAuth2 access token; this code is using a\n modified version of it to get an OpenID Connect token.)\n \"\"\"\n\n service_account_jwt = (\n service_account_credentials._make_authorization_grant_assertion())\n request = google.auth.transport.requests.Request()\n body = {\n 'assertion': service_account_jwt,\n 'grant_type': google.oauth2._client._JWT_GRANT_TYPE,\n }\n token_response = google.oauth2._client._token_endpoint_request(\n request, OAUTH_TOKEN_URI, body)\n return token_response['id_token']\n\ndef get_refresh_token_from_client_id(client_id, client_secret):\n \"\"\"Obtain the ID token for provided Client ID with user accounts.\n Flow: get authorization code -> exchange for refresh token -> obtain and return ID token\n \"\"\"\n auth_code = get_auth_code(client_id)\n return get_refresh_token_from_code(auth_code, client_id, client_secret)\n\ndef get_auth_code(client_id):\n auth_url = \"https://accounts.google.com/o/oauth2/v2/auth?client_id=%s&response_type=code&scope=openid%%20email&access_type=offline&redirect_uri=urn:ietf:wg:oauth:2.0:oob\"%client_id\n print(auth_url)\n open_new_tab(auth_url)\n return input(\"If there's no browser window prompt, please direct to the URL above, then copy and paste the authorization code here: \")\n\ndef get_refresh_token_from_code(auth_code, client_id, client_secret):\n payload = {\"code\": auth_code, \"client_id\": client_id, \"client_secret\": client_secret,\n \"redirect_uri\": \"urn:ietf:wg:oauth:2.0:oob\", \"grant_type\": \"authorization_code\"}\n res = requests.post(OAUTH_TOKEN_URI, data=payload)\n return (str(json.loads(res.text)[u\"refresh_token\"]))\n\ndef id_token_from_refresh_token(client_id, client_secret, refresh_token, audience):\n payload = {\"client_id\": client_id, \"client_secret\": client_secret,\n \"refresh_token\": refresh_token, \"grant_type\": \"refresh_token\",\n \"audience\": audience}\n res = requests.post(OAUTH_TOKEN_URI, data=payload)\n return (str(json.loads(res.text)[u\"id_token\"]))\n", "path": "sdk/python/kfp/_auth.py"}], "after_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport google.auth\nimport google.auth.app_engine\nimport google.auth.compute_engine.credentials\nimport google.auth.iam\nfrom google.auth.transport.requests import Request\nimport google.oauth2.credentials\nimport google.oauth2.service_account\nimport requests_toolbelt.adapters.appengine\nfrom webbrowser import open_new_tab\nimport requests\nimport json\n\nIAM_SCOPE = 'https://www.googleapis.com/auth/iam'\nOAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'\nLOCAL_KFP_CREDENTIAL = os.path.expanduser('~/.config/kfp/credentials.json')\n\ndef get_gcp_access_token():\n \"\"\"Get and return GCP access token for the current Application Default\n Credentials. If not set, returns None. 
For more information, see\n https://cloud.google.com/sdk/gcloud/reference/auth/application-default/print-access-token\n \"\"\"\n token = None\n try:\n creds, project = google.auth.default(scopes=[\"https://www.googleapis.com/auth/cloud-platform\"])\n if not creds.valid:\n auth_req = Request()\n creds.refresh(auth_req)\n if creds.valid:\n token = creds.token\n except Exception as e:\n logging.warning('Failed to get GCP access token: %s', e)\n return token\n\ndef get_auth_token(client_id, other_client_id, other_client_secret):\n \"\"\"Gets auth token from default service account or user account.\"\"\"\n if os.path.exists(LOCAL_KFP_CREDENTIAL):\n # fetch IAP auth token using the locally stored credentials.\n with open(LOCAL_KFP_CREDENTIAL, 'r') as f:\n credentials = json.load(f)\n if client_id in credentials:\n return id_token_from_refresh_token(credentials[client_id]['other_client_id'],\n credentials[client_id]['other_client_secret'],\n credentials[client_id]['refresh_token'],\n client_id)\n if other_client_id is None or other_client_secret is None:\n # fetch IAP auth token: service accounts\n token = get_auth_token_from_sa(client_id)\n else:\n # fetch IAP auth token: user account\n # Obtain the ID token for provided Client ID with user accounts.\n # Flow: get authorization code -> exchange for refresh token -> obtain and return ID token\n refresh_token = get_refresh_token_from_client_id(other_client_id, other_client_secret)\n credentials = {}\n if os.path.exists(LOCAL_KFP_CREDENTIAL):\n with open(LOCAL_KFP_CREDENTIAL, 'r') as f:\n credentials = json.load(f)\n credentials[client_id] = {}\n credentials[client_id]['other_client_id'] = other_client_id\n credentials[client_id]['other_client_secret'] = other_client_secret\n credentials[client_id]['refresh_token'] = refresh_token\n #TODO: handle the case when the refresh_token expires.\n # which only happens if the refresh_token is not used once for six months.\n if not os.path.exists(os.path.dirname(LOCAL_KFP_CREDENTIAL)):\n os.makedirs(os.path.dirname(LOCAL_KFP_CREDENTIAL))\n with open(LOCAL_KFP_CREDENTIAL, 'w') as f:\n json.dump(credentials, f)\n token = id_token_from_refresh_token(other_client_id, other_client_secret, refresh_token, client_id)\n return token\n\ndef get_auth_token_from_sa(client_id):\n \"\"\"Gets auth token from default service account.\n\n If no service account credential is found, returns None.\n \"\"\"\n service_account_credentials = get_service_account_credentials(client_id)\n if service_account_credentials:\n return get_google_open_id_connect_token(service_account_credentials)\n return None\n\ndef get_service_account_credentials(client_id):\n # Figure out what environment we're running in and get some preliminary\n # information about the service account.\n bootstrap_credentials, _ = google.auth.default(\n scopes=[IAM_SCOPE])\n if isinstance(bootstrap_credentials,\n google.oauth2.credentials.Credentials):\n logging.info('Found OAuth2 credentials and skip SA auth.')\n return None\n elif isinstance(bootstrap_credentials,\n google.auth.app_engine.Credentials):\n requests_toolbelt.adapters.appengine.monkeypatch()\n\n # For service account's using the Compute Engine metadata service,\n # service_account_email isn't available until refresh is called.\n bootstrap_credentials.refresh(Request())\n signer_email = bootstrap_credentials.service_account_email\n if isinstance(bootstrap_credentials,\n google.auth.compute_engine.credentials.Credentials):\n # Since the Compute Engine metadata service doesn't expose the service\n # account 
key, we use the IAM signBlob API to sign instead.\n # In order for this to work:\n #\n # 1. Your VM needs the https://www.googleapis.com/auth/iam scope.\n # You can specify this specific scope when creating a VM\n # through the API or gcloud. When using Cloud Console,\n # you'll need to specify the \"full access to all Cloud APIs\"\n # scope. A VM's scopes can only be specified at creation time.\n #\n # 2. The VM's default service account needs the \"Service Account Actor\"\n # role. This can be found under the \"Project\" category in Cloud\n # Console, or roles/iam.serviceAccountActor in gcloud.\n signer = google.auth.iam.Signer(\n Request(), bootstrap_credentials, signer_email)\n else:\n # A Signer object can sign a JWT using the service account's key.\n signer = bootstrap_credentials.signer\n\n # Construct OAuth 2.0 service account credentials using the signer\n # and email acquired from the bootstrap credentials.\n return google.oauth2.service_account.Credentials(\n signer, signer_email, token_uri=OAUTH_TOKEN_URI, additional_claims={\n 'target_audience': client_id\n })\n\ndef get_google_open_id_connect_token(service_account_credentials):\n \"\"\"Get an OpenID Connect token issued by Google for the service account.\n This function:\n 1. Generates a JWT signed with the service account's private key\n containing a special \"target_audience\" claim.\n 2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1\n has a target_audience claim, that endpoint will respond with\n an OpenID Connect token for the service account -- in other words,\n a JWT signed by *Google*. The aud claim in this JWT will be\n set to the value from the target_audience claim in #1.\n For more information, see\n https://developers.google.com/identity/protocols/OAuth2ServiceAccount .\n The HTTP/REST example on that page describes the JWT structure and\n demonstrates how to call the token endpoint. 
(The example on that page\n shows how to get an OAuth2 access token; this code is using a\n modified version of it to get an OpenID Connect token.)\n \"\"\"\n\n service_account_jwt = (\n service_account_credentials._make_authorization_grant_assertion())\n request = google.auth.transport.requests.Request()\n body = {\n 'assertion': service_account_jwt,\n 'grant_type': google.oauth2._client._JWT_GRANT_TYPE,\n }\n token_response = google.oauth2._client._token_endpoint_request(\n request, OAUTH_TOKEN_URI, body)\n return token_response['id_token']\n\ndef get_refresh_token_from_client_id(client_id, client_secret):\n \"\"\"Obtain the ID token for provided Client ID with user accounts.\n Flow: get authorization code -> exchange for refresh token -> obtain and return ID token\n \"\"\"\n auth_code = get_auth_code(client_id)\n return get_refresh_token_from_code(auth_code, client_id, client_secret)\n\ndef get_auth_code(client_id):\n auth_url = \"https://accounts.google.com/o/oauth2/v2/auth?client_id=%s&response_type=code&scope=openid%%20email&access_type=offline&redirect_uri=urn:ietf:wg:oauth:2.0:oob\"%client_id\n print(auth_url)\n open_new_tab(auth_url)\n return input(\"If there's no browser window prompt, please direct to the URL above, then copy and paste the authorization code here: \")\n\n\ndef get_refresh_token_from_code(auth_code, client_id, client_secret):\n payload = {\"code\": auth_code, \"client_id\": client_id, \"client_secret\": client_secret,\n \"redirect_uri\": \"urn:ietf:wg:oauth:2.0:oob\", \"grant_type\": \"authorization_code\"}\n res = requests.post(OAUTH_TOKEN_URI, data=payload)\n res.raise_for_status()\n return str(json.loads(res.text)[u\"refresh_token\"])\n\n\ndef id_token_from_refresh_token(client_id, client_secret, refresh_token, audience):\n payload = {\"client_id\": client_id, \"client_secret\": client_secret,\n \"refresh_token\": refresh_token, \"grant_type\": \"refresh_token\",\n \"audience\": audience}\n res = requests.post(OAUTH_TOKEN_URI, data=payload)\n res.raise_for_status()\n return str(json.loads(res.text)[u\"id_token\"])\n", "path": "sdk/python/kfp/_auth.py"}]} | 3,097 | 317 |
gh_patches_debug_35503 | rasdani/github-patches | git_diff | falconry__falcon-1925 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make JSONHandler customization docs clearer
As pointed out by @Stargateur in https://github.com/falconry/falcon/issues/1906#issuecomment-817374057, our [`JSONHandler`](https://falcon.readthedocs.io/en/stable/api/media.html#falcon.media.JSONHandler) customization docs could be made clearer by separately illustrating different (albeit closely related) concepts:
* Use a custom JSON library (such as `rapidjson`, which the current docs already use as an example), and show how to customize its parameters.
* Use the stdlib's `json` module, but provide custom serialization or deserialization parameters (a short sketch follows this list). Also link to the ["Prettifying JSON Responses" recipe](https://falcon.readthedocs.io/en/stable/user/recipes/pretty-json.html), which illustrates customization of `dumps` parameters.
* Add a sentence or two about replacing the default JSON handlers, rather than just tossing in a code snippet as the docs do at the time of writing. Also link to [Replacing the Default Handlers](https://falcon.readthedocs.io/en/stable/api/media.html#custom-media-handlers) from that explanation.
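
As a concrete illustration of the second bullet (a sketch only; the `indent`/`sort_keys` choices are arbitrary examples, not something the docs must prescribe), customizing the stdlib `json` module via `functools.partial` and registering the handler looks like this:

```python
from functools import partial
import json

import falcon
from falcon import media

# Stick with the stdlib json module, but customize its dumps parameters.
json_handler = media.JSONHandler(
    dumps=partial(json.dumps, indent=4, sort_keys=True, ensure_ascii=False),
)
extra_handlers = {'application/json': json_handler}

app = falcon.App()
app.req_options.media_handlers.update(extra_handlers)
app.resp_options.media_handlers.update(extra_handlers)
```

This mirrors the registration snippet already present in the `JSONHandler` docstring, so the docs mainly need to spell the two customization paths out separately.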
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/media/json.py`
Content:
```
1 from functools import partial
2 import json
3
4 from falcon import errors
5 from falcon import http_error
6 from falcon.media.base import BaseHandler
7 from falcon.media.base import TextBaseHandlerWS
8
9
10 class JSONHandler(BaseHandler):
11 """JSON media handler.
12
13 This handler uses Python's standard :py:mod:`json` library by default, but
14 can be easily configured to use any of a number of third-party JSON
15 libraries, depending on your needs. For example, you can often
16 realize a significant performance boost under CPython by using an
17 alternative library. Good options in this respect include `orjson`,
18 `python-rapidjson`, and `mujson`.
19
20 This handler will raise a :class:`falcon.MediaNotFoundError` when attempting
21 to parse an empty body, or a :class:`falcon.MediaMalformedError`
22 if an error happens while parsing the body.
23
24 Note:
25 If you are deploying to PyPy, we recommend sticking with the standard
26 library's JSON implementation, since it will be faster in most cases
27 as compared to a third-party library.
28
29 Overriding the default JSON implementation is simply a matter of specifying
30 the desired ``dumps`` and ``loads`` functions::
31
32 import falcon
33 from falcon import media
34
35 import rapidjson
36
37 json_handler = media.JSONHandler(
38 dumps=rapidjson.dumps,
39 loads=rapidjson.loads,
40 )
41 extra_handlers = {
42 'application/json': json_handler,
43 }
44
45 app = falcon.App()
46 app.req_options.media_handlers.update(extra_handlers)
47 app.resp_options.media_handlers.update(extra_handlers)
48
49 By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.
50 If you override the ``dumps`` function, you will need to explicitly set
51 ``ensure_ascii`` to ``False`` in order to enable the serialization of
52 Unicode characters to UTF-8. This is easily done by using
53 :any:`functools.partial` to apply the desired keyword argument. In fact, you
54 can use this same technique to customize any option supported by the
55 ``dumps`` and ``loads`` functions::
56
57 from functools import partial
58
59 from falcon import media
60 import rapidjson
61
62 json_handler = media.JSONHandler(
63 dumps=partial(
64 rapidjson.dumps,
65 ensure_ascii=False, sort_keys=True
66 ),
67 )
68
69 Keyword Arguments:
70 dumps (func): Function to use when serializing JSON responses.
71 loads (func): Function to use when deserializing JSON requests.
72 """
73
74 def __init__(self, dumps=None, loads=None):
75 self._dumps = dumps or partial(json.dumps, ensure_ascii=False)
76 self._loads = loads or json.loads
77
78 # PERF(kgriffs): Test dumps once up front so we can set the
79 # proper serialize implementation.
80 result = self._dumps({'message': 'Hello World'})
81 if isinstance(result, str):
82 self.serialize = self._serialize_s
83 self.serialize_async = self._serialize_async_s
84 else:
85 self.serialize = self._serialize_b
86 self.serialize_async = self._serialize_async_b
87
88 # NOTE(kgriffs): To be safe, only enable the optimized protocol when
89 # not subclassed.
90 if type(self) is JSONHandler:
91 self._serialize_sync = self.serialize
92 self._deserialize_sync = self._deserialize
93
94 def _deserialize(self, data):
95 if not data:
96 raise errors.MediaNotFoundError('JSON')
97 try:
98 return self._loads(data.decode())
99 except ValueError as err:
100 raise errors.MediaMalformedError('JSON') from err
101
102 def deserialize(self, stream, content_type, content_length):
103 return self._deserialize(stream.read())
104
105 async def deserialize_async(self, stream, content_type, content_length):
106 return self._deserialize(await stream.read())
107
108 # NOTE(kgriffs): Make content_type a kwarg to support the
109 # Request.render_body() shortcut optimization.
110 def _serialize_s(self, media, content_type=None) -> bytes:
111 return self._dumps(media).encode()
112
113 async def _serialize_async_s(self, media, content_type) -> bytes:
114 return self._dumps(media).encode()
115
116 def _serialize_b(self, media, content_type) -> bytes:
117 return self._dumps(media)
118
119 async def _serialize_async_b(self, media, content_type) -> bytes:
120 return self._dumps(media)
121
122
123 class JSONHandlerWS(TextBaseHandlerWS):
124 """WebSocket media handler for de(serializing) JSON to/from TEXT payloads.
125
126 This handler uses Python's standard :py:mod:`json` library by default, but
127 can be easily configured to use any of a number of third-party JSON
128 libraries, depending on your needs. For example, you can often
129 realize a significant performance boost under CPython by using an
130 alternative library. Good options in this respect include `orjson`,
131 `python-rapidjson`, and `mujson`.
132
133 Note:
134 If you are deploying to PyPy, we recommend sticking with the standard
135 library's JSON implementation, since it will be faster in most cases
136 as compared to a third-party library.
137
138 Overriding the default JSON implementation is simply a matter of specifying
139 the desired ``dumps`` and ``loads`` functions::
140
141 import falcon
142 from falcon import media
143
144 import rapidjson
145
146 json_handler = media.JSONHandlerWS(
147 dumps=rapidjson.dumps,
148 loads=rapidjson.loads,
149 )
150
151 app = falcon.asgi.App()
152 app.ws_options.media_handlers[falcon.WebSocketPayloadType.TEXT] = json_handler
153
154 By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.
155 If you override the ``dumps`` function, you will need to explicitly set
156 ``ensure_ascii`` to ``False`` in order to enable the serialization of
157 Unicode characters to UTF-8. This is easily done by using
158 :any:`functools.partial` to apply the desired keyword argument. In fact, you
159 can use this same technique to customize any option supported by the
160 ``dumps`` and ``loads`` functions::
161
162 from functools import partial
163
164 from falcon import media
165 import rapidjson
166
167 json_handler = media.JSONHandlerWS(
168 dumps=partial(
169 rapidjson.dumps,
170 ensure_ascii=False, sort_keys=True
171 ),
172 )
173
174 Keyword Arguments:
175 dumps (func): Function to use when serializing JSON.
176 loads (func): Function to use when deserializing JSON.
177 """
178
179 __slots__ = ['dumps', 'loads']
180
181 def __init__(self, dumps=None, loads=None):
182 self._dumps = dumps or partial(json.dumps, ensure_ascii=False)
183 self._loads = loads or json.loads
184
185 def serialize(self, media: object) -> str:
186 return self._dumps(media)
187
188 def deserialize(self, payload: str) -> object:
189 return self._loads(payload)
190
191
192 http_error._DEFAULT_JSON_HANDLER = _DEFAULT_JSON_HANDLER = JSONHandler() # type: ignore
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/falcon/media/json.py b/falcon/media/json.py
--- a/falcon/media/json.py
+++ b/falcon/media/json.py
@@ -26,8 +26,10 @@
library's JSON implementation, since it will be faster in most cases
as compared to a third-party library.
- Overriding the default JSON implementation is simply a matter of specifying
- the desired ``dumps`` and ``loads`` functions::
+ You can replace the default JSON handler by using a custom JSON library
+ (see also: :ref:`custom_media_handlers`). Overriding the default JSON
+ implementation is simply a matter of specifying the desired ``dumps`` and
+ ``loads`` functions::
import falcon
from falcon import media
@@ -46,13 +48,39 @@
app.req_options.media_handlers.update(extra_handlers)
app.resp_options.media_handlers.update(extra_handlers)
+ Even if you decide to stick with the stdlib's :any:`json.dump` and
+ :any:`json.loads`, you can wrap them using :any:`functools.partial` to
+ provide custom serialization or deserialization parameters supported by the
+ ``dumps`` and ``loads`` functions, respectively
+ (see also: :ref:`prettifying-json-responses`)::
+
+ import falcon
+ from falcon import media
+
+ from functools import partial
+
+ json_handler = media.JSONHandler(
+ dumps=partial(
+ json.dumps,
+ default=str,
+ sort_keys=True,
+ ),
+ )
+ extra_handlers = {
+ 'application/json': json_handler,
+ }
+
+ app = falcon.App()
+ app.req_options.media_handlers.update(extra_handlers)
+ app.resp_options.media_handlers.update(extra_handlers)
+
By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.
If you override the ``dumps`` function, you will need to explicitly set
``ensure_ascii`` to ``False`` in order to enable the serialization of
Unicode characters to UTF-8. This is easily done by using
- :any:`functools.partial` to apply the desired keyword argument. In fact, you
- can use this same technique to customize any option supported by the
- ``dumps`` and ``loads`` functions::
+ :any:`functools.partial` to apply the desired keyword argument. As also
+ demonstrated in the previous paragraph, you can use this same technique to
+ customize any option supported by the ``dumps`` and ``loads`` functions::
from functools import partial
| {"golden_diff": "diff --git a/falcon/media/json.py b/falcon/media/json.py\n--- a/falcon/media/json.py\n+++ b/falcon/media/json.py\n@@ -26,8 +26,10 @@\n library's JSON implementation, since it will be faster in most cases\n as compared to a third-party library.\n \n- Overriding the default JSON implementation is simply a matter of specifying\n- the desired ``dumps`` and ``loads`` functions::\n+ You can replace the default JSON handler by using a custom JSON library\n+ (see also: :ref:`custom_media_handlers`). Overriding the default JSON\n+ implementation is simply a matter of specifying the desired ``dumps`` and\n+ ``loads`` functions::\n \n import falcon\n from falcon import media\n@@ -46,13 +48,39 @@\n app.req_options.media_handlers.update(extra_handlers)\n app.resp_options.media_handlers.update(extra_handlers)\n \n+ Even if you decide to stick with the stdlib's :any:`json.dump` and\n+ :any:`json.loads`, you can wrap them using :any:`functools.partial` to\n+ provide custom serialization or deserialization parameters supported by the\n+ ``dumps`` and ``loads`` functions, respectively\n+ (see also: :ref:`prettifying-json-responses`)::\n+\n+ import falcon\n+ from falcon import media\n+\n+ from functools import partial\n+\n+ json_handler = media.JSONHandler(\n+ dumps=partial(\n+ json.dumps,\n+ default=str,\n+ sort_keys=True,\n+ ),\n+ )\n+ extra_handlers = {\n+ 'application/json': json_handler,\n+ }\n+\n+ app = falcon.App()\n+ app.req_options.media_handlers.update(extra_handlers)\n+ app.resp_options.media_handlers.update(extra_handlers)\n+\n By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.\n If you override the ``dumps`` function, you will need to explicitly set\n ``ensure_ascii`` to ``False`` in order to enable the serialization of\n Unicode characters to UTF-8. This is easily done by using\n- :any:`functools.partial` to apply the desired keyword argument. In fact, you\n- can use this same technique to customize any option supported by the\n- ``dumps`` and ``loads`` functions::\n+ :any:`functools.partial` to apply the desired keyword argument. As also\n+ demonstrated in the previous paragraph, you can use this same technique to\n+ customize any option supported by the ``dumps`` and ``loads`` functions::\n \n from functools import partial\n", "issue": "Make JSONHandler customization docs clearer\nAs pointed out by @Stargateur in https://github.com/falconry/falcon/issues/1906#issuecomment-817374057, our [`JSONHandler`](https://falcon.readthedocs.io/en/stable/api/media.html#falcon.media.JSONHandler) customization docs could be made clearer by separately illustrating different (albeit closely related) concepts:\r\n* Use a custom JSON library (such as the exemplified `rapidjson`). Customize parameters.\r\n* Use the stdlib's `json` module, just provide custom serialization or deserialization parameters. Also link to the [\"Prettifying JSON Responses\" recipe](https://falcon.readthedocs.io/en/stable/user/recipes/pretty-json.html), which illustrates customization of `dumps` parameters.\r\n* Add a sentence or two about replacing the default JSON handlers, not just toss in a code snippet as it is at the time of writing this. 
Also link to [Replacing the Default Handlers](https://falcon.readthedocs.io/en/stable/api/media.html#custom-media-handlers) from that explanation.\n", "before_files": [{"content": "from functools import partial\nimport json\n\nfrom falcon import errors\nfrom falcon import http_error\nfrom falcon.media.base import BaseHandler\nfrom falcon.media.base import TextBaseHandlerWS\n\n\nclass JSONHandler(BaseHandler):\n \"\"\"JSON media handler.\n\n This handler uses Python's standard :py:mod:`json` library by default, but\n can be easily configured to use any of a number of third-party JSON\n libraries, depending on your needs. For example, you can often\n realize a significant performance boost under CPython by using an\n alternative library. Good options in this respect include `orjson`,\n `python-rapidjson`, and `mujson`.\n\n This handler will raise a :class:`falcon.MediaNotFoundError` when attempting\n to parse an empty body, or a :class:`falcon.MediaMalformedError`\n if an error happens while parsing the body.\n\n Note:\n If you are deploying to PyPy, we recommend sticking with the standard\n library's JSON implementation, since it will be faster in most cases\n as compared to a third-party library.\n\n Overriding the default JSON implementation is simply a matter of specifying\n the desired ``dumps`` and ``loads`` functions::\n\n import falcon\n from falcon import media\n\n import rapidjson\n\n json_handler = media.JSONHandler(\n dumps=rapidjson.dumps,\n loads=rapidjson.loads,\n )\n extra_handlers = {\n 'application/json': json_handler,\n }\n\n app = falcon.App()\n app.req_options.media_handlers.update(extra_handlers)\n app.resp_options.media_handlers.update(extra_handlers)\n\n By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.\n If you override the ``dumps`` function, you will need to explicitly set\n ``ensure_ascii`` to ``False`` in order to enable the serialization of\n Unicode characters to UTF-8. This is easily done by using\n :any:`functools.partial` to apply the desired keyword argument. 
In fact, you\n can use this same technique to customize any option supported by the\n ``dumps`` and ``loads`` functions::\n\n from functools import partial\n\n from falcon import media\n import rapidjson\n\n json_handler = media.JSONHandler(\n dumps=partial(\n rapidjson.dumps,\n ensure_ascii=False, sort_keys=True\n ),\n )\n\n Keyword Arguments:\n dumps (func): Function to use when serializing JSON responses.\n loads (func): Function to use when deserializing JSON requests.\n \"\"\"\n\n def __init__(self, dumps=None, loads=None):\n self._dumps = dumps or partial(json.dumps, ensure_ascii=False)\n self._loads = loads or json.loads\n\n # PERF(kgriffs): Test dumps once up front so we can set the\n # proper serialize implementation.\n result = self._dumps({'message': 'Hello World'})\n if isinstance(result, str):\n self.serialize = self._serialize_s\n self.serialize_async = self._serialize_async_s\n else:\n self.serialize = self._serialize_b\n self.serialize_async = self._serialize_async_b\n\n # NOTE(kgriffs): To be safe, only enable the optimized protocol when\n # not subclassed.\n if type(self) is JSONHandler:\n self._serialize_sync = self.serialize\n self._deserialize_sync = self._deserialize\n\n def _deserialize(self, data):\n if not data:\n raise errors.MediaNotFoundError('JSON')\n try:\n return self._loads(data.decode())\n except ValueError as err:\n raise errors.MediaMalformedError('JSON') from err\n\n def deserialize(self, stream, content_type, content_length):\n return self._deserialize(stream.read())\n\n async def deserialize_async(self, stream, content_type, content_length):\n return self._deserialize(await stream.read())\n\n # NOTE(kgriffs): Make content_type a kwarg to support the\n # Request.render_body() shortcut optimization.\n def _serialize_s(self, media, content_type=None) -> bytes:\n return self._dumps(media).encode()\n\n async def _serialize_async_s(self, media, content_type) -> bytes:\n return self._dumps(media).encode()\n\n def _serialize_b(self, media, content_type) -> bytes:\n return self._dumps(media)\n\n async def _serialize_async_b(self, media, content_type) -> bytes:\n return self._dumps(media)\n\n\nclass JSONHandlerWS(TextBaseHandlerWS):\n \"\"\"WebSocket media handler for de(serializing) JSON to/from TEXT payloads.\n\n This handler uses Python's standard :py:mod:`json` library by default, but\n can be easily configured to use any of a number of third-party JSON\n libraries, depending on your needs. For example, you can often\n realize a significant performance boost under CPython by using an\n alternative library. Good options in this respect include `orjson`,\n `python-rapidjson`, and `mujson`.\n\n Note:\n If you are deploying to PyPy, we recommend sticking with the standard\n library's JSON implementation, since it will be faster in most cases\n as compared to a third-party library.\n\n Overriding the default JSON implementation is simply a matter of specifying\n the desired ``dumps`` and ``loads`` functions::\n\n import falcon\n from falcon import media\n\n import rapidjson\n\n json_handler = media.JSONHandlerWS(\n dumps=rapidjson.dumps,\n loads=rapidjson.loads,\n )\n\n app = falcon.asgi.App()\n app.ws_options.media_handlers[falcon.WebSocketPayloadType.TEXT] = json_handler\n\n By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.\n If you override the ``dumps`` function, you will need to explicitly set\n ``ensure_ascii`` to ``False`` in order to enable the serialization of\n Unicode characters to UTF-8. 
This is easily done by using\n :any:`functools.partial` to apply the desired keyword argument. In fact, you\n can use this same technique to customize any option supported by the\n ``dumps`` and ``loads`` functions::\n\n from functools import partial\n\n from falcon import media\n import rapidjson\n\n json_handler = media.JSONHandlerWS(\n dumps=partial(\n rapidjson.dumps,\n ensure_ascii=False, sort_keys=True\n ),\n )\n\n Keyword Arguments:\n dumps (func): Function to use when serializing JSON.\n loads (func): Function to use when deserializing JSON.\n \"\"\"\n\n __slots__ = ['dumps', 'loads']\n\n def __init__(self, dumps=None, loads=None):\n self._dumps = dumps or partial(json.dumps, ensure_ascii=False)\n self._loads = loads or json.loads\n\n def serialize(self, media: object) -> str:\n return self._dumps(media)\n\n def deserialize(self, payload: str) -> object:\n return self._loads(payload)\n\n\nhttp_error._DEFAULT_JSON_HANDLER = _DEFAULT_JSON_HANDLER = JSONHandler() # type: ignore\n", "path": "falcon/media/json.py"}], "after_files": [{"content": "from functools import partial\nimport json\n\nfrom falcon import errors\nfrom falcon import http_error\nfrom falcon.media.base import BaseHandler\nfrom falcon.media.base import TextBaseHandlerWS\n\n\nclass JSONHandler(BaseHandler):\n \"\"\"JSON media handler.\n\n This handler uses Python's standard :py:mod:`json` library by default, but\n can be easily configured to use any of a number of third-party JSON\n libraries, depending on your needs. For example, you can often\n realize a significant performance boost under CPython by using an\n alternative library. Good options in this respect include `orjson`,\n `python-rapidjson`, and `mujson`.\n\n This handler will raise a :class:`falcon.MediaNotFoundError` when attempting\n to parse an empty body, or a :class:`falcon.MediaMalformedError`\n if an error happens while parsing the body.\n\n Note:\n If you are deploying to PyPy, we recommend sticking with the standard\n library's JSON implementation, since it will be faster in most cases\n as compared to a third-party library.\n\n You can replace the default JSON handler by using a custom JSON library\n (see also: :ref:`custom_media_handlers`). 
Overriding the default JSON\n implementation is simply a matter of specifying the desired ``dumps`` and\n ``loads`` functions::\n\n import falcon\n from falcon import media\n\n import rapidjson\n\n json_handler = media.JSONHandler(\n dumps=rapidjson.dumps,\n loads=rapidjson.loads,\n )\n extra_handlers = {\n 'application/json': json_handler,\n }\n\n app = falcon.App()\n app.req_options.media_handlers.update(extra_handlers)\n app.resp_options.media_handlers.update(extra_handlers)\n\n Even if you decide to stick with the stdlib's :any:`json.dump` and\n :any:`json.loads`, you can wrap them using :any:`functools.partial` to\n provide custom serialization or deserialization parameters supported by the\n ``dumps`` and ``loads`` functions, respectively\n (see also: :ref:`prettifying-json-responses`)::\n\n import falcon\n from falcon import media\n\n from functools import partial\n\n json_handler = media.JSONHandler(\n dumps=partial(\n json.dumps,\n default=str,\n sort_keys=True,\n ),\n )\n extra_handlers = {\n 'application/json': json_handler,\n }\n\n app = falcon.App()\n app.req_options.media_handlers.update(extra_handlers)\n app.resp_options.media_handlers.update(extra_handlers)\n\n By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.\n If you override the ``dumps`` function, you will need to explicitly set\n ``ensure_ascii`` to ``False`` in order to enable the serialization of\n Unicode characters to UTF-8. This is easily done by using\n :any:`functools.partial` to apply the desired keyword argument. As also\n demonstrated in the previous paragraph, you can use this same technique to\n customize any option supported by the ``dumps`` and ``loads`` functions::\n\n from functools import partial\n\n from falcon import media\n import rapidjson\n\n json_handler = media.JSONHandler(\n dumps=partial(\n rapidjson.dumps,\n ensure_ascii=False, sort_keys=True\n ),\n )\n\n Keyword Arguments:\n dumps (func): Function to use when serializing JSON responses.\n loads (func): Function to use when deserializing JSON requests.\n \"\"\"\n\n def __init__(self, dumps=None, loads=None):\n self._dumps = dumps or partial(json.dumps, ensure_ascii=False)\n self._loads = loads or json.loads\n\n # PERF(kgriffs): Test dumps once up front so we can set the\n # proper serialize implementation.\n result = self._dumps({'message': 'Hello World'})\n if isinstance(result, str):\n self.serialize = self._serialize_s\n self.serialize_async = self._serialize_async_s\n else:\n self.serialize = self._serialize_b\n self.serialize_async = self._serialize_async_b\n\n # NOTE(kgriffs): To be safe, only enable the optimized protocol when\n # not subclassed.\n if type(self) is JSONHandler:\n self._serialize_sync = self.serialize\n self._deserialize_sync = self._deserialize\n\n def _deserialize(self, data):\n if not data:\n raise errors.MediaNotFoundError('JSON')\n try:\n return self._loads(data.decode())\n except ValueError as err:\n raise errors.MediaMalformedError('JSON') from err\n\n def deserialize(self, stream, content_type, content_length):\n return self._deserialize(stream.read())\n\n async def deserialize_async(self, stream, content_type, content_length):\n return self._deserialize(await stream.read())\n\n # NOTE(kgriffs): Make content_type a kwarg to support the\n # Request.render_body() shortcut optimization.\n def _serialize_s(self, media, content_type=None) -> bytes:\n return self._dumps(media).encode()\n\n async def _serialize_async_s(self, media, content_type) -> bytes:\n return self._dumps(media).encode()\n\n 
def _serialize_b(self, media, content_type) -> bytes:\n return self._dumps(media)\n\n async def _serialize_async_b(self, media, content_type) -> bytes:\n return self._dumps(media)\n\n\nclass JSONHandlerWS(TextBaseHandlerWS):\n \"\"\"WebSocket media handler for de(serializing) JSON to/from TEXT payloads.\n\n This handler uses Python's standard :py:mod:`json` library by default, but\n can be easily configured to use any of a number of third-party JSON\n libraries, depending on your needs. For example, you can often\n realize a significant performance boost under CPython by using an\n alternative library. Good options in this respect include `orjson`,\n `python-rapidjson`, and `mujson`.\n\n Note:\n If you are deploying to PyPy, we recommend sticking with the standard\n library's JSON implementation, since it will be faster in most cases\n as compared to a third-party library.\n\n Overriding the default JSON implementation is simply a matter of specifying\n the desired ``dumps`` and ``loads`` functions::\n\n import falcon\n from falcon import media\n\n import rapidjson\n\n json_handler = media.JSONHandlerWS(\n dumps=rapidjson.dumps,\n loads=rapidjson.loads,\n )\n\n app = falcon.asgi.App()\n app.ws_options.media_handlers[falcon.WebSocketPayloadType.TEXT] = json_handler\n\n By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.\n If you override the ``dumps`` function, you will need to explicitly set\n ``ensure_ascii`` to ``False`` in order to enable the serialization of\n Unicode characters to UTF-8. This is easily done by using\n :any:`functools.partial` to apply the desired keyword argument. In fact, you\n can use this same technique to customize any option supported by the\n ``dumps`` and ``loads`` functions::\n\n from functools import partial\n\n from falcon import media\n import rapidjson\n\n json_handler = media.JSONHandlerWS(\n dumps=partial(\n rapidjson.dumps,\n ensure_ascii=False, sort_keys=True\n ),\n )\n\n Keyword Arguments:\n dumps (func): Function to use when serializing JSON.\n loads (func): Function to use when deserializing JSON.\n \"\"\"\n\n __slots__ = ['dumps', 'loads']\n\n def __init__(self, dumps=None, loads=None):\n self._dumps = dumps or partial(json.dumps, ensure_ascii=False)\n self._loads = loads or json.loads\n\n def serialize(self, media: object) -> str:\n return self._dumps(media)\n\n def deserialize(self, payload: str) -> object:\n return self._loads(payload)\n\n\nhttp_error._DEFAULT_JSON_HANDLER = _DEFAULT_JSON_HANDLER = JSONHandler() # type: ignore\n", "path": "falcon/media/json.py"}]} | 2,516 | 583 |
gh_patches_debug_17857 | rasdani/github-patches | git_diff | python-discord__bot-723 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow for throw away words after the rules command call
fixes #723
This simply catches all strings after a sequence of ints. This allows us to write a message after the list of rules we wish to display.
Example:
`!rules 5 6 We do not allow for paid work, and that will break ToS of x and y`
Disclaimer: I didn't get the site to respond properly, so I haven't tested this with bot+site.
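For illustration, a minimal sketch of the converter behaviour described above, mirroring the proposed `Greedy[int]` signature (the `ctx.send` body is hypothetical — the real alias delegates to the `site rules` command):

```python
from discord.ext import commands


class Alias(commands.Cog):
    """Sketch of the rules alias with throw-away trailing words."""

    @commands.command(name="rules", aliases=("rule",), hidden=True)
    async def site_rules_alias(self, ctx: commands.Context, rules: commands.Greedy[int], *_: str) -> None:
        # Greedy[int] consumes the leading integers ("5 6"); any trailing free-form
        # words ("We do not allow ...") land in *_ and are simply ignored.
        await ctx.send(f"Requested rules: {list(rules)}")
```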
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/alias.py`
Content:
```
1 import inspect
2 import logging
3 from typing import Union
4
5 from discord import Colour, Embed, Member, User
6 from discord.ext.commands import Cog, Command, Context, clean_content, command, group
7
8 from bot.bot import Bot
9 from bot.cogs.extensions import Extension
10 from bot.cogs.watchchannels.watchchannel import proxy_user
11 from bot.converters import TagNameConverter
12 from bot.pagination import LinePaginator
13
14 log = logging.getLogger(__name__)
15
16
17 class Alias (Cog):
18 """Aliases for commonly used commands."""
19
20 def __init__(self, bot: Bot):
21 self.bot = bot
22
23 async def invoke(self, ctx: Context, cmd_name: str, *args, **kwargs) -> None:
24 """Invokes a command with args and kwargs."""
25 log.debug(f"{cmd_name} was invoked through an alias")
26 cmd = self.bot.get_command(cmd_name)
27 if not cmd:
28 return log.warning(f'Did not find command "{cmd_name}" to invoke.')
29 elif not await cmd.can_run(ctx):
30 return log.warning(
31 f'{str(ctx.author)} tried to run the command "{cmd_name}"'
32 )
33
34 await ctx.invoke(cmd, *args, **kwargs)
35
36 @command(name='aliases')
37 async def aliases_command(self, ctx: Context) -> None:
38 """Show configured aliases on the bot."""
39 embed = Embed(
40 title='Configured aliases',
41 colour=Colour.blue()
42 )
43 await LinePaginator.paginate(
44 (
45 f"• `{ctx.prefix}{value.name}` "
46 f"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`"
47 for name, value in inspect.getmembers(self)
48 if isinstance(value, Command) and name.endswith('_alias')
49 ),
50 ctx, embed, empty=False, max_lines=20
51 )
52
53 @command(name="resources", aliases=("resource",), hidden=True)
54 async def site_resources_alias(self, ctx: Context) -> None:
55 """Alias for invoking <prefix>site resources."""
56 await self.invoke(ctx, "site resources")
57
58 @command(name="tools", hidden=True)
59 async def site_tools_alias(self, ctx: Context) -> None:
60 """Alias for invoking <prefix>site tools."""
61 await self.invoke(ctx, "site tools")
62
63 @command(name="watch", hidden=True)
64 async def bigbrother_watch_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:
65 """Alias for invoking <prefix>bigbrother watch [user] [reason]."""
66 await self.invoke(ctx, "bigbrother watch", user, reason=reason)
67
68 @command(name="unwatch", hidden=True)
69 async def bigbrother_unwatch_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:
70 """Alias for invoking <prefix>bigbrother unwatch [user] [reason]."""
71 await self.invoke(ctx, "bigbrother unwatch", user, reason=reason)
72
73 @command(name="home", hidden=True)
74 async def site_home_alias(self, ctx: Context) -> None:
75 """Alias for invoking <prefix>site home."""
76 await self.invoke(ctx, "site home")
77
78 @command(name="faq", hidden=True)
79 async def site_faq_alias(self, ctx: Context) -> None:
80 """Alias for invoking <prefix>site faq."""
81 await self.invoke(ctx, "site faq")
82
83 @command(name="rules", aliases=("rule",), hidden=True)
84 async def site_rules_alias(self, ctx: Context, *rules: int) -> None:
85 """Alias for invoking <prefix>site rules."""
86 await self.invoke(ctx, "site rules", *rules)
87
88 @command(name="reload", hidden=True)
89 async def extensions_reload_alias(self, ctx: Context, *extensions: Extension) -> None:
90 """Alias for invoking <prefix>extensions reload [extensions...]."""
91 await self.invoke(ctx, "extensions reload", *extensions)
92
93 @command(name="defon", hidden=True)
94 async def defcon_enable_alias(self, ctx: Context) -> None:
95 """Alias for invoking <prefix>defcon enable."""
96 await self.invoke(ctx, "defcon enable")
97
98 @command(name="defoff", hidden=True)
99 async def defcon_disable_alias(self, ctx: Context) -> None:
100 """Alias for invoking <prefix>defcon disable."""
101 await self.invoke(ctx, "defcon disable")
102
103 @command(name="exception", hidden=True)
104 async def tags_get_traceback_alias(self, ctx: Context) -> None:
105 """Alias for invoking <prefix>tags get traceback."""
106 await self.invoke(ctx, "tags get", tag_name="traceback")
107
108 @group(name="get",
109 aliases=("show", "g"),
110 hidden=True,
111 invoke_without_command=True)
112 async def get_group_alias(self, ctx: Context) -> None:
113 """Group for reverse aliases for commands like `tags get`, allowing for `get tags` or `get docs`."""
114 pass
115
116 @get_group_alias.command(name="tags", aliases=("tag", "t"), hidden=True)
117 async def tags_get_alias(
118 self, ctx: Context, *, tag_name: TagNameConverter = None
119 ) -> None:
120 """
121 Alias for invoking <prefix>tags get [tag_name].
122
123 tag_name: str - tag to be viewed.
124 """
125 await self.invoke(ctx, "tags get", tag_name=tag_name)
126
127 @get_group_alias.command(name="docs", aliases=("doc", "d"), hidden=True)
128 async def docs_get_alias(
129 self, ctx: Context, symbol: clean_content = None
130 ) -> None:
131 """Alias for invoking <prefix>docs get [symbol]."""
132 await self.invoke(ctx, "docs get", symbol)
133
134 @command(name="nominate", hidden=True)
135 async def nomination_add_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:
136 """Alias for invoking <prefix>talentpool add [user] [reason]."""
137 await self.invoke(ctx, "talentpool add", user, reason=reason)
138
139 @command(name="unnominate", hidden=True)
140 async def nomination_end_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:
141 """Alias for invoking <prefix>nomination end [user] [reason]."""
142 await self.invoke(ctx, "nomination end", user, reason=reason)
143
144 @command(name="nominees", hidden=True)
145 async def nominees_alias(self, ctx: Context) -> None:
146 """Alias for invoking <prefix>tp watched."""
147 await self.invoke(ctx, "talentpool watched")
148
149
150 def setup(bot: Bot) -> None:
151 """Load the Alias cog."""
152 bot.add_cog(Alias(bot))
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/cogs/alias.py b/bot/cogs/alias.py
--- a/bot/cogs/alias.py
+++ b/bot/cogs/alias.py
@@ -3,7 +3,10 @@
from typing import Union
from discord import Colour, Embed, Member, User
-from discord.ext.commands import Cog, Command, Context, clean_content, command, group
+from discord.ext.commands import (
+ Cog, Command, Context, Greedy,
+ clean_content, command, group,
+)
from bot.bot import Bot
from bot.cogs.extensions import Extension
@@ -81,7 +84,7 @@
await self.invoke(ctx, "site faq")
@command(name="rules", aliases=("rule",), hidden=True)
- async def site_rules_alias(self, ctx: Context, *rules: int) -> None:
+ async def site_rules_alias(self, ctx: Context, rules: Greedy[int], *_: str) -> None:
"""Alias for invoking <prefix>site rules."""
await self.invoke(ctx, "site rules", *rules)
| {"golden_diff": "diff --git a/bot/cogs/alias.py b/bot/cogs/alias.py\n--- a/bot/cogs/alias.py\n+++ b/bot/cogs/alias.py\n@@ -3,7 +3,10 @@\n from typing import Union\n \n from discord import Colour, Embed, Member, User\n-from discord.ext.commands import Cog, Command, Context, clean_content, command, group\n+from discord.ext.commands import (\n+ Cog, Command, Context, Greedy,\n+ clean_content, command, group,\n+)\n \n from bot.bot import Bot\n from bot.cogs.extensions import Extension\n@@ -81,7 +84,7 @@\n await self.invoke(ctx, \"site faq\")\n \n @command(name=\"rules\", aliases=(\"rule\",), hidden=True)\n- async def site_rules_alias(self, ctx: Context, *rules: int) -> None:\n+ async def site_rules_alias(self, ctx: Context, rules: Greedy[int], *_: str) -> None:\n \"\"\"Alias for invoking <prefix>site rules.\"\"\"\n await self.invoke(ctx, \"site rules\", *rules)\n", "issue": "Allow for throw away words after the rules command call\nfixes #723 \r\nThis simply catches all strings after a sequence of ints. This allows us to write a message after the list of rules we wish to display. \r\nExample:\r\n`!rules 5 6 We do not allow for paid work, and that will break ToS of x and y` \r\n\r\nDisclaimer, didn't get site to respond properly so haven't tested this with bot+site.\n", "before_files": [{"content": "import inspect\nimport logging\nfrom typing import Union\n\nfrom discord import Colour, Embed, Member, User\nfrom discord.ext.commands import Cog, Command, Context, clean_content, command, group\n\nfrom bot.bot import Bot\nfrom bot.cogs.extensions import Extension\nfrom bot.cogs.watchchannels.watchchannel import proxy_user\nfrom bot.converters import TagNameConverter\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\n\nclass Alias (Cog):\n \"\"\"Aliases for commonly used commands.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n async def invoke(self, ctx: Context, cmd_name: str, *args, **kwargs) -> None:\n \"\"\"Invokes a command with args and kwargs.\"\"\"\n log.debug(f\"{cmd_name} was invoked through an alias\")\n cmd = self.bot.get_command(cmd_name)\n if not cmd:\n return log.warning(f'Did not find command \"{cmd_name}\" to invoke.')\n elif not await cmd.can_run(ctx):\n return log.warning(\n f'{str(ctx.author)} tried to run the command \"{cmd_name}\"'\n )\n\n await ctx.invoke(cmd, *args, **kwargs)\n\n @command(name='aliases')\n async def aliases_command(self, ctx: Context) -> None:\n \"\"\"Show configured aliases on the bot.\"\"\"\n embed = Embed(\n title='Configured aliases',\n colour=Colour.blue()\n )\n await LinePaginator.paginate(\n (\n f\"\u2022 `{ctx.prefix}{value.name}` \"\n f\"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`\"\n for name, value in inspect.getmembers(self)\n if isinstance(value, Command) and name.endswith('_alias')\n ),\n ctx, embed, empty=False, max_lines=20\n )\n\n @command(name=\"resources\", aliases=(\"resource\",), hidden=True)\n async def site_resources_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site resources.\"\"\"\n await self.invoke(ctx, \"site resources\")\n\n @command(name=\"tools\", hidden=True)\n async def site_tools_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site tools.\"\"\"\n await self.invoke(ctx, \"site tools\")\n\n @command(name=\"watch\", hidden=True)\n async def bigbrother_watch_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>bigbrother watch [user] [reason].\"\"\"\n 
await self.invoke(ctx, \"bigbrother watch\", user, reason=reason)\n\n @command(name=\"unwatch\", hidden=True)\n async def bigbrother_unwatch_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>bigbrother unwatch [user] [reason].\"\"\"\n await self.invoke(ctx, \"bigbrother unwatch\", user, reason=reason)\n\n @command(name=\"home\", hidden=True)\n async def site_home_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site home.\"\"\"\n await self.invoke(ctx, \"site home\")\n\n @command(name=\"faq\", hidden=True)\n async def site_faq_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site faq.\"\"\"\n await self.invoke(ctx, \"site faq\")\n\n @command(name=\"rules\", aliases=(\"rule\",), hidden=True)\n async def site_rules_alias(self, ctx: Context, *rules: int) -> None:\n \"\"\"Alias for invoking <prefix>site rules.\"\"\"\n await self.invoke(ctx, \"site rules\", *rules)\n\n @command(name=\"reload\", hidden=True)\n async def extensions_reload_alias(self, ctx: Context, *extensions: Extension) -> None:\n \"\"\"Alias for invoking <prefix>extensions reload [extensions...].\"\"\"\n await self.invoke(ctx, \"extensions reload\", *extensions)\n\n @command(name=\"defon\", hidden=True)\n async def defcon_enable_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>defcon enable.\"\"\"\n await self.invoke(ctx, \"defcon enable\")\n\n @command(name=\"defoff\", hidden=True)\n async def defcon_disable_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>defcon disable.\"\"\"\n await self.invoke(ctx, \"defcon disable\")\n\n @command(name=\"exception\", hidden=True)\n async def tags_get_traceback_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>tags get traceback.\"\"\"\n await self.invoke(ctx, \"tags get\", tag_name=\"traceback\")\n\n @group(name=\"get\",\n aliases=(\"show\", \"g\"),\n hidden=True,\n invoke_without_command=True)\n async def get_group_alias(self, ctx: Context) -> None:\n \"\"\"Group for reverse aliases for commands like `tags get`, allowing for `get tags` or `get docs`.\"\"\"\n pass\n\n @get_group_alias.command(name=\"tags\", aliases=(\"tag\", \"t\"), hidden=True)\n async def tags_get_alias(\n self, ctx: Context, *, tag_name: TagNameConverter = None\n ) -> None:\n \"\"\"\n Alias for invoking <prefix>tags get [tag_name].\n\n tag_name: str - tag to be viewed.\n \"\"\"\n await self.invoke(ctx, \"tags get\", tag_name=tag_name)\n\n @get_group_alias.command(name=\"docs\", aliases=(\"doc\", \"d\"), hidden=True)\n async def docs_get_alias(\n self, ctx: Context, symbol: clean_content = None\n ) -> None:\n \"\"\"Alias for invoking <prefix>docs get [symbol].\"\"\"\n await self.invoke(ctx, \"docs get\", symbol)\n\n @command(name=\"nominate\", hidden=True)\n async def nomination_add_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>talentpool add [user] [reason].\"\"\"\n await self.invoke(ctx, \"talentpool add\", user, reason=reason)\n\n @command(name=\"unnominate\", hidden=True)\n async def nomination_end_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>nomination end [user] [reason].\"\"\"\n await self.invoke(ctx, \"nomination end\", user, reason=reason)\n\n @command(name=\"nominees\", hidden=True)\n async def nominees_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>tp watched.\"\"\"\n 
await self.invoke(ctx, \"talentpool watched\")\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Alias cog.\"\"\"\n bot.add_cog(Alias(bot))\n", "path": "bot/cogs/alias.py"}], "after_files": [{"content": "import inspect\nimport logging\nfrom typing import Union\n\nfrom discord import Colour, Embed, Member, User\nfrom discord.ext.commands import (\n Cog, Command, Context, Greedy,\n clean_content, command, group,\n)\n\nfrom bot.bot import Bot\nfrom bot.cogs.extensions import Extension\nfrom bot.cogs.watchchannels.watchchannel import proxy_user\nfrom bot.converters import TagNameConverter\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\n\nclass Alias (Cog):\n \"\"\"Aliases for commonly used commands.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n async def invoke(self, ctx: Context, cmd_name: str, *args, **kwargs) -> None:\n \"\"\"Invokes a command with args and kwargs.\"\"\"\n log.debug(f\"{cmd_name} was invoked through an alias\")\n cmd = self.bot.get_command(cmd_name)\n if not cmd:\n return log.warning(f'Did not find command \"{cmd_name}\" to invoke.')\n elif not await cmd.can_run(ctx):\n return log.warning(\n f'{str(ctx.author)} tried to run the command \"{cmd_name}\"'\n )\n\n await ctx.invoke(cmd, *args, **kwargs)\n\n @command(name='aliases')\n async def aliases_command(self, ctx: Context) -> None:\n \"\"\"Show configured aliases on the bot.\"\"\"\n embed = Embed(\n title='Configured aliases',\n colour=Colour.blue()\n )\n await LinePaginator.paginate(\n (\n f\"\u2022 `{ctx.prefix}{value.name}` \"\n f\"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`\"\n for name, value in inspect.getmembers(self)\n if isinstance(value, Command) and name.endswith('_alias')\n ),\n ctx, embed, empty=False, max_lines=20\n )\n\n @command(name=\"resources\", aliases=(\"resource\",), hidden=True)\n async def site_resources_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site resources.\"\"\"\n await self.invoke(ctx, \"site resources\")\n\n @command(name=\"tools\", hidden=True)\n async def site_tools_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site tools.\"\"\"\n await self.invoke(ctx, \"site tools\")\n\n @command(name=\"watch\", hidden=True)\n async def bigbrother_watch_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>bigbrother watch [user] [reason].\"\"\"\n await self.invoke(ctx, \"bigbrother watch\", user, reason=reason)\n\n @command(name=\"unwatch\", hidden=True)\n async def bigbrother_unwatch_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>bigbrother unwatch [user] [reason].\"\"\"\n await self.invoke(ctx, \"bigbrother unwatch\", user, reason=reason)\n\n @command(name=\"home\", hidden=True)\n async def site_home_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site home.\"\"\"\n await self.invoke(ctx, \"site home\")\n\n @command(name=\"faq\", hidden=True)\n async def site_faq_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site faq.\"\"\"\n await self.invoke(ctx, \"site faq\")\n\n @command(name=\"rules\", aliases=(\"rule\",), hidden=True)\n async def site_rules_alias(self, ctx: Context, rules: Greedy[int], *_: str) -> None:\n \"\"\"Alias for invoking <prefix>site rules.\"\"\"\n await self.invoke(ctx, \"site rules\", *rules)\n\n @command(name=\"reload\", hidden=True)\n async def extensions_reload_alias(self, ctx: 
Context, *extensions: Extension) -> None:\n \"\"\"Alias for invoking <prefix>extensions reload [extensions...].\"\"\"\n await self.invoke(ctx, \"extensions reload\", *extensions)\n\n @command(name=\"defon\", hidden=True)\n async def defcon_enable_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>defcon enable.\"\"\"\n await self.invoke(ctx, \"defcon enable\")\n\n @command(name=\"defoff\", hidden=True)\n async def defcon_disable_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>defcon disable.\"\"\"\n await self.invoke(ctx, \"defcon disable\")\n\n @command(name=\"exception\", hidden=True)\n async def tags_get_traceback_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>tags get traceback.\"\"\"\n await self.invoke(ctx, \"tags get\", tag_name=\"traceback\")\n\n @group(name=\"get\",\n aliases=(\"show\", \"g\"),\n hidden=True,\n invoke_without_command=True)\n async def get_group_alias(self, ctx: Context) -> None:\n \"\"\"Group for reverse aliases for commands like `tags get`, allowing for `get tags` or `get docs`.\"\"\"\n pass\n\n @get_group_alias.command(name=\"tags\", aliases=(\"tag\", \"t\"), hidden=True)\n async def tags_get_alias(\n self, ctx: Context, *, tag_name: TagNameConverter = None\n ) -> None:\n \"\"\"\n Alias for invoking <prefix>tags get [tag_name].\n\n tag_name: str - tag to be viewed.\n \"\"\"\n await self.invoke(ctx, \"tags get\", tag_name=tag_name)\n\n @get_group_alias.command(name=\"docs\", aliases=(\"doc\", \"d\"), hidden=True)\n async def docs_get_alias(\n self, ctx: Context, symbol: clean_content = None\n ) -> None:\n \"\"\"Alias for invoking <prefix>docs get [symbol].\"\"\"\n await self.invoke(ctx, \"docs get\", symbol)\n\n @command(name=\"nominate\", hidden=True)\n async def nomination_add_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>talentpool add [user] [reason].\"\"\"\n await self.invoke(ctx, \"talentpool add\", user, reason=reason)\n\n @command(name=\"unnominate\", hidden=True)\n async def nomination_end_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>nomination end [user] [reason].\"\"\"\n await self.invoke(ctx, \"nomination end\", user, reason=reason)\n\n @command(name=\"nominees\", hidden=True)\n async def nominees_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>tp watched.\"\"\"\n await self.invoke(ctx, \"talentpool watched\")\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Alias cog.\"\"\"\n bot.add_cog(Alias(bot))\n", "path": "bot/cogs/alias.py"}]} | 2,202 | 241 |
gh_patches_debug_10891 | rasdani/github-patches | git_diff | openfun__marsha-2578 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
check_live_state pop from empty list
## Bug Report
**Problematic Behavior**
The management command `check_live_state` has a recurring error: it tries to pop a value from an empty list. The list is populated from CloudWatch log events: https://github.com/openfun/marsha/blob/29e1f78ed6e288f7bba3c198bb7b7179e7af4fe0/src/backend/marsha/core/management/commands/check_live_state.py#L100
**Expected behavior/code**
This error seems to occur when a live has no activity anymore. We compare the `SET` alerts against the `CLEARED` ones to determine whether an alert is still active.
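A minimal, self-contained sketch of the suspected failure mode (the event payload is simplified and hypothetical): if the filtered logs contain a `CLEARED` event whose matching `SET` event falls outside the queried time window, the pairing loop calls `pop()` on an empty list:

```python
# Only the CLEARED half of a SET/CLEARED pair is present in the filtered logs.
events = [{"detail": {"pipeline": "0", "alarm_state": "CLEARED"}}]

pipelines_queue = {"0": [], "1": []}
for message in events:
    if message["detail"]["alarm_state"] == "SET":
        pipelines_queue[message["detail"]["pipeline"]].append(message)
    else:
        # IndexError: pop from empty list
        pipelines_queue[message["detail"]["pipeline"]].pop()
```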
**Steps to Reproduce**
1. Start a webinar
2. Once started, stop all activity
3. Run the management command `check_live_state`
4. And then the bug happens!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/backend/marsha/core/management/commands/check_live_state.py`
Content:
```
1 """Check live state management command."""
2
3 from datetime import datetime, timedelta, timezone
4 import json
5 import re
6
7 from django.conf import settings
8 from django.core.management.base import BaseCommand
9
10 import boto3
11 from dateutil.parser import isoparse
12
13 from marsha.core.defaults import RUNNING, STOPPING
14 from marsha.core.models import Video
15 from marsha.core.utils.medialive_utils import stop_live_channel
16
17
18 aws_credentials = {
19 "aws_access_key_id": settings.AWS_ACCESS_KEY_ID,
20 "aws_secret_access_key": settings.AWS_SECRET_ACCESS_KEY,
21 "region_name": settings.AWS_S3_REGION_NAME,
22 }
23
24 # Configure medialive client
25 medialive_client = boto3.client("medialive", **aws_credentials)
26
27 # Configure cloudwatch logs client
28 logs_client = boto3.client("logs", **aws_credentials)
29
30
31 def parse_iso_date(iso_date):
32 """Parse an iso 8601 date and return a datetime object."""
33 return isoparse(iso_date)
34
35
36 def generate_expired_date():
37 """Generate a datetime object 25 minutes in the past."""
38 return datetime.now(tz=timezone.utc) - timedelta(minutes=25)
39
40
41 # pylint: disable=too-many-locals
42 class Command(BaseCommand):
43 """Check every live streaming running state on AWS."""
44
45 help = (
46 "Check activity on AWS for every live streaming running"
47 "and close them if there is not."
48 )
49
50 def handle(self, *args, **options):
51 """Execute management command."""
52 extract_message_pattern = (
53 r"^(?P<ingestion_time>.*)\t"
54 r"(?P<request_id>.*)\t"
55 r"(?P<level>.*)\t"
56 r"Received event:(?P<message>.*)$"
57 )
58 extract_message_regex = re.compile(extract_message_pattern)
59
60 videos = Video.objects.filter(live_state=RUNNING)
61 for video in videos:
62 # For each running live video, we query cloudwatch on the current live
63 # to search messages having detail.alert_type set to `RTMP Has No Audio/Video`.
64 # This alert tell us there is no stream and the live can be stopped if the message is
65 # older than 25 minutes.
66 self.stdout.write(f"Checking video {video.id}")
67 live_info = video.live_info
68 logs = logs_client.filter_log_events(
69 logGroupName=live_info["cloudwatch"]["logGroupName"],
70 startTime=int(int(video.live_info.get("started_at")) * 1000),
71 filterPattern=(
72 "{"
73 '($.detail-type = "MediaLive Channel Alert") && '
74 f"($.resources[0] = \"{live_info['medialive']['channel']['arn']}\") &&"
75 '($.detail.alert_type = "RTMP Has No Audio/Video")'
76 "}"
77 ),
78 )
79
80 pipelines_queue = {"0": [], "1": []}
81
82 for event in logs["events"]:
83 # All events must be parsed to extract the JSON message. When an alert is added,
84 # the `alarm_state` property value is `SET` and when the alert is removed,
85 # the `alarm_state` property value is `CLEARED`.
86 # We have 2 pipelines, a live is over when the 2 pipeline have `SET` value
87 # in `alarm_state`.
88 # Alarm state act like a list with all the event history. It means a `CLEARED`
89 # event is related to a `SET` one. So we have to look over all events, put in
90 # a list all `SET` events and remove it if a `CLEARED` event is here. At the
91 # end if we have 2 `SET` events, the live has no activity and we have to check
92 # the time of the last `SET` event. If this time is older than 25 minutes we
93 # stop the channel.
94 log = extract_message_regex.match(event["message"])
95 message = json.loads(log.group("message"))
96
97 if message["detail"]["alarm_state"] == "SET":
98 pipelines_queue[message["detail"]["pipeline"]].append(message)
99 else:
100 pipelines_queue[message["detail"]["pipeline"]].pop()
101
102 if len(pipelines_queue["0"]) == 1 and len(pipelines_queue["1"]) == 1:
103 # Both pipelines receive no stream, we have to check the more recent one
104 # and if the time is older than 25 minutes we stop the channel.
105 datetime_pipeline0 = parse_iso_date(pipelines_queue["0"][0]["time"])
106 datetime_pipeline1 = parse_iso_date(pipelines_queue["1"][0]["time"])
107 expired_date = generate_expired_date()
108
109 if (
110 datetime_pipeline0 < expired_date
111 or datetime_pipeline1 < expired_date
112 ):
113 # Stop this channel
114 self.stdout.write(
115 f"Stopping channel with id {live_info['medialive']['channel']['id']}"
116 )
117 stop_live_channel(live_info["medialive"]["channel"]["id"])
118
119 video.live_state = STOPPING
120 video.save()
121 self.stdout.write("Channel stopped")
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/backend/marsha/core/management/commands/check_live_state.py b/src/backend/marsha/core/management/commands/check_live_state.py
--- a/src/backend/marsha/core/management/commands/check_live_state.py
+++ b/src/backend/marsha/core/management/commands/check_live_state.py
@@ -67,7 +67,7 @@
live_info = video.live_info
logs = logs_client.filter_log_events(
logGroupName=live_info["cloudwatch"]["logGroupName"],
- startTime=int(int(video.live_info.get("started_at")) * 1000),
+ startTime=int((int(video.live_info.get("started_at")) - 60) * 1000),
filterPattern=(
"{"
'($.detail-type = "MediaLive Channel Alert") && '
| {"golden_diff": "diff --git a/src/backend/marsha/core/management/commands/check_live_state.py b/src/backend/marsha/core/management/commands/check_live_state.py\n--- a/src/backend/marsha/core/management/commands/check_live_state.py\n+++ b/src/backend/marsha/core/management/commands/check_live_state.py\n@@ -67,7 +67,7 @@\n live_info = video.live_info\n logs = logs_client.filter_log_events(\n logGroupName=live_info[\"cloudwatch\"][\"logGroupName\"],\n- startTime=int(int(video.live_info.get(\"started_at\")) * 1000),\n+ startTime=int((int(video.live_info.get(\"started_at\")) - 60) * 1000),\n filterPattern=(\n \"{\"\n '($.detail-type = \"MediaLive Channel Alert\") && '\n", "issue": "check_live_state pop from empty list\n## Bug Report\r\n\r\n**Problematic Behavior**\r\n\r\nThe management command check_live_state has a recurring error, it tries to pop a value from an empty list. This list comes from cloudwatch service : https://github.com/openfun/marsha/blob/29e1f78ed6e288f7bba3c198bb7b7179e7af4fe0/src/backend/marsha/core/management/commands/check_live_state.py#L100\r\n\r\n**Expected behavior/code**\r\n\r\nThis error seems to occur when a live has no activity anymore. We try to compare alerts set and the clear to determine if they are still active.\r\n\r\n\r\n**Steps to Reproduce**\r\n1. Start a webinar\r\n2. Once started, stop all activity\r\n3. Run the management command `check_live_state`\r\n4. And then the bug happens!\r\n\r\n\n", "before_files": [{"content": "\"\"\"Check live state management command.\"\"\"\n\nfrom datetime import datetime, timedelta, timezone\nimport json\nimport re\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nimport boto3\nfrom dateutil.parser import isoparse\n\nfrom marsha.core.defaults import RUNNING, STOPPING\nfrom marsha.core.models import Video\nfrom marsha.core.utils.medialive_utils import stop_live_channel\n\n\naws_credentials = {\n \"aws_access_key_id\": settings.AWS_ACCESS_KEY_ID,\n \"aws_secret_access_key\": settings.AWS_SECRET_ACCESS_KEY,\n \"region_name\": settings.AWS_S3_REGION_NAME,\n}\n\n# Configure medialive client\nmedialive_client = boto3.client(\"medialive\", **aws_credentials)\n\n# Configure cloudwatch logs client\nlogs_client = boto3.client(\"logs\", **aws_credentials)\n\n\ndef parse_iso_date(iso_date):\n \"\"\"Parse an iso 8601 date and return a datetime object.\"\"\"\n return isoparse(iso_date)\n\n\ndef generate_expired_date():\n \"\"\"Generate a datetime object 25 minutes in the past.\"\"\"\n return datetime.now(tz=timezone.utc) - timedelta(minutes=25)\n\n\n# pylint: disable=too-many-locals\nclass Command(BaseCommand):\n \"\"\"Check every live streaming running state on AWS.\"\"\"\n\n help = (\n \"Check activity on AWS for every live streaming running\"\n \"and close them if there is not.\"\n )\n\n def handle(self, *args, **options):\n \"\"\"Execute management command.\"\"\"\n extract_message_pattern = (\n r\"^(?P<ingestion_time>.*)\\t\"\n r\"(?P<request_id>.*)\\t\"\n r\"(?P<level>.*)\\t\"\n r\"Received event:(?P<message>.*)$\"\n )\n extract_message_regex = re.compile(extract_message_pattern)\n\n videos = Video.objects.filter(live_state=RUNNING)\n for video in videos:\n # For each running live video, we query cloudwatch on the current live\n # to search messages having detail.alert_type set to `RTMP Has No Audio/Video`.\n # This alert tell us there is no stream and the live can be stopped if the message is\n # older than 25 minutes.\n self.stdout.write(f\"Checking video {video.id}\")\n live_info = 
video.live_info\n logs = logs_client.filter_log_events(\n logGroupName=live_info[\"cloudwatch\"][\"logGroupName\"],\n startTime=int(int(video.live_info.get(\"started_at\")) * 1000),\n filterPattern=(\n \"{\"\n '($.detail-type = \"MediaLive Channel Alert\") && '\n f\"($.resources[0] = \\\"{live_info['medialive']['channel']['arn']}\\\") &&\"\n '($.detail.alert_type = \"RTMP Has No Audio/Video\")'\n \"}\"\n ),\n )\n\n pipelines_queue = {\"0\": [], \"1\": []}\n\n for event in logs[\"events\"]:\n # All events must be parsed to extract the JSON message. When an alert is added,\n # the `alarm_state` property value is `SET` and when the alert is removed,\n # the `alarm_state` property value is `CLEARED`.\n # We have 2 pipelines, a live is over when the 2 pipeline have `SET` value\n # in `alarm_state`.\n # Alarm state act like a list with all the event history. It means a `CLEARED`\n # event is related to a `SET` one. So we have to look over all events, put in\n # a list all `SET` events and remove it if a `CLEARED` event is here. At the\n # end if we have 2 `SET` events, the live has no activity and we have to check\n # the time of the last `SET` event. If this time is older than 25 minutes we\n # stop the channel.\n log = extract_message_regex.match(event[\"message\"])\n message = json.loads(log.group(\"message\"))\n\n if message[\"detail\"][\"alarm_state\"] == \"SET\":\n pipelines_queue[message[\"detail\"][\"pipeline\"]].append(message)\n else:\n pipelines_queue[message[\"detail\"][\"pipeline\"]].pop()\n\n if len(pipelines_queue[\"0\"]) == 1 and len(pipelines_queue[\"1\"]) == 1:\n # Both pipelines receive no stream, we have to check the more recent one\n # and if the time is older than 25 minutes we stop the channel.\n datetime_pipeline0 = parse_iso_date(pipelines_queue[\"0\"][0][\"time\"])\n datetime_pipeline1 = parse_iso_date(pipelines_queue[\"1\"][0][\"time\"])\n expired_date = generate_expired_date()\n\n if (\n datetime_pipeline0 < expired_date\n or datetime_pipeline1 < expired_date\n ):\n # Stop this channel\n self.stdout.write(\n f\"Stopping channel with id {live_info['medialive']['channel']['id']}\"\n )\n stop_live_channel(live_info[\"medialive\"][\"channel\"][\"id\"])\n\n video.live_state = STOPPING\n video.save()\n self.stdout.write(\"Channel stopped\")\n", "path": "src/backend/marsha/core/management/commands/check_live_state.py"}], "after_files": [{"content": "\"\"\"Check live state management command.\"\"\"\n\nfrom datetime import datetime, timedelta, timezone\nimport json\nimport re\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nimport boto3\nfrom dateutil.parser import isoparse\n\nfrom marsha.core.defaults import RUNNING, STOPPING\nfrom marsha.core.models import Video\nfrom marsha.core.utils.medialive_utils import stop_live_channel\n\n\naws_credentials = {\n \"aws_access_key_id\": settings.AWS_ACCESS_KEY_ID,\n \"aws_secret_access_key\": settings.AWS_SECRET_ACCESS_KEY,\n \"region_name\": settings.AWS_S3_REGION_NAME,\n}\n\n# Configure medialive client\nmedialive_client = boto3.client(\"medialive\", **aws_credentials)\n\n# Configure cloudwatch logs client\nlogs_client = boto3.client(\"logs\", **aws_credentials)\n\n\ndef parse_iso_date(iso_date):\n \"\"\"Parse an iso 8601 date and return a datetime object.\"\"\"\n return isoparse(iso_date)\n\n\ndef generate_expired_date():\n \"\"\"Generate a datetime object 25 minutes in the past.\"\"\"\n return datetime.now(tz=timezone.utc) - timedelta(minutes=25)\n\n\n# pylint: 
disable=too-many-locals\nclass Command(BaseCommand):\n \"\"\"Check every live streaming running state on AWS.\"\"\"\n\n help = (\n \"Check activity on AWS for every live streaming running\"\n \"and close them if there is not.\"\n )\n\n def handle(self, *args, **options):\n \"\"\"Execute management command.\"\"\"\n extract_message_pattern = (\n r\"^(?P<ingestion_time>.*)\\t\"\n r\"(?P<request_id>.*)\\t\"\n r\"(?P<level>.*)\\t\"\n r\"Received event:(?P<message>.*)$\"\n )\n extract_message_regex = re.compile(extract_message_pattern)\n\n videos = Video.objects.filter(live_state=RUNNING)\n for video in videos:\n # For each running live video, we query cloudwatch on the current live\n # to search messages having detail.alert_type set to `RTMP Has No Audio/Video`.\n # This alert tell us there is no stream and the live can be stopped if the message is\n # older than 25 minutes.\n self.stdout.write(f\"Checking video {video.id}\")\n live_info = video.live_info\n logs = logs_client.filter_log_events(\n logGroupName=live_info[\"cloudwatch\"][\"logGroupName\"],\n startTime=int((int(video.live_info.get(\"started_at\")) - 60) * 1000),\n filterPattern=(\n \"{\"\n '($.detail-type = \"MediaLive Channel Alert\") && '\n f\"($.resources[0] = \\\"{live_info['medialive']['channel']['arn']}\\\") &&\"\n '($.detail.alert_type = \"RTMP Has No Audio/Video\")'\n \"}\"\n ),\n )\n\n pipelines_queue = {\"0\": [], \"1\": []}\n\n for event in logs[\"events\"]:\n # All events must be parsed to extract the JSON message. When an alert is added,\n # the `alarm_state` property value is `SET` and when the alert is removed,\n # the `alarm_state` property value is `CLEARED`.\n # We have 2 pipelines, a live is over when the 2 pipeline have `SET` value\n # in `alarm_state`.\n # Alarm state act like a list with all the event history. It means a `CLEARED`\n # event is related to a `SET` one. So we have to look over all events, put in\n # a list all `SET` events and remove it if a `CLEARED` event is here. At the\n # end if we have 2 `SET` events, the live has no activity and we have to check\n # the time of the last `SET` event. If this time is older than 25 minutes we\n # stop the channel.\n log = extract_message_regex.match(event[\"message\"])\n message = json.loads(log.group(\"message\"))\n\n if message[\"detail\"][\"alarm_state\"] == \"SET\":\n pipelines_queue[message[\"detail\"][\"pipeline\"]].append(message)\n else:\n pipelines_queue[message[\"detail\"][\"pipeline\"]].pop()\n\n if len(pipelines_queue[\"0\"]) == 1 and len(pipelines_queue[\"1\"]) == 1:\n # Both pipelines receive no stream, we have to check the more recent one\n # and if the time is older than 25 minutes we stop the channel.\n datetime_pipeline0 = parse_iso_date(pipelines_queue[\"0\"][0][\"time\"])\n datetime_pipeline1 = parse_iso_date(pipelines_queue[\"1\"][0][\"time\"])\n expired_date = generate_expired_date()\n\n if (\n datetime_pipeline0 < expired_date\n or datetime_pipeline1 < expired_date\n ):\n # Stop this channel\n self.stdout.write(\n f\"Stopping channel with id {live_info['medialive']['channel']['id']}\"\n )\n stop_live_channel(live_info[\"medialive\"][\"channel\"][\"id\"])\n\n video.live_state = STOPPING\n video.save()\n self.stdout.write(\"Channel stopped\")\n", "path": "src/backend/marsha/core/management/commands/check_live_state.py"}]} | 1,827 | 177 |
gh_patches_debug_23423 | rasdani/github-patches | git_diff | biolab__orange3-4389 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Transpose remembers old unexisting data
I have a table file that I overwrite with different values (rows and columns stay the same). When using a certain version of the file with certain workflow the workflow remembers old data, that was previously overwritten (and does not exist on my disk anymore). I could not replicate this in a new workflow made from scratch or with Orange data sets. However, it occurs even when I reopen Orange or when I copy the workflow to a new file.
Below are the workflow and the data and an image of what is happening.

[transpose_remembering.zip](https://github.com/biolab/orange3/files/4102632/transpose_remembering.zip)
Orange: Last master.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Orange/widgets/data/owtranspose.py`
Content:
```
1 from Orange.data import Table, ContinuousVariable, StringVariable
2 from Orange.widgets.settings import (Setting, ContextSetting,
3 DomainContextHandler)
4 from Orange.widgets.utils.itemmodels import DomainModel
5 from Orange.widgets.utils.widgetpreview import WidgetPreview
6 from Orange.widgets.widget import OWWidget, Msg
7 from Orange.widgets import gui
8 from Orange.widgets.widget import Input, Output
9
10
11 class OWTranspose(OWWidget):
12 name = "Transpose"
13 description = "Transpose data table."
14 icon = "icons/Transpose.svg"
15 priority = 2000
16 keywords = []
17
18 class Inputs:
19 data = Input("Data", Table)
20
21 class Outputs:
22 data = Output("Data", Table, dynamic=False)
23
24 GENERIC, FROM_VAR = range(2)
25
26 resizing_enabled = False
27 want_main_area = False
28
29 DEFAULT_PREFIX = "Feature"
30
31 settingsHandler = DomainContextHandler()
32 feature_type = ContextSetting(GENERIC)
33 feature_name = ContextSetting("")
34 feature_names_column = ContextSetting(None)
35 auto_apply = Setting(True)
36
37 class Warning(OWWidget.Warning):
38 duplicate_names = Msg("Values are not unique.\nTo avoid multiple "
39 "features with the same name, values \nof "
40 "'{}' have been augmented with indices.")
41
42 class Error(OWWidget.Error):
43 value_error = Msg("{}")
44
45 def __init__(self):
46 super().__init__()
47 self.data = None
48
49 # self.apply is changed later, pylint: disable=unnecessary-lambda
50 box = gui.radioButtons(
51 self.controlArea, self, "feature_type", box="Feature names",
52 callback=lambda: self.apply())
53
54 button = gui.appendRadioButton(box, "Generic")
55 edit = gui.lineEdit(
56 gui.indentedBox(box, gui.checkButtonOffsetHint(button)), self,
57 "feature_name",
58 placeholderText="Type a prefix ...", toolTip="Custom feature name")
59 edit.editingFinished.connect(self._apply_editing)
60
61 self.meta_button = gui.appendRadioButton(box, "From variable:")
62 self.feature_model = DomainModel(
63 valid_types=(ContinuousVariable, StringVariable),
64 alphabetical=False)
65 self.feature_combo = gui.comboBox(
66 gui.indentedBox(box, gui.checkButtonOffsetHint(button)), self,
67 "feature_names_column", contentsLength=12,
68 callback=self._feature_combo_changed, model=self.feature_model)
69
70 self.apply_button = gui.auto_apply(self.controlArea, self, box=False, commit=self.apply)
71 self.apply_button.button.setAutoDefault(False)
72
73 self.info.set_output_summary(self.info.NoInput)
74 self.info.set_input_summary(self.info.NoInput)
75
76 self.set_controls()
77
78 def _apply_editing(self):
79 self.feature_type = self.GENERIC
80 self.feature_name = self.feature_name.strip()
81 self.apply()
82
83 def _feature_combo_changed(self):
84 self.feature_type = self.FROM_VAR
85 self.apply()
86
87 @Inputs.data
88 def set_data(self, data):
89 # Skip the context if the combo is empty: a context with
90 # feature_model == None would then match all domains
91 if self.feature_model:
92 self.closeContext()
93 self.data = data
94 if data:
95 self.info.set_input_summary(len(data))
96 else:
97 self.info.set_input_summary(self.info.NoInput)
98 self.set_controls()
99 if self.feature_model:
100 self.openContext(data)
101 self.unconditional_apply()
102
103 def set_controls(self):
104 self.feature_model.set_domain(self.data and self.data.domain)
105 self.meta_button.setEnabled(bool(self.feature_model))
106 if self.feature_model:
107 self.feature_names_column = self.feature_model[0]
108 self.feature_type = self.FROM_VAR
109 else:
110 self.feature_names_column = None
111
112 def apply(self):
113 self.clear_messages()
114 transposed = None
115 if self.data:
116 try:
117 variable = self.feature_type == self.FROM_VAR and \
118 self.feature_names_column
119 transposed = Table.transpose(
120 self.data, variable,
121 feature_name=self.feature_name or self.DEFAULT_PREFIX)
122 if variable:
123 names = self.data.get_column_view(variable)[0]
124 if len(names) != len(set(names)):
125 self.Warning.duplicate_names(variable)
126 self.info.set_output_summary(len(transposed))
127 except ValueError as e:
128 self.Error.value_error(e)
129 else:
130 self.info.set_output_summary(self.info.NoInput)
131 self.Outputs.data.send(transposed)
132
133 def send_report(self):
134 if self.feature_type == self.GENERIC:
135 names = self.feature_name or self.DEFAULT_PREFIX
136 else:
137 names = "from variable"
138 if self.feature_names_column:
139 names += " '{}'".format(self.feature_names_column.name)
140 self.report_items("", [("Feature names", names)])
141 if self.data:
142 self.report_data("Data", self.data)
143
144
145 if __name__ == "__main__": # pragma: no cover
146 WidgetPreview(OWTranspose).run(Table("iris"))
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Orange/widgets/data/owtranspose.py b/Orange/widgets/data/owtranspose.py
--- a/Orange/widgets/data/owtranspose.py
+++ b/Orange/widgets/data/owtranspose.py
@@ -38,6 +38,7 @@
duplicate_names = Msg("Values are not unique.\nTo avoid multiple "
"features with the same name, values \nof "
"'{}' have been augmented with indices.")
+ discrete_attrs = Msg("Categorical features have been encoded as numbers.")
class Error(OWWidget.Error):
value_error = Msg("{}")
@@ -123,6 +124,8 @@
names = self.data.get_column_view(variable)[0]
if len(names) != len(set(names)):
self.Warning.duplicate_names(variable)
+ if self.data.domain.has_discrete_attributes():
+ self.Warning.discrete_attrs()
self.info.set_output_summary(len(transposed))
except ValueError as e:
self.Error.value_error(e)
| {"golden_diff": "diff --git a/Orange/widgets/data/owtranspose.py b/Orange/widgets/data/owtranspose.py\n--- a/Orange/widgets/data/owtranspose.py\n+++ b/Orange/widgets/data/owtranspose.py\n@@ -38,6 +38,7 @@\n duplicate_names = Msg(\"Values are not unique.\\nTo avoid multiple \"\n \"features with the same name, values \\nof \"\n \"'{}' have been augmented with indices.\")\n+ discrete_attrs = Msg(\"Categorical features have been encoded as numbers.\")\n \n class Error(OWWidget.Error):\n value_error = Msg(\"{}\")\n@@ -123,6 +124,8 @@\n names = self.data.get_column_view(variable)[0]\n if len(names) != len(set(names)):\n self.Warning.duplicate_names(variable)\n+ if self.data.domain.has_discrete_attributes():\n+ self.Warning.discrete_attrs()\n self.info.set_output_summary(len(transposed))\n except ValueError as e:\n self.Error.value_error(e)\n", "issue": "Transpose remembers old unexisting data\nI have a table file that I overwrite with different values (rows and columns stay the same). When using a certain version of the file with certain workflow the workflow remembers old data, that was previously overwritten (and does not exist on my disk anymore). I could not replicate this in a new workflow made from scratch or with Orange data sets. However, it occurs even when I reopen Orange or when I copy the workflow to a new file. \r\nBelow are the workflow and the data and an image of what is happening. \r\n\r\n\r\n\r\n[transpose_remembering.zip](https://github.com/biolab/orange3/files/4102632/transpose_remembering.zip)\r\n\r\nOrange: Last master.\r\n\n", "before_files": [{"content": "from Orange.data import Table, ContinuousVariable, StringVariable\nfrom Orange.widgets.settings import (Setting, ContextSetting,\n DomainContextHandler)\nfrom Orange.widgets.utils.itemmodels import DomainModel\nfrom Orange.widgets.utils.widgetpreview import WidgetPreview\nfrom Orange.widgets.widget import OWWidget, Msg\nfrom Orange.widgets import gui\nfrom Orange.widgets.widget import Input, Output\n\n\nclass OWTranspose(OWWidget):\n name = \"Transpose\"\n description = \"Transpose data table.\"\n icon = \"icons/Transpose.svg\"\n priority = 2000\n keywords = []\n\n class Inputs:\n data = Input(\"Data\", Table)\n\n class Outputs:\n data = Output(\"Data\", Table, dynamic=False)\n\n GENERIC, FROM_VAR = range(2)\n\n resizing_enabled = False\n want_main_area = False\n\n DEFAULT_PREFIX = \"Feature\"\n\n settingsHandler = DomainContextHandler()\n feature_type = ContextSetting(GENERIC)\n feature_name = ContextSetting(\"\")\n feature_names_column = ContextSetting(None)\n auto_apply = Setting(True)\n\n class Warning(OWWidget.Warning):\n duplicate_names = Msg(\"Values are not unique.\\nTo avoid multiple \"\n \"features with the same name, values \\nof \"\n \"'{}' have been augmented with indices.\")\n\n class Error(OWWidget.Error):\n value_error = Msg(\"{}\")\n\n def __init__(self):\n super().__init__()\n self.data = None\n\n # self.apply is changed later, pylint: disable=unnecessary-lambda\n box = gui.radioButtons(\n self.controlArea, self, \"feature_type\", box=\"Feature names\",\n callback=lambda: self.apply())\n\n button = gui.appendRadioButton(box, \"Generic\")\n edit = gui.lineEdit(\n gui.indentedBox(box, gui.checkButtonOffsetHint(button)), self,\n \"feature_name\",\n placeholderText=\"Type a prefix ...\", toolTip=\"Custom feature name\")\n edit.editingFinished.connect(self._apply_editing)\n\n self.meta_button = gui.appendRadioButton(box, \"From variable:\")\n self.feature_model = DomainModel(\n 
valid_types=(ContinuousVariable, StringVariable),\n alphabetical=False)\n self.feature_combo = gui.comboBox(\n gui.indentedBox(box, gui.checkButtonOffsetHint(button)), self,\n \"feature_names_column\", contentsLength=12,\n callback=self._feature_combo_changed, model=self.feature_model)\n\n self.apply_button = gui.auto_apply(self.controlArea, self, box=False, commit=self.apply)\n self.apply_button.button.setAutoDefault(False)\n\n self.info.set_output_summary(self.info.NoInput)\n self.info.set_input_summary(self.info.NoInput)\n\n self.set_controls()\n\n def _apply_editing(self):\n self.feature_type = self.GENERIC\n self.feature_name = self.feature_name.strip()\n self.apply()\n\n def _feature_combo_changed(self):\n self.feature_type = self.FROM_VAR\n self.apply()\n\n @Inputs.data\n def set_data(self, data):\n # Skip the context if the combo is empty: a context with\n # feature_model == None would then match all domains\n if self.feature_model:\n self.closeContext()\n self.data = data\n if data:\n self.info.set_input_summary(len(data))\n else:\n self.info.set_input_summary(self.info.NoInput)\n self.set_controls()\n if self.feature_model:\n self.openContext(data)\n self.unconditional_apply()\n\n def set_controls(self):\n self.feature_model.set_domain(self.data and self.data.domain)\n self.meta_button.setEnabled(bool(self.feature_model))\n if self.feature_model:\n self.feature_names_column = self.feature_model[0]\n self.feature_type = self.FROM_VAR\n else:\n self.feature_names_column = None\n\n def apply(self):\n self.clear_messages()\n transposed = None\n if self.data:\n try:\n variable = self.feature_type == self.FROM_VAR and \\\n self.feature_names_column\n transposed = Table.transpose(\n self.data, variable,\n feature_name=self.feature_name or self.DEFAULT_PREFIX)\n if variable:\n names = self.data.get_column_view(variable)[0]\n if len(names) != len(set(names)):\n self.Warning.duplicate_names(variable)\n self.info.set_output_summary(len(transposed))\n except ValueError as e:\n self.Error.value_error(e)\n else:\n self.info.set_output_summary(self.info.NoInput)\n self.Outputs.data.send(transposed)\n\n def send_report(self):\n if self.feature_type == self.GENERIC:\n names = self.feature_name or self.DEFAULT_PREFIX\n else:\n names = \"from variable\"\n if self.feature_names_column:\n names += \" '{}'\".format(self.feature_names_column.name)\n self.report_items(\"\", [(\"Feature names\", names)])\n if self.data:\n self.report_data(\"Data\", self.data)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n WidgetPreview(OWTranspose).run(Table(\"iris\"))\n", "path": "Orange/widgets/data/owtranspose.py"}], "after_files": [{"content": "from Orange.data import Table, ContinuousVariable, StringVariable\nfrom Orange.widgets.settings import (Setting, ContextSetting,\n DomainContextHandler)\nfrom Orange.widgets.utils.itemmodels import DomainModel\nfrom Orange.widgets.utils.widgetpreview import WidgetPreview\nfrom Orange.widgets.widget import OWWidget, Msg\nfrom Orange.widgets import gui\nfrom Orange.widgets.widget import Input, Output\n\n\nclass OWTranspose(OWWidget):\n name = \"Transpose\"\n description = \"Transpose data table.\"\n icon = \"icons/Transpose.svg\"\n priority = 2000\n keywords = []\n\n class Inputs:\n data = Input(\"Data\", Table)\n\n class Outputs:\n data = Output(\"Data\", Table, dynamic=False)\n\n GENERIC, FROM_VAR = range(2)\n\n resizing_enabled = False\n want_main_area = False\n\n DEFAULT_PREFIX = \"Feature\"\n\n settingsHandler = DomainContextHandler()\n feature_type = 
ContextSetting(GENERIC)\n feature_name = ContextSetting(\"\")\n feature_names_column = ContextSetting(None)\n auto_apply = Setting(True)\n\n class Warning(OWWidget.Warning):\n duplicate_names = Msg(\"Values are not unique.\\nTo avoid multiple \"\n \"features with the same name, values \\nof \"\n \"'{}' have been augmented with indices.\")\n discrete_attrs = Msg(\"Categorical features have been encoded as numbers.\")\n\n class Error(OWWidget.Error):\n value_error = Msg(\"{}\")\n\n def __init__(self):\n super().__init__()\n self.data = None\n\n # self.apply is changed later, pylint: disable=unnecessary-lambda\n box = gui.radioButtons(\n self.controlArea, self, \"feature_type\", box=\"Feature names\",\n callback=lambda: self.apply())\n\n button = gui.appendRadioButton(box, \"Generic\")\n edit = gui.lineEdit(\n gui.indentedBox(box, gui.checkButtonOffsetHint(button)), self,\n \"feature_name\",\n placeholderText=\"Type a prefix ...\", toolTip=\"Custom feature name\")\n edit.editingFinished.connect(self._apply_editing)\n\n self.meta_button = gui.appendRadioButton(box, \"From variable:\")\n self.feature_model = DomainModel(\n valid_types=(ContinuousVariable, StringVariable),\n alphabetical=False)\n self.feature_combo = gui.comboBox(\n gui.indentedBox(box, gui.checkButtonOffsetHint(button)), self,\n \"feature_names_column\", contentsLength=12,\n callback=self._feature_combo_changed, model=self.feature_model)\n\n self.apply_button = gui.auto_apply(self.controlArea, self, box=False, commit=self.apply)\n self.apply_button.button.setAutoDefault(False)\n\n self.info.set_output_summary(self.info.NoInput)\n self.info.set_input_summary(self.info.NoInput)\n\n self.set_controls()\n\n def _apply_editing(self):\n self.feature_type = self.GENERIC\n self.feature_name = self.feature_name.strip()\n self.apply()\n\n def _feature_combo_changed(self):\n self.feature_type = self.FROM_VAR\n self.apply()\n\n @Inputs.data\n def set_data(self, data):\n # Skip the context if the combo is empty: a context with\n # feature_model == None would then match all domains\n if self.feature_model:\n self.closeContext()\n self.data = data\n if data:\n self.info.set_input_summary(len(data))\n else:\n self.info.set_input_summary(self.info.NoInput)\n self.set_controls()\n if self.feature_model:\n self.openContext(data)\n self.unconditional_apply()\n\n def set_controls(self):\n self.feature_model.set_domain(self.data and self.data.domain)\n self.meta_button.setEnabled(bool(self.feature_model))\n if self.feature_model:\n self.feature_names_column = self.feature_model[0]\n self.feature_type = self.FROM_VAR\n else:\n self.feature_names_column = None\n\n def apply(self):\n self.clear_messages()\n transposed = None\n if self.data:\n try:\n variable = self.feature_type == self.FROM_VAR and \\\n self.feature_names_column\n transposed = Table.transpose(\n self.data, variable,\n feature_name=self.feature_name or self.DEFAULT_PREFIX)\n if variable:\n names = self.data.get_column_view(variable)[0]\n if len(names) != len(set(names)):\n self.Warning.duplicate_names(variable)\n if self.data.domain.has_discrete_attributes():\n self.Warning.discrete_attrs()\n self.info.set_output_summary(len(transposed))\n except ValueError as e:\n self.Error.value_error(e)\n else:\n self.info.set_output_summary(self.info.NoInput)\n self.Outputs.data.send(transposed)\n\n def send_report(self):\n if self.feature_type == self.GENERIC:\n names = self.feature_name or self.DEFAULT_PREFIX\n else:\n names = \"from variable\"\n if self.feature_names_column:\n names += \" 
'{}'\".format(self.feature_names_column.name)\n self.report_items(\"\", [(\"Feature names\", names)])\n if self.data:\n self.report_data(\"Data\", self.data)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n WidgetPreview(OWTranspose).run(Table(\"iris\"))\n", "path": "Orange/widgets/data/owtranspose.py"}]} | 1,895 | 212 |
gh_patches_debug_38 | rasdani/github-patches | git_diff | ipython__ipython-5701 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move ssh out of external and into lib
This module does not belong in external - it cannot be replaced by an external system module.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/external/ssh/__init__.py`
Content:
```
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/external/ssh/__init__.py b/IPython/external/ssh/__init__.py
--- a/IPython/external/ssh/__init__.py
+++ b/IPython/external/ssh/__init__.py
@@ -0,0 +1,7 @@
+"""This is a copy of zmq.ssh"""
+
+try:
+ from zmq.ssh import *
+except ImportError:
+ from . import tunnel
+ from .tunnel import *
| {"golden_diff": "diff --git a/IPython/external/ssh/__init__.py b/IPython/external/ssh/__init__.py\n--- a/IPython/external/ssh/__init__.py\n+++ b/IPython/external/ssh/__init__.py\n@@ -0,0 +1,7 @@\n+\"\"\"This is a copy of zmq.ssh\"\"\"\n+\n+try:\n+ from zmq.ssh import *\n+except ImportError:\n+ from . import tunnel\n+ from .tunnel import *\n", "issue": "Move ssh out of external and into lib\nThis module does not belong in external - it cannot be replaced by an external system module.\n\n", "before_files": [{"content": "", "path": "IPython/external/ssh/__init__.py"}], "after_files": [{"content": "\"\"\"This is a copy of zmq.ssh\"\"\"\n\ntry:\n from zmq.ssh import *\nexcept ImportError:\n from . import tunnel\n from .tunnel import *\n", "path": "IPython/external/ssh/__init__.py"}]} | 290 | 104 |
gh_patches_debug_37778 | rasdani/github-patches | git_diff | googleapis__python-bigquery-47 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BigQuery: test with all optional dependencies in Python 3.8
Blocked on:
- Apache Arrow: https://issues.apache.org/jira/browse/ARROW-6920
- fastparquet: https://github.com/dask/fastparquet/issues/468
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 import os
18 import shutil
19
20 import nox
21
22
23 BLACK_PATHS = ("docs", "google", "samples", "tests", "noxfile.py", "setup.py")
24
25
26 def default(session):
27 """Default unit test session.
28
29 This is intended to be run **without** an interpreter set, so
30 that the current ``python`` (on the ``PATH``) or the version of
31 Python corresponding to the ``nox`` binary the ``PATH`` can
32 run the tests.
33 """
34 # Install all test dependencies, then install local packages in-place.
35 session.install("mock", "pytest", "pytest-cov", "freezegun")
36 session.install("grpcio")
37 session.install("-e", "test_utils")
38
39 coverage_fail_under = "--cov-fail-under=97"
40
41 # fastparquet is not included in .[all] because, in general, it's redundant
42 # with pyarrow. We still want to run some unit tests with fastparquet
43 # serialization, though.
44 dev_install = ".[all,fastparquet]"
45
46 # There is no pyarrow or fastparquet wheel for Python 3.8.
47 if session.python == "3.8":
48 # Since many tests are skipped due to missing dependencies, test
49 # coverage is much lower in Python 3.8. Remove once we can test with
50 # pyarrow.
51 coverage_fail_under = "--cov-fail-under=91"
52 dev_install = ".[pandas,tqdm]"
53
54 session.install("-e", dev_install)
55
56 # IPython does not support Python 2 after version 5.x
57 if session.python == "2.7":
58 session.install("ipython==5.5")
59 else:
60 session.install("ipython")
61
62 # Run py.test against the unit tests.
63 session.run(
64 "py.test",
65 "--quiet",
66 "--cov=google.cloud.bigquery",
67 "--cov=tests.unit",
68 "--cov-append",
69 "--cov-config=.coveragerc",
70 "--cov-report=",
71 coverage_fail_under,
72 os.path.join("tests", "unit"),
73 *session.posargs,
74 )
75
76
77 @nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"])
78 def unit(session):
79 """Run the unit test suite."""
80 default(session)
81
82
83 @nox.session(python=["2.7", "3.7"])
84 def system(session):
85 """Run the system test suite."""
86
87 # Sanity check: Only run system tests if the environment variable is set.
88 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
89 session.skip("Credentials must be set via environment variable.")
90
91 # Use pre-release gRPC for system tests.
92 session.install("--pre", "grpcio")
93
94 # Install all test dependencies, then install local packages in place.
95 session.install("mock", "pytest", "psutil")
96 session.install("google-cloud-storage")
97 session.install("fastavro")
98 session.install("-e", "test_utils")
99 session.install("-e", ".[all]")
100
101 # IPython does not support Python 2 after version 5.x
102 if session.python == "2.7":
103 session.install("ipython==5.5")
104 else:
105 session.install("ipython")
106
107 # Run py.test against the system tests.
108 session.run(
109 "py.test", "--quiet", os.path.join("tests", "system.py"), *session.posargs
110 )
111
112
113 @nox.session(python=["2.7", "3.7"])
114 def snippets(session):
115 """Run the snippets test suite."""
116
117 # Sanity check: Only run snippets tests if the environment variable is set.
118 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
119 session.skip("Credentials must be set via environment variable.")
120
121 # Install all test dependencies, then install local packages in place.
122 session.install("mock", "pytest")
123 session.install("google-cloud-storage")
124 session.install("grpcio")
125 session.install("-e", "test_utils")
126 session.install("-e", ".[all]")
127
128 # Run py.test against the snippets tests.
129 session.run("py.test", os.path.join("docs", "snippets.py"), *session.posargs)
130 session.run("py.test", "samples", *session.posargs)
131
132
133 @nox.session(python="3.7")
134 def cover(session):
135 """Run the final coverage report.
136
137 This outputs the coverage report aggregating coverage from the unit
138 test runs (not system test runs), and then erases coverage data.
139 """
140 session.install("coverage", "pytest-cov")
141 session.run("coverage", "report", "--show-missing", "--fail-under=100")
142 session.run("coverage", "erase")
143
144
145 @nox.session(python="3.7")
146 def lint(session):
147 """Run linters.
148
149 Returns a failure if the linters find linting errors or sufficiently
150 serious code quality issues.
151 """
152
153 session.install("black", "flake8")
154 session.install("-e", ".")
155 session.run("flake8", os.path.join("google", "cloud", "bigquery"))
156 session.run("flake8", "tests")
157 session.run("flake8", os.path.join("docs", "samples"))
158 session.run("flake8", os.path.join("docs", "snippets.py"))
159 session.run("black", "--check", *BLACK_PATHS)
160
161
162 @nox.session(python="3.7")
163 def lint_setup_py(session):
164 """Verify that setup.py is valid (including RST check)."""
165
166 session.install("docutils", "Pygments")
167 session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
168
169
170 @nox.session(python="3.6")
171 def blacken(session):
172 """Run black.
173 Format code to uniform standard.
174
175 This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
176 That run uses an image that doesn't have 3.6 installed. Before updating this
177 check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
178 """
179 session.install("black")
180 session.run("black", *BLACK_PATHS)
181
182
183 @nox.session(python="3.7")
184 def docs(session):
185 """Build the docs."""
186
187 session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme")
188 session.install("google-cloud-storage")
189 session.install("-e", ".[all]")
190
191 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
192 session.run(
193 "sphinx-build",
194 "-W", # warnings as errors
195 "-T", # show full traceback on exception
196 "-N", # no colors
197 "-b",
198 "html",
199 "-d",
200 os.path.join("docs", "_build", "doctrees", ""),
201 os.path.join("docs", ""),
202 os.path.join("docs", "_build", "html", ""),
203 )
204
```
Path: `setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25 version = "1.24.0"
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 'enum34; python_version < "3.4"',
33 "google-auth >= 1.9.0, < 2.0dev",
34 "google-api-core >= 1.15.0, < 2.0dev",
35 "google-cloud-core >= 1.1.0, < 2.0dev",
36 "google-resumable-media >= 0.5.0, < 0.6dev",
37 "protobuf >= 3.6.0",
38 "six >=1.13.0,< 2.0.0dev",
39 ]
40 extras = {
41 "bqstorage": [
42 "google-cloud-bigquery-storage >= 0.6.0, <2.0.0dev",
43 "pyarrow>=0.16.0, < 2.0dev",
44 ],
45 "pandas": ["pandas>=0.17.1"],
46 # Exclude PyArrow dependency from Windows Python 2.7.
47 'pyarrow: platform_system != "Windows" or python_version >= "3.4"': [
48 # Bad Linux release for 0.14.0.
49 # https://issues.apache.org/jira/browse/ARROW-5868
50 "pyarrow>=0.4.1, != 0.14.0"
51 ],
52 "tqdm": ["tqdm >= 4.0.0, <5.0.0dev"],
53 "fastparquet": ["fastparquet", "python-snappy"],
54 }
55
56 all_extras = []
57
58 for extra in extras:
59 if extra == "fastparquet":
60 # Skip fastparquet from "all" because it is redundant with pyarrow and
61 # creates a dependency on pre-release versions of numpy. See:
62 # https://github.com/googleapis/google-cloud-python/issues/8549
63 continue
64 all_extras.extend(extras[extra])
65
66 extras["all"] = all_extras
67
68 # Setup boilerplate below this line.
69
70 package_root = os.path.abspath(os.path.dirname(__file__))
71
72 readme_filename = os.path.join(package_root, "README.rst")
73 with io.open(readme_filename, encoding="utf-8") as readme_file:
74 readme = readme_file.read()
75
76 # Only include packages under the 'google' namespace. Do not include tests,
77 # benchmarks, etc.
78 packages = [
79 package for package in setuptools.find_packages() if package.startswith("google")
80 ]
81
82 # Determine which namespaces are needed.
83 namespaces = ["google"]
84 if "google.cloud" in packages:
85 namespaces.append("google.cloud")
86
87
88 setuptools.setup(
89 name=name,
90 version=version,
91 description=description,
92 long_description=readme,
93 author="Google LLC",
94 author_email="[email protected]",
95 license="Apache 2.0",
96 url="https://github.com/googleapis/python-bigquery",
97 classifiers=[
98 release_status,
99 "Intended Audience :: Developers",
100 "License :: OSI Approved :: Apache Software License",
101 "Programming Language :: Python",
102 "Programming Language :: Python :: 2",
103 "Programming Language :: Python :: 2.7",
104 "Programming Language :: Python :: 3",
105 "Programming Language :: Python :: 3.5",
106 "Programming Language :: Python :: 3.6",
107 "Programming Language :: Python :: 3.7",
108 "Operating System :: OS Independent",
109 "Topic :: Internet",
110 ],
111 platforms="Posix; MacOS X; Windows",
112 packages=packages,
113 namespace_packages=namespaces,
114 install_requires=dependencies,
115 extras_require=extras,
116 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
117 include_package_data=True,
118 zip_safe=False,
119 )
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -43,14 +43,6 @@
# serialization, though.
dev_install = ".[all,fastparquet]"
- # There is no pyarrow or fastparquet wheel for Python 3.8.
- if session.python == "3.8":
- # Since many tests are skipped due to missing dependencies, test
- # coverage is much lower in Python 3.8. Remove once we can test with
- # pyarrow.
- coverage_fail_under = "--cov-fail-under=91"
- dev_install = ".[pandas,tqdm]"
-
session.install("-e", dev_install)
# IPython does not support Python 2 after version 5.x
@@ -80,7 +72,7 @@
default(session)
[email protected](python=["2.7", "3.7"])
[email protected](python=["2.7", "3.8"])
def system(session):
"""Run the system test suite."""
@@ -110,7 +102,7 @@
)
[email protected](python=["2.7", "3.7"])
[email protected](python=["2.7", "3.8"])
def snippets(session):
"""Run the snippets test suite."""
@@ -130,7 +122,7 @@
session.run("py.test", "samples", *session.posargs)
[email protected](python="3.7")
[email protected](python="3.8")
def cover(session):
"""Run the final coverage report.
@@ -142,7 +134,7 @@
session.run("coverage", "erase")
[email protected](python="3.7")
[email protected](python="3.8")
def lint(session):
"""Run linters.
@@ -159,7 +151,7 @@
session.run("black", "--check", *BLACK_PATHS)
[email protected](python="3.7")
[email protected](python="3.8")
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
@@ -180,7 +172,7 @@
session.run("black", *BLACK_PATHS)
[email protected](python="3.7")
[email protected](python="3.8")
def docs(session):
"""Build the docs."""
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -105,6 +105,7 @@
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"Topic :: Internet",
],
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -43,14 +43,6 @@\n # serialization, though.\n dev_install = \".[all,fastparquet]\"\n \n- # There is no pyarrow or fastparquet wheel for Python 3.8.\n- if session.python == \"3.8\":\n- # Since many tests are skipped due to missing dependencies, test\n- # coverage is much lower in Python 3.8. Remove once we can test with\n- # pyarrow.\n- coverage_fail_under = \"--cov-fail-under=91\"\n- dev_install = \".[pandas,tqdm]\"\n-\n session.install(\"-e\", dev_install)\n \n # IPython does not support Python 2 after version 5.x\n@@ -80,7 +72,7 @@\n default(session)\n \n \[email protected](python=[\"2.7\", \"3.7\"])\[email protected](python=[\"2.7\", \"3.8\"])\n def system(session):\n \"\"\"Run the system test suite.\"\"\"\n \n@@ -110,7 +102,7 @@\n )\n \n \[email protected](python=[\"2.7\", \"3.7\"])\[email protected](python=[\"2.7\", \"3.8\"])\n def snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n \n@@ -130,7 +122,7 @@\n session.run(\"py.test\", \"samples\", *session.posargs)\n \n \[email protected](python=\"3.7\")\[email protected](python=\"3.8\")\n def cover(session):\n \"\"\"Run the final coverage report.\n \n@@ -142,7 +134,7 @@\n session.run(\"coverage\", \"erase\")\n \n \[email protected](python=\"3.7\")\[email protected](python=\"3.8\")\n def lint(session):\n \"\"\"Run linters.\n \n@@ -159,7 +151,7 @@\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n \n \[email protected](python=\"3.7\")\[email protected](python=\"3.8\")\n def lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n \n@@ -180,7 +172,7 @@\n session.run(\"black\", *BLACK_PATHS)\n \n \[email protected](python=\"3.7\")\[email protected](python=\"3.8\")\n def docs(session):\n \"\"\"Build the docs.\"\"\"\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -105,6 +105,7 @@\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n", "issue": "BigQuery: test with all optional dependencies in Python 3.8\nBlocked on:\r\n\r\n- Apache Arrow: https://issues.apache.org/jira/browse/ARROW-6920\r\n- fastparquet: https://github.com/dask/fastparquet/issues/468\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\n\nimport nox\n\n\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\n\ndef default(session):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n # Install all test dependencies, then install local packages 
in-place.\n session.install(\"mock\", \"pytest\", \"pytest-cov\", \"freezegun\")\n session.install(\"grpcio\")\n session.install(\"-e\", \"test_utils\")\n\n coverage_fail_under = \"--cov-fail-under=97\"\n\n # fastparquet is not included in .[all] because, in general, it's redundant\n # with pyarrow. We still want to run some unit tests with fastparquet\n # serialization, though.\n dev_install = \".[all,fastparquet]\"\n\n # There is no pyarrow or fastparquet wheel for Python 3.8.\n if session.python == \"3.8\":\n # Since many tests are skipped due to missing dependencies, test\n # coverage is much lower in Python 3.8. Remove once we can test with\n # pyarrow.\n coverage_fail_under = \"--cov-fail-under=91\"\n dev_install = \".[pandas,tqdm]\"\n\n session.install(\"-e\", dev_install)\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n coverage_fail_under,\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=[\"2.7\", \"3.7\"])\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"psutil\")\n session.install(\"google-cloud-storage\")\n session.install(\"fastavro\")\n session.install(\"-e\", \"test_utils\")\n session.install(\"-e\", \".[all]\")\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # Run py.test against the system tests.\n session.run(\n \"py.test\", \"--quiet\", os.path.join(\"tests\", \"system.py\"), *session.posargs\n )\n\n\[email protected](python=[\"2.7\", \"3.7\"])\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\")\n session.install(\"google-cloud-storage\")\n session.install(\"grpcio\")\n session.install(\"-e\", \"test_utils\")\n session.install(\"-e\", \".[all]\")\n\n # Run py.test against the snippets tests.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\"py.test\", \"samples\", *session.posargs)\n\n\[email protected](python=\"3.7\")\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", 
\"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=\"3.7\")\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"black\", \"flake8\")\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email protected](python=\"3.7\")\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(\"black\")\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=\"3.7\")\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}, {"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\nversion = \"1.24.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n 'enum34; python_version < \"3.4\"',\n \"google-auth >= 1.9.0, < 2.0dev\",\n \"google-api-core >= 1.15.0, < 2.0dev\",\n \"google-cloud-core >= 1.1.0, < 2.0dev\",\n \"google-resumable-media >= 0.5.0, < 0.6dev\",\n \"protobuf >= 3.6.0\",\n \"six >=1.13.0,< 2.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 0.6.0, <2.0.0dev\",\n \"pyarrow>=0.16.0, < 2.0dev\",\n ],\n \"pandas\": [\"pandas>=0.17.1\"],\n # Exclude PyArrow dependency from Windows Python 2.7.\n 'pyarrow: platform_system != \"Windows\" or 
python_version >= \"3.4\"': [\n # Bad Linux release for 0.14.0.\n # https://issues.apache.org/jira/browse/ARROW-5868\n \"pyarrow>=0.4.1, != 0.14.0\"\n ],\n \"tqdm\": [\"tqdm >= 4.0.0, <5.0.0dev\"],\n \"fastparquet\": [\"fastparquet\", \"python-snappy\"],\n}\n\nall_extras = []\n\nfor extra in extras:\n if extra == \"fastparquet\":\n # Skip fastparquet from \"all\" because it is redundant with pyarrow and\n # creates a dependency on pre-release versions of numpy. See:\n # https://github.com/googleapis/google-cloud-python/issues/8549\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\n\nimport nox\n\n\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\n\ndef default(session):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n # Install all test dependencies, then install local packages in-place.\n session.install(\"mock\", \"pytest\", \"pytest-cov\", \"freezegun\")\n session.install(\"grpcio\")\n session.install(\"-e\", 
\"test_utils\")\n\n coverage_fail_under = \"--cov-fail-under=97\"\n\n # fastparquet is not included in .[all] because, in general, it's redundant\n # with pyarrow. We still want to run some unit tests with fastparquet\n # serialization, though.\n dev_install = \".[all,fastparquet]\"\n\n session.install(\"-e\", dev_install)\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n coverage_fail_under,\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=[\"2.7\", \"3.8\"])\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"psutil\")\n session.install(\"google-cloud-storage\")\n session.install(\"fastavro\")\n session.install(\"-e\", \"test_utils\")\n session.install(\"-e\", \".[all]\")\n\n # IPython does not support Python 2 after version 5.x\n if session.python == \"2.7\":\n session.install(\"ipython==5.5\")\n else:\n session.install(\"ipython\")\n\n # Run py.test against the system tests.\n session.run(\n \"py.test\", \"--quiet\", os.path.join(\"tests\", \"system.py\"), *session.posargs\n )\n\n\[email protected](python=[\"2.7\", \"3.8\"])\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\")\n session.install(\"google-cloud-storage\")\n session.install(\"grpcio\")\n session.install(\"-e\", \"test_utils\")\n session.install(\"-e\", \".[all]\")\n\n # Run py.test against the snippets tests.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\"py.test\", \"samples\", *session.posargs)\n\n\[email protected](python=\"3.8\")\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=\"3.8\")\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"black\", \"flake8\")\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n 
session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(\"black\")\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}, {"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\nversion = \"1.24.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n 'enum34; python_version < \"3.4\"',\n \"google-auth >= 1.9.0, < 2.0dev\",\n \"google-api-core >= 1.15.0, < 2.0dev\",\n \"google-cloud-core >= 1.1.0, < 2.0dev\",\n \"google-resumable-media >= 0.5.0, < 0.6dev\",\n \"protobuf >= 3.6.0\",\n \"six >=1.13.0,< 2.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 0.6.0, <2.0.0dev\",\n \"pyarrow>=0.16.0, < 2.0dev\",\n ],\n \"pandas\": [\"pandas>=0.17.1\"],\n # Exclude PyArrow dependency from Windows Python 2.7.\n 'pyarrow: platform_system != \"Windows\" or python_version >= \"3.4\"': [\n # Bad Linux release for 0.14.0.\n # https://issues.apache.org/jira/browse/ARROW-5868\n \"pyarrow>=0.4.1, != 0.14.0\"\n ],\n \"tqdm\": [\"tqdm >= 4.0.0, <5.0.0dev\"],\n \"fastparquet\": [\"fastparquet\", \"python-snappy\"],\n}\n\nall_extras = []\n\nfor extra in extras:\n if extra == \"fastparquet\":\n # Skip fastparquet from \"all\" because it is redundant with pyarrow and\n # creates a dependency on pre-release versions of numpy. 
See:\n # https://github.com/googleapis/google-cloud-python/issues/8549\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 3,810 | 665 |
gh_patches_debug_4934 | rasdani/github-patches | git_diff | google__mobly-472 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Log the contents of the config file at the debug level early
This helps in debugging a remote user's malformed JSON/YAML, or configs that don't adhere to the schema.
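For illustration, a minimal sketch of what that could look like (the helper name and placement are assumptions for the example, not necessarily how the change was made):

```python
import logging

import yaml


def _load_config_file(path):
    """Load a YAML test config, logging its raw contents at debug level."""
    with open(path, encoding="utf-8") as f:
        content = f.read()
    # Emit the raw file contents before parsing, so a malformed or
    # schema-violating config from a remote user shows up in the debug log.
    logging.debug("Test config contents:\n%s", content)
    return yaml.safe_load(content)
```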
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mobly/config_parser.py`
Content:
```
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from builtins import str
16
17 import copy
18 import io
19 import os
20 import yaml
21
22 from mobly import keys
23 from mobly import utils
24
25 # An environment variable defining the base location for Mobly logs.
26 ENV_MOBLY_LOGPATH = 'MOBLY_LOGPATH'
27 _DEFAULT_LOG_PATH = '/tmp/logs/mobly/'
28
29
30 class MoblyConfigError(Exception):
31 """Raised when there is a problem in test configuration file."""
32
33
34 def _validate_test_config(test_config):
35 """Validates the raw configuration loaded from the config file.
36
37 Making sure the required key 'TestBeds' is present.
38 """
39 required_key = keys.Config.key_testbed.value
40 if required_key not in test_config:
41 raise MoblyConfigError(
42 'Required key %s missing in test config.' % required_key)
43
44
45 def _validate_testbed_name(name):
46 """Validates the name of a test bed.
47
48 Since test bed names are used as part of the test run id, it needs to meet
49 certain requirements.
50
51 Args:
52 name: The test bed's name specified in config file.
53
54 Raises:
55 MoblyConfigError: The name does not meet any criteria.
56 """
57 if not name:
58 raise MoblyConfigError("Test bed names can't be empty.")
59 name = str(name)
60 for char in name:
61 if char not in utils.valid_filename_chars:
62 raise MoblyConfigError(
63 'Char "%s" is not allowed in test bed names.' % char)
64
65
66 def _validate_testbed_configs(testbed_configs):
67 """Validates the testbed configurations.
68
69 Args:
70 testbed_configs: A list of testbed configuration dicts.
71
72 Raises:
73 MoblyConfigError: Some parts of the configuration is invalid.
74 """
75 seen_names = set()
76 # Cross checks testbed configs for resource conflicts.
77 for config in testbed_configs:
78 # Check for conflicts between multiple concurrent testbed configs.
79 # No need to call it if there's only one testbed config.
80 name = config[keys.Config.key_testbed_name.value]
81 _validate_testbed_name(name)
82 # Test bed names should be unique.
83 if name in seen_names:
84 raise MoblyConfigError('Duplicate testbed name %s found.' % name)
85 seen_names.add(name)
86
87
88 def load_test_config_file(test_config_path, tb_filters=None):
89 """Processes the test configuration file provied by user.
90
91 Loads the configuration file into a dict, unpacks each testbed
92 config into its own dict, and validate the configuration in the
93 process.
94
95 Args:
96 test_config_path: Path to the test configuration file.
97 tb_filters: A subset of test bed names to be pulled from the config
98 file. If None, then all test beds will be selected.
99
100 Returns:
101 A list of test configuration dicts to be passed to
102 test_runner.TestRunner.
103 """
104 configs = _load_config_file(test_config_path)
105 if tb_filters:
106 tbs = []
107 for tb in configs[keys.Config.key_testbed.value]:
108 if tb[keys.Config.key_testbed_name.value] in tb_filters:
109 tbs.append(tb)
110 if len(tbs) != len(tb_filters):
111 raise MoblyConfigError(
112 'Expect to find %d test bed configs, found %d. Check if'
113 ' you have the correct test bed names.' % (len(tb_filters),
114 len(tbs)))
115 configs[keys.Config.key_testbed.value] = tbs
116 mobly_params = configs.get(keys.Config.key_mobly_params.value, {})
117 # Decide log path.
118 log_path = mobly_params.get(keys.Config.key_log_path.value,
119 _DEFAULT_LOG_PATH)
120 if ENV_MOBLY_LOGPATH in os.environ:
121 log_path = os.environ[ENV_MOBLY_LOGPATH]
122 log_path = utils.abs_path(log_path)
123 # Validate configs
124 _validate_test_config(configs)
125 _validate_testbed_configs(configs[keys.Config.key_testbed.value])
126 # Transform config dict from user-facing key mapping to internal config object.
127 test_configs = []
128 for original_bed_config in configs[keys.Config.key_testbed.value]:
129 test_run_config = TestRunConfig()
130 test_run_config.test_bed_name = original_bed_config[
131 keys.Config.key_testbed_name.value]
132 test_run_config.log_path = log_path
133 test_run_config.controller_configs = original_bed_config.get(
134 keys.Config.key_testbed_controllers.value, {})
135 test_run_config.user_params = original_bed_config.get(
136 keys.Config.key_testbed_test_params.value, {})
137 test_configs.append(test_run_config)
138 return test_configs
139
140
141 def _load_config_file(path):
142 """Loads a test config file.
143
144 The test config file has to be in YAML format.
145
146 Args:
147 path: A string that is the full path to the config file, including the
148 file name.
149
150 Returns:
151 A dict that represents info in the config file.
152 """
153 with io.open(utils.abs_path(path), 'r', encoding='utf-8') as f:
154 conf = yaml.load(f)
155 return conf
156
157
158 class TestRunConfig(object):
159 """The data class that holds all the information needed for a test run.
160
161 Attributes:
162 log_path: string, specifies the root directory for all logs written by
163 a test run.
164 test_bed_name: string, the name of the test bed used by a test run.
165 controller_configs: dict, configs used for instantiating controller
166 objects.
167 user_params: dict, all the parameters to be consumed by the test logic.
168 register_controller: func, used by test classes to register controller
169 modules.
170 summary_writer: records.TestSummaryWriter, used to write elements to
171 the test result summary file.
172 test_class_name_suffix: string, suffix to append to the class name for
173 reporting. This is used for differentiating the same class
174 executed with different parameters in a suite.
175 """
176
177 def __init__(self):
178 self.log_path = None
179 self.test_bed_name = None
180 self.controller_configs = None
181 self.user_params = None
182 self.register_controller = None
183 self.summary_writer = None
184 self.test_class_name_suffix = None
185
186 def copy(self):
187 """Returns a deep copy of the current config.
188 """
189 return copy.deepcopy(self)
190
191 def __str__(self):
192 return str(self.__dict__)
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mobly/config_parser.py b/mobly/config_parser.py
--- a/mobly/config_parser.py
+++ b/mobly/config_parser.py
@@ -16,6 +16,7 @@
import copy
import io
+import pprint
import os
import yaml
@@ -189,4 +190,7 @@
return copy.deepcopy(self)
def __str__(self):
- return str(self.__dict__)
+ content = dict(self.__dict__)
+ content.pop('summary_writer')
+ content.pop('register_controller')
+ return pprint.pformat(content)
| {"golden_diff": "diff --git a/mobly/config_parser.py b/mobly/config_parser.py\n--- a/mobly/config_parser.py\n+++ b/mobly/config_parser.py\n@@ -16,6 +16,7 @@\n \n import copy\n import io\n+import pprint\n import os\n import yaml\n \n@@ -189,4 +190,7 @@\n return copy.deepcopy(self)\n \n def __str__(self):\n- return str(self.__dict__)\n+ content = dict(self.__dict__)\n+ content.pop('summary_writer')\n+ content.pop('register_controller')\n+ return pprint.pformat(content)\n", "issue": "Log the contents of config file at the debug level early\nThis helps in debugging remote user's malformed json/yaml or configs that don't adhere to schema.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom builtins import str\n\nimport copy\nimport io\nimport os\nimport yaml\n\nfrom mobly import keys\nfrom mobly import utils\n\n# An environment variable defining the base location for Mobly logs.\nENV_MOBLY_LOGPATH = 'MOBLY_LOGPATH'\n_DEFAULT_LOG_PATH = '/tmp/logs/mobly/'\n\n\nclass MoblyConfigError(Exception):\n \"\"\"Raised when there is a problem in test configuration file.\"\"\"\n\n\ndef _validate_test_config(test_config):\n \"\"\"Validates the raw configuration loaded from the config file.\n\n Making sure the required key 'TestBeds' is present.\n \"\"\"\n required_key = keys.Config.key_testbed.value\n if required_key not in test_config:\n raise MoblyConfigError(\n 'Required key %s missing in test config.' % required_key)\n\n\ndef _validate_testbed_name(name):\n \"\"\"Validates the name of a test bed.\n\n Since test bed names are used as part of the test run id, it needs to meet\n certain requirements.\n\n Args:\n name: The test bed's name specified in config file.\n\n Raises:\n MoblyConfigError: The name does not meet any criteria.\n \"\"\"\n if not name:\n raise MoblyConfigError(\"Test bed names can't be empty.\")\n name = str(name)\n for char in name:\n if char not in utils.valid_filename_chars:\n raise MoblyConfigError(\n 'Char \"%s\" is not allowed in test bed names.' % char)\n\n\ndef _validate_testbed_configs(testbed_configs):\n \"\"\"Validates the testbed configurations.\n\n Args:\n testbed_configs: A list of testbed configuration dicts.\n\n Raises:\n MoblyConfigError: Some parts of the configuration is invalid.\n \"\"\"\n seen_names = set()\n # Cross checks testbed configs for resource conflicts.\n for config in testbed_configs:\n # Check for conflicts between multiple concurrent testbed configs.\n # No need to call it if there's only one testbed config.\n name = config[keys.Config.key_testbed_name.value]\n _validate_testbed_name(name)\n # Test bed names should be unique.\n if name in seen_names:\n raise MoblyConfigError('Duplicate testbed name %s found.' 
% name)\n seen_names.add(name)\n\n\ndef load_test_config_file(test_config_path, tb_filters=None):\n \"\"\"Processes the test configuration file provied by user.\n\n Loads the configuration file into a dict, unpacks each testbed\n config into its own dict, and validate the configuration in the\n process.\n\n Args:\n test_config_path: Path to the test configuration file.\n tb_filters: A subset of test bed names to be pulled from the config\n file. If None, then all test beds will be selected.\n\n Returns:\n A list of test configuration dicts to be passed to\n test_runner.TestRunner.\n \"\"\"\n configs = _load_config_file(test_config_path)\n if tb_filters:\n tbs = []\n for tb in configs[keys.Config.key_testbed.value]:\n if tb[keys.Config.key_testbed_name.value] in tb_filters:\n tbs.append(tb)\n if len(tbs) != len(tb_filters):\n raise MoblyConfigError(\n 'Expect to find %d test bed configs, found %d. Check if'\n ' you have the correct test bed names.' % (len(tb_filters),\n len(tbs)))\n configs[keys.Config.key_testbed.value] = tbs\n mobly_params = configs.get(keys.Config.key_mobly_params.value, {})\n # Decide log path.\n log_path = mobly_params.get(keys.Config.key_log_path.value,\n _DEFAULT_LOG_PATH)\n if ENV_MOBLY_LOGPATH in os.environ:\n log_path = os.environ[ENV_MOBLY_LOGPATH]\n log_path = utils.abs_path(log_path)\n # Validate configs\n _validate_test_config(configs)\n _validate_testbed_configs(configs[keys.Config.key_testbed.value])\n # Transform config dict from user-facing key mapping to internal config object.\n test_configs = []\n for original_bed_config in configs[keys.Config.key_testbed.value]:\n test_run_config = TestRunConfig()\n test_run_config.test_bed_name = original_bed_config[\n keys.Config.key_testbed_name.value]\n test_run_config.log_path = log_path\n test_run_config.controller_configs = original_bed_config.get(\n keys.Config.key_testbed_controllers.value, {})\n test_run_config.user_params = original_bed_config.get(\n keys.Config.key_testbed_test_params.value, {})\n test_configs.append(test_run_config)\n return test_configs\n\n\ndef _load_config_file(path):\n \"\"\"Loads a test config file.\n\n The test config file has to be in YAML format.\n\n Args:\n path: A string that is the full path to the config file, including the\n file name.\n\n Returns:\n A dict that represents info in the config file.\n \"\"\"\n with io.open(utils.abs_path(path), 'r', encoding='utf-8') as f:\n conf = yaml.load(f)\n return conf\n\n\nclass TestRunConfig(object):\n \"\"\"The data class that holds all the information needed for a test run.\n\n Attributes:\n log_path: string, specifies the root directory for all logs written by\n a test run.\n test_bed_name: string, the name of the test bed used by a test run.\n controller_configs: dict, configs used for instantiating controller\n objects.\n user_params: dict, all the parameters to be consumed by the test logic.\n register_controller: func, used by test classes to register controller\n modules.\n summary_writer: records.TestSummaryWriter, used to write elements to\n the test result summary file.\n test_class_name_suffix: string, suffix to append to the class name for\n reporting. 
This is used for differentiating the same class\n executed with different parameters in a suite.\n \"\"\"\n\n def __init__(self):\n self.log_path = None\n self.test_bed_name = None\n self.controller_configs = None\n self.user_params = None\n self.register_controller = None\n self.summary_writer = None\n self.test_class_name_suffix = None\n\n def copy(self):\n \"\"\"Returns a deep copy of the current config.\n \"\"\"\n return copy.deepcopy(self)\n\n def __str__(self):\n return str(self.__dict__)\n", "path": "mobly/config_parser.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom builtins import str\n\nimport copy\nimport io\nimport pprint\nimport os\nimport yaml\n\nfrom mobly import keys\nfrom mobly import utils\n\n# An environment variable defining the base location for Mobly logs.\nENV_MOBLY_LOGPATH = 'MOBLY_LOGPATH'\n_DEFAULT_LOG_PATH = '/tmp/logs/mobly/'\n\n\nclass MoblyConfigError(Exception):\n \"\"\"Raised when there is a problem in test configuration file.\"\"\"\n\n\ndef _validate_test_config(test_config):\n \"\"\"Validates the raw configuration loaded from the config file.\n\n Making sure the required key 'TestBeds' is present.\n \"\"\"\n required_key = keys.Config.key_testbed.value\n if required_key not in test_config:\n raise MoblyConfigError(\n 'Required key %s missing in test config.' % required_key)\n\n\ndef _validate_testbed_name(name):\n \"\"\"Validates the name of a test bed.\n\n Since test bed names are used as part of the test run id, it needs to meet\n certain requirements.\n\n Args:\n name: The test bed's name specified in config file.\n\n Raises:\n MoblyConfigError: The name does not meet any criteria.\n \"\"\"\n if not name:\n raise MoblyConfigError(\"Test bed names can't be empty.\")\n name = str(name)\n for char in name:\n if char not in utils.valid_filename_chars:\n raise MoblyConfigError(\n 'Char \"%s\" is not allowed in test bed names.' % char)\n\n\ndef _validate_testbed_configs(testbed_configs):\n \"\"\"Validates the testbed configurations.\n\n Args:\n testbed_configs: A list of testbed configuration dicts.\n\n Raises:\n MoblyConfigError: Some parts of the configuration is invalid.\n \"\"\"\n seen_names = set()\n # Cross checks testbed configs for resource conflicts.\n for config in testbed_configs:\n # Check for conflicts between multiple concurrent testbed configs.\n # No need to call it if there's only one testbed config.\n name = config[keys.Config.key_testbed_name.value]\n _validate_testbed_name(name)\n # Test bed names should be unique.\n if name in seen_names:\n raise MoblyConfigError('Duplicate testbed name %s found.' 
% name)\n seen_names.add(name)\n\n\ndef load_test_config_file(test_config_path, tb_filters=None):\n \"\"\"Processes the test configuration file provied by user.\n\n Loads the configuration file into a dict, unpacks each testbed\n config into its own dict, and validate the configuration in the\n process.\n\n Args:\n test_config_path: Path to the test configuration file.\n tb_filters: A subset of test bed names to be pulled from the config\n file. If None, then all test beds will be selected.\n\n Returns:\n A list of test configuration dicts to be passed to\n test_runner.TestRunner.\n \"\"\"\n configs = _load_config_file(test_config_path)\n if tb_filters:\n tbs = []\n for tb in configs[keys.Config.key_testbed.value]:\n if tb[keys.Config.key_testbed_name.value] in tb_filters:\n tbs.append(tb)\n if len(tbs) != len(tb_filters):\n raise MoblyConfigError(\n 'Expect to find %d test bed configs, found %d. Check if'\n ' you have the correct test bed names.' % (len(tb_filters),\n len(tbs)))\n configs[keys.Config.key_testbed.value] = tbs\n mobly_params = configs.get(keys.Config.key_mobly_params.value, {})\n # Decide log path.\n log_path = mobly_params.get(keys.Config.key_log_path.value,\n _DEFAULT_LOG_PATH)\n if ENV_MOBLY_LOGPATH in os.environ:\n log_path = os.environ[ENV_MOBLY_LOGPATH]\n log_path = utils.abs_path(log_path)\n # Validate configs\n _validate_test_config(configs)\n _validate_testbed_configs(configs[keys.Config.key_testbed.value])\n # Transform config dict from user-facing key mapping to internal config object.\n test_configs = []\n for original_bed_config in configs[keys.Config.key_testbed.value]:\n test_run_config = TestRunConfig()\n test_run_config.test_bed_name = original_bed_config[\n keys.Config.key_testbed_name.value]\n test_run_config.log_path = log_path\n test_run_config.controller_configs = original_bed_config.get(\n keys.Config.key_testbed_controllers.value, {})\n test_run_config.user_params = original_bed_config.get(\n keys.Config.key_testbed_test_params.value, {})\n test_configs.append(test_run_config)\n return test_configs\n\n\ndef _load_config_file(path):\n \"\"\"Loads a test config file.\n\n The test config file has to be in YAML format.\n\n Args:\n path: A string that is the full path to the config file, including the\n file name.\n\n Returns:\n A dict that represents info in the config file.\n \"\"\"\n with io.open(utils.abs_path(path), 'r', encoding='utf-8') as f:\n conf = yaml.load(f)\n return conf\n\n\nclass TestRunConfig(object):\n \"\"\"The data class that holds all the information needed for a test run.\n\n Attributes:\n log_path: string, specifies the root directory for all logs written by\n a test run.\n test_bed_name: string, the name of the test bed used by a test run.\n controller_configs: dict, configs used for instantiating controller\n objects.\n user_params: dict, all the parameters to be consumed by the test logic.\n register_controller: func, used by test classes to register controller\n modules.\n summary_writer: records.TestSummaryWriter, used to write elements to\n the test result summary file.\n test_class_name_suffix: string, suffix to append to the class name for\n reporting. 
This is used for differentiating the same class\n executed with different parameters in a suite.\n \"\"\"\n\n def __init__(self):\n self.log_path = None\n self.test_bed_name = None\n self.controller_configs = None\n self.user_params = None\n self.register_controller = None\n self.summary_writer = None\n self.test_class_name_suffix = None\n\n def copy(self):\n \"\"\"Returns a deep copy of the current config.\n \"\"\"\n return copy.deepcopy(self)\n\n def __str__(self):\n content = dict(self.__dict__)\n content.pop('summary_writer')\n content.pop('register_controller')\n return pprint.pformat(content)\n", "path": "mobly/config_parser.py"}]} | 2,270 | 134 |
gh_patches_debug_39339 | rasdani/github-patches | git_diff | wagtail__wagtail-10545 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace `total_ordering` usage with comparison functions implementation
### Is your proposal related to a problem?
We have two instances of `total_ordering` usage within the codebase:
https://github.com/wagtail/wagtail/blob/cd5200c8e1ac0d7299fd9c398b2b994606b3c7d2/wagtail/admin/search.py#L12-L13
https://github.com/wagtail/wagtail/blob/cd5200c8e1ac0d7299fd9c398b2b994606b3c7d2/wagtail/admin/widgets/button.py#L11-L12
Even though it's convenient, `total_ordering` is known to be slow. According to [Python's docs](https://docs.python.org/3/library/functools.html#functools.total_ordering):
> **Note**
> While this decorator makes it easy to create well behaved totally ordered types, it does come at the cost of slower execution and more complex stack traces for the derived comparison methods. If performance benchmarking indicates this is a bottleneck for a given application, implementing all six rich comparison methods instead is likely to provide an easy speed boost.
Django recently removed their usage of `total_ordering` in https://github.com/django/django/pull/16958/commits/ee36e101e8f8c0acde4bb148b738ab7034e902a0 (probably not all usages, I haven't checked).
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
Replace `total_ordering` with implementations of `__eq__()`, `__ne__()`, `__lt__()`, `__le__()`, `__gt__()`, and `__ge__()`.
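As a generic, minimal sketch of that pattern for a class ordered by an `(order, label)` key (illustrative only, not the exact Wagtail patch):

```python
class OrderedItem:
    """Example of explicit rich comparisons replacing @total_ordering."""

    def __init__(self, order, label):
        self.order = order
        self.label = label

    def _key(self):
        return (self.order, self.label)

    def __eq__(self, other):
        if not isinstance(other, OrderedItem):
            return NotImplemented
        return self._key() == other._key()

    def __ne__(self, other):
        if not isinstance(other, OrderedItem):
            return NotImplemented
        return self._key() != other._key()

    def __lt__(self, other):
        if not isinstance(other, OrderedItem):
            return NotImplemented
        return self._key() < other._key()

    def __le__(self, other):
        if not isinstance(other, OrderedItem):
            return NotImplemented
        return self._key() <= other._key()

    def __gt__(self, other):
        if not isinstance(other, OrderedItem):
            return NotImplemented
        return self._key() > other._key()

    def __ge__(self, other):
        if not isinstance(other, OrderedItem):
            return NotImplemented
        return self._key() >= other._key()
```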
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
Keep using `total_ordering`
### Additional context
I found this while fixing an incorrect import of `total_ordering` in #10525.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/widgets/button.py`
Content:
```
1 from functools import total_ordering
2
3 from django.forms.utils import flatatt
4 from django.template.loader import render_to_string
5 from django.utils.functional import cached_property
6 from django.utils.html import format_html
7
8 from wagtail import hooks
9
10
11 @total_ordering
12 class Button:
13 show = True
14
15 def __init__(
16 self, label, url, classes=set(), icon_name=None, attrs={}, priority=1000
17 ):
18 self.label = label
19 self.url = url
20 self.classes = classes
21 self.icon_name = icon_name
22 self.attrs = attrs.copy()
23 self.priority = priority
24
25 def render(self):
26 attrs = {
27 "href": self.url,
28 "class": " ".join(sorted(self.classes)),
29 "title": self.label,
30 }
31 attrs.update(self.attrs)
32 return format_html("<a{}>{}</a>", flatatt(attrs), self.label)
33
34 def __str__(self):
35 return self.render()
36
37 def __repr__(self):
38 return f"<Button: {self.label}>"
39
40 def __lt__(self, other):
41 if not isinstance(other, Button):
42 return NotImplemented
43 return (self.priority, self.label) < (other.priority, other.label)
44
45 def __eq__(self, other):
46 if not isinstance(other, Button):
47 return NotImplemented
48 return (
49 self.label == other.label
50 and self.url == other.url
51 and self.classes == other.classes
52 and self.attrs == other.attrs
53 and self.priority == other.priority
54 )
55
56
57 # Base class for all listing buttons
58 # This is also used by SnippetListingButton defined in wagtail.snippets.widgets
59 class ListingButton(Button):
60 def __init__(self, label, url, classes=set(), **kwargs):
61 classes = {"button", "button-small", "button-secondary"} | set(classes)
62 super().__init__(label, url, classes=classes, **kwargs)
63
64
65 class PageListingButton(ListingButton):
66 pass
67
68
69 class BaseDropdownMenuButton(Button):
70 def __init__(self, *args, **kwargs):
71 super().__init__(*args, url=None, **kwargs)
72
73 @cached_property
74 def dropdown_buttons(self):
75 raise NotImplementedError
76
77 def get_context_data(self):
78 return {
79 "buttons": self.dropdown_buttons,
80 "label": self.label,
81 "title": self.attrs.get("title"),
82 "classes": self.classes,
83 }
84
85 def render(self):
86 return render_to_string(self.template_name, self.get_context_data())
87
88
89 class ButtonWithDropdown(BaseDropdownMenuButton):
90 template_name = "wagtailadmin/pages/listing/_button_with_dropdown.html"
91
92 def __init__(self, *args, **kwargs):
93 self.button_classes = kwargs.pop("button_classes", set())
94 self.buttons_data = kwargs.pop("buttons_data", [])
95 super().__init__(*args, **kwargs)
96
97 def get_context_data(self):
98 context = super().get_context_data()
99 context["button_classes"] = self.button_classes
100 return context
101
102 @cached_property
103 def dropdown_buttons(self):
104 return [Button(**button) for button in self.buttons_data]
105
106
107 class ButtonWithDropdownFromHook(BaseDropdownMenuButton):
108 template_name = "wagtailadmin/pages/listing/_button_with_dropdown.html"
109
110 def __init__(self, label, hook_name, page, page_perms, next_url=None, **kwargs):
111 self.hook_name = hook_name
112 self.page = page
113 self.page_perms = page_perms
114 self.next_url = next_url
115
116 super().__init__(label, **kwargs)
117
118 @property
119 def show(self):
120 return bool(self.dropdown_buttons)
121
122 @cached_property
123 def dropdown_buttons(self):
124 button_hooks = hooks.get_hooks(self.hook_name)
125
126 buttons = []
127 for hook in button_hooks:
128 buttons.extend(hook(self.page, self.page_perms, self.next_url))
129
130 buttons.sort()
131 return buttons
132
```
Path: `wagtail/admin/search.py`
Content:
```
1 from functools import total_ordering
2
3 from django.forms import Media, MediaDefiningClass
4 from django.forms.utils import flatatt
5 from django.template.loader import render_to_string
6 from django.utils.functional import cached_property
7 from django.utils.safestring import mark_safe
8 from django.utils.text import slugify
9
10 from wagtail import hooks
11 from wagtail.admin.forms.search import SearchForm
12
13
14 @total_ordering
15 class SearchArea(metaclass=MediaDefiningClass):
16 template = "wagtailadmin/shared/search_area.html"
17
18 def __init__(
19 self, label, url, name=None, classnames="", icon_name="", attrs=None, order=1000
20 ):
21 self.label = label
22 self.url = url
23 self.classnames = classnames
24 self.icon_name = icon_name
25 self.name = name or slugify(str(label))
26 self.order = order
27
28 if attrs:
29 self.attr_string = flatatt(attrs)
30 else:
31 self.attr_string = ""
32
33 def __lt__(self, other):
34 return (self.order, self.label) < (other.order, other.label)
35
36 def __eq__(self, other):
37 return (self.order, self.label) == (other.order, other.label)
38
39 def is_shown(self, request):
40 """
41 Whether this search area should be shown for the given request; permission
42 checks etc should go here. By default, search areas are shown all the time
43 """
44 return True
45
46 def is_active(self, request, current=None):
47 if current is None:
48 return request.path.startswith(self.url)
49 else:
50 return self.name == current
51
52 def render_html(self, request, query, current=None):
53 return render_to_string(
54 self.template,
55 {
56 "name": self.name,
57 "url": self.url,
58 "classnames": self.classnames,
59 "icon_name": self.icon_name,
60 "attr_string": self.attr_string,
61 "label": self.label,
62 "active": self.is_active(request, current),
63 "query_string": query,
64 },
65 request=request,
66 )
67
68
69 class Search:
70 def __init__(self, register_hook_name, construct_hook_name=None):
71 self.register_hook_name = register_hook_name
72 self.construct_hook_name = construct_hook_name
73
74 @cached_property
75 def registered_search_areas(self):
76 return sorted([fn() for fn in hooks.get_hooks(self.register_hook_name)])
77
78 def search_items_for_request(self, request):
79 return [item for item in self.registered_search_areas if item.is_shown(request)]
80
81 def active_search(self, request, current=None):
82 return [
83 item
84 for item in self.search_items_for_request(request)
85 if item.is_active(request, current)
86 ]
87
88 @property
89 def media(self):
90 media = Media()
91 for item in self.registered_search_areas:
92 media += item.media
93 return media
94
95 def render_html(self, request, current=None):
96 search_areas = self.search_items_for_request(request)
97
98 # Get query parameter
99 form = SearchForm(request.GET)
100 query = ""
101 if form.is_valid():
102 query = form.cleaned_data["q"]
103
104 # provide a hook for modifying the search area, if construct_hook_name has been set
105 if self.construct_hook_name:
106 for fn in hooks.get_hooks(self.construct_hook_name):
107 fn(request, search_areas)
108
109 rendered_search_areas = []
110 for item in search_areas:
111 rendered_search_areas.append(item.render_html(request, query, current))
112
113 return mark_safe("".join(rendered_search_areas))
114
115
116 admin_search_areas = Search(
117 register_hook_name="register_admin_search_area",
118 construct_hook_name="construct_search",
119 )
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/admin/search.py b/wagtail/admin/search.py
--- a/wagtail/admin/search.py
+++ b/wagtail/admin/search.py
@@ -1,5 +1,3 @@
-from functools import total_ordering
-
from django.forms import Media, MediaDefiningClass
from django.forms.utils import flatatt
from django.template.loader import render_to_string
@@ -11,7 +9,6 @@
from wagtail.admin.forms.search import SearchForm
-@total_ordering
class SearchArea(metaclass=MediaDefiningClass):
template = "wagtailadmin/shared/search_area.html"
@@ -31,9 +28,28 @@
self.attr_string = ""
def __lt__(self, other):
+ if not isinstance(other, SearchArea):
+ return NotImplemented
return (self.order, self.label) < (other.order, other.label)
+ def __le__(self, other):
+ if not isinstance(other, SearchArea):
+ return NotImplemented
+ return (self.order, self.label) <= (other.order, other.label)
+
+ def __gt__(self, other):
+ if not isinstance(other, SearchArea):
+ return NotImplemented
+ return (self.order, self.label) > (other.order, other.label)
+
+ def __ge__(self, other):
+ if not isinstance(other, SearchArea):
+ return NotImplemented
+ return (self.order, self.label) >= (other.order, other.label)
+
def __eq__(self, other):
+ if not isinstance(other, SearchArea):
+ return NotImplemented
return (self.order, self.label) == (other.order, other.label)
def is_shown(self, request):
diff --git a/wagtail/admin/widgets/button.py b/wagtail/admin/widgets/button.py
--- a/wagtail/admin/widgets/button.py
+++ b/wagtail/admin/widgets/button.py
@@ -1,5 +1,3 @@
-from functools import total_ordering
-
from django.forms.utils import flatatt
from django.template.loader import render_to_string
from django.utils.functional import cached_property
@@ -8,7 +6,6 @@
from wagtail import hooks
-@total_ordering
class Button:
show = True
@@ -42,6 +39,21 @@
return NotImplemented
return (self.priority, self.label) < (other.priority, other.label)
+ def __le__(self, other):
+ if not isinstance(other, Button):
+ return NotImplemented
+ return (self.priority, self.label) <= (other.priority, other.label)
+
+ def __gt__(self, other):
+ if not isinstance(other, Button):
+ return NotImplemented
+ return (self.priority, self.label) > (other.priority, other.label)
+
+ def __ge__(self, other):
+ if not isinstance(other, Button):
+ return NotImplemented
+ return (self.priority, self.label) >= (other.priority, other.label)
+
def __eq__(self, other):
if not isinstance(other, Button):
return NotImplemented
| {"golden_diff": "diff --git a/wagtail/admin/search.py b/wagtail/admin/search.py\n--- a/wagtail/admin/search.py\n+++ b/wagtail/admin/search.py\n@@ -1,5 +1,3 @@\n-from functools import total_ordering\n-\n from django.forms import Media, MediaDefiningClass\n from django.forms.utils import flatatt\n from django.template.loader import render_to_string\n@@ -11,7 +9,6 @@\n from wagtail.admin.forms.search import SearchForm\n \n \n-@total_ordering\n class SearchArea(metaclass=MediaDefiningClass):\n template = \"wagtailadmin/shared/search_area.html\"\n \n@@ -31,9 +28,28 @@\n self.attr_string = \"\"\n \n def __lt__(self, other):\n+ if not isinstance(other, SearchArea):\n+ return NotImplemented\n return (self.order, self.label) < (other.order, other.label)\n \n+ def __le__(self, other):\n+ if not isinstance(other, SearchArea):\n+ return NotImplemented\n+ return (self.order, self.label) <= (other.order, other.label)\n+\n+ def __gt__(self, other):\n+ if not isinstance(other, SearchArea):\n+ return NotImplemented\n+ return (self.order, self.label) > (other.order, other.label)\n+\n+ def __ge__(self, other):\n+ if not isinstance(other, SearchArea):\n+ return NotImplemented\n+ return (self.order, self.label) >= (other.order, other.label)\n+\n def __eq__(self, other):\n+ if not isinstance(other, SearchArea):\n+ return NotImplemented\n return (self.order, self.label) == (other.order, other.label)\n \n def is_shown(self, request):\ndiff --git a/wagtail/admin/widgets/button.py b/wagtail/admin/widgets/button.py\n--- a/wagtail/admin/widgets/button.py\n+++ b/wagtail/admin/widgets/button.py\n@@ -1,5 +1,3 @@\n-from functools import total_ordering\n-\n from django.forms.utils import flatatt\n from django.template.loader import render_to_string\n from django.utils.functional import cached_property\n@@ -8,7 +6,6 @@\n from wagtail import hooks\n \n \n-@total_ordering\n class Button:\n show = True\n \n@@ -42,6 +39,21 @@\n return NotImplemented\n return (self.priority, self.label) < (other.priority, other.label)\n \n+ def __le__(self, other):\n+ if not isinstance(other, Button):\n+ return NotImplemented\n+ return (self.priority, self.label) <= (other.priority, other.label)\n+\n+ def __gt__(self, other):\n+ if not isinstance(other, Button):\n+ return NotImplemented\n+ return (self.priority, self.label) > (other.priority, other.label)\n+\n+ def __ge__(self, other):\n+ if not isinstance(other, Button):\n+ return NotImplemented\n+ return (self.priority, self.label) >= (other.priority, other.label)\n+\n def __eq__(self, other):\n if not isinstance(other, Button):\n return NotImplemented\n", "issue": "Replace `total_ordering` usage with comparison functions implementation\n### Is your proposal related to a problem?\r\n\r\nWe have two instances of `total_ordering` usage within the codebase:\r\n\r\nhttps://github.com/wagtail/wagtail/blob/cd5200c8e1ac0d7299fd9c398b2b994606b3c7d2/wagtail/admin/search.py#L12-L13\r\n\r\nhttps://github.com/wagtail/wagtail/blob/cd5200c8e1ac0d7299fd9c398b2b994606b3c7d2/wagtail/admin/widgets/button.py#L11-L12\r\n\r\nEven though it's convenient, `total_ordering` is known to be slow. According to [Python's docs](https://docs.python.org/3/library/functools.html#functools.total_ordering):\r\n\r\n> **Note**\r\n> While this decorator makes it easy to create well behaved totally ordered types, it does come at the cost of slower execution and more complex stack traces for the derived comparison methods. 
If performance benchmarking indicates this is a bottleneck for a given application, implementing all six rich comparison methods instead is likely to provide an easy speed boost.\r\n\r\nDjango recently removed their usage of `total_ordering` in https://github.com/django/django/pull/16958/commits/ee36e101e8f8c0acde4bb148b738ab7034e902a0 (probably not all usages, I haven't checked).\r\n\r\n### Describe the solution you'd like\r\n\r\n<!--\r\n Provide a clear and concise description of what you want to happen.\r\n-->\r\n\r\nReplace `total_ordering` with implementations of `__eq__()`, `__ne__()`, `__lt__()`, `__le__()`, `__gt__()`, and `__ge__()`.\r\n\r\n### Describe alternatives you've considered\r\n\r\n<!--\r\n Let us know about other solutions you've tried or researched.\r\n-->\r\n\r\nKeep using `total_ordering`\r\n\r\n### Additional context\r\n\r\nI found this while fixing an incorrect import of `total_ordering` in #10525.\r\n\n", "before_files": [{"content": "from functools import total_ordering\n\nfrom django.forms.utils import flatatt\nfrom django.template.loader import render_to_string\nfrom django.utils.functional import cached_property\nfrom django.utils.html import format_html\n\nfrom wagtail import hooks\n\n\n@total_ordering\nclass Button:\n show = True\n\n def __init__(\n self, label, url, classes=set(), icon_name=None, attrs={}, priority=1000\n ):\n self.label = label\n self.url = url\n self.classes = classes\n self.icon_name = icon_name\n self.attrs = attrs.copy()\n self.priority = priority\n\n def render(self):\n attrs = {\n \"href\": self.url,\n \"class\": \" \".join(sorted(self.classes)),\n \"title\": self.label,\n }\n attrs.update(self.attrs)\n return format_html(\"<a{}>{}</a>\", flatatt(attrs), self.label)\n\n def __str__(self):\n return self.render()\n\n def __repr__(self):\n return f\"<Button: {self.label}>\"\n\n def __lt__(self, other):\n if not isinstance(other, Button):\n return NotImplemented\n return (self.priority, self.label) < (other.priority, other.label)\n\n def __eq__(self, other):\n if not isinstance(other, Button):\n return NotImplemented\n return (\n self.label == other.label\n and self.url == other.url\n and self.classes == other.classes\n and self.attrs == other.attrs\n and self.priority == other.priority\n )\n\n\n# Base class for all listing buttons\n# This is also used by SnippetListingButton defined in wagtail.snippets.widgets\nclass ListingButton(Button):\n def __init__(self, label, url, classes=set(), **kwargs):\n classes = {\"button\", \"button-small\", \"button-secondary\"} | set(classes)\n super().__init__(label, url, classes=classes, **kwargs)\n\n\nclass PageListingButton(ListingButton):\n pass\n\n\nclass BaseDropdownMenuButton(Button):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, url=None, **kwargs)\n\n @cached_property\n def dropdown_buttons(self):\n raise NotImplementedError\n\n def get_context_data(self):\n return {\n \"buttons\": self.dropdown_buttons,\n \"label\": self.label,\n \"title\": self.attrs.get(\"title\"),\n \"classes\": self.classes,\n }\n\n def render(self):\n return render_to_string(self.template_name, self.get_context_data())\n\n\nclass ButtonWithDropdown(BaseDropdownMenuButton):\n template_name = \"wagtailadmin/pages/listing/_button_with_dropdown.html\"\n\n def __init__(self, *args, **kwargs):\n self.button_classes = kwargs.pop(\"button_classes\", set())\n self.buttons_data = kwargs.pop(\"buttons_data\", [])\n super().__init__(*args, **kwargs)\n\n def get_context_data(self):\n context = 
super().get_context_data()\n context[\"button_classes\"] = self.button_classes\n return context\n\n @cached_property\n def dropdown_buttons(self):\n return [Button(**button) for button in self.buttons_data]\n\n\nclass ButtonWithDropdownFromHook(BaseDropdownMenuButton):\n template_name = \"wagtailadmin/pages/listing/_button_with_dropdown.html\"\n\n def __init__(self, label, hook_name, page, page_perms, next_url=None, **kwargs):\n self.hook_name = hook_name\n self.page = page\n self.page_perms = page_perms\n self.next_url = next_url\n\n super().__init__(label, **kwargs)\n\n @property\n def show(self):\n return bool(self.dropdown_buttons)\n\n @cached_property\n def dropdown_buttons(self):\n button_hooks = hooks.get_hooks(self.hook_name)\n\n buttons = []\n for hook in button_hooks:\n buttons.extend(hook(self.page, self.page_perms, self.next_url))\n\n buttons.sort()\n return buttons\n", "path": "wagtail/admin/widgets/button.py"}, {"content": "from functools import total_ordering\n\nfrom django.forms import Media, MediaDefiningClass\nfrom django.forms.utils import flatatt\nfrom django.template.loader import render_to_string\nfrom django.utils.functional import cached_property\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import slugify\n\nfrom wagtail import hooks\nfrom wagtail.admin.forms.search import SearchForm\n\n\n@total_ordering\nclass SearchArea(metaclass=MediaDefiningClass):\n template = \"wagtailadmin/shared/search_area.html\"\n\n def __init__(\n self, label, url, name=None, classnames=\"\", icon_name=\"\", attrs=None, order=1000\n ):\n self.label = label\n self.url = url\n self.classnames = classnames\n self.icon_name = icon_name\n self.name = name or slugify(str(label))\n self.order = order\n\n if attrs:\n self.attr_string = flatatt(attrs)\n else:\n self.attr_string = \"\"\n\n def __lt__(self, other):\n return (self.order, self.label) < (other.order, other.label)\n\n def __eq__(self, other):\n return (self.order, self.label) == (other.order, other.label)\n\n def is_shown(self, request):\n \"\"\"\n Whether this search area should be shown for the given request; permission\n checks etc should go here. 
By default, search areas are shown all the time\n \"\"\"\n return True\n\n def is_active(self, request, current=None):\n if current is None:\n return request.path.startswith(self.url)\n else:\n return self.name == current\n\n def render_html(self, request, query, current=None):\n return render_to_string(\n self.template,\n {\n \"name\": self.name,\n \"url\": self.url,\n \"classnames\": self.classnames,\n \"icon_name\": self.icon_name,\n \"attr_string\": self.attr_string,\n \"label\": self.label,\n \"active\": self.is_active(request, current),\n \"query_string\": query,\n },\n request=request,\n )\n\n\nclass Search:\n def __init__(self, register_hook_name, construct_hook_name=None):\n self.register_hook_name = register_hook_name\n self.construct_hook_name = construct_hook_name\n\n @cached_property\n def registered_search_areas(self):\n return sorted([fn() for fn in hooks.get_hooks(self.register_hook_name)])\n\n def search_items_for_request(self, request):\n return [item for item in self.registered_search_areas if item.is_shown(request)]\n\n def active_search(self, request, current=None):\n return [\n item\n for item in self.search_items_for_request(request)\n if item.is_active(request, current)\n ]\n\n @property\n def media(self):\n media = Media()\n for item in self.registered_search_areas:\n media += item.media\n return media\n\n def render_html(self, request, current=None):\n search_areas = self.search_items_for_request(request)\n\n # Get query parameter\n form = SearchForm(request.GET)\n query = \"\"\n if form.is_valid():\n query = form.cleaned_data[\"q\"]\n\n # provide a hook for modifying the search area, if construct_hook_name has been set\n if self.construct_hook_name:\n for fn in hooks.get_hooks(self.construct_hook_name):\n fn(request, search_areas)\n\n rendered_search_areas = []\n for item in search_areas:\n rendered_search_areas.append(item.render_html(request, query, current))\n\n return mark_safe(\"\".join(rendered_search_areas))\n\n\nadmin_search_areas = Search(\n register_hook_name=\"register_admin_search_area\",\n construct_hook_name=\"construct_search\",\n)\n", "path": "wagtail/admin/search.py"}], "after_files": [{"content": "from django.forms.utils import flatatt\nfrom django.template.loader import render_to_string\nfrom django.utils.functional import cached_property\nfrom django.utils.html import format_html\n\nfrom wagtail import hooks\n\n\nclass Button:\n show = True\n\n def __init__(\n self, label, url, classes=set(), icon_name=None, attrs={}, priority=1000\n ):\n self.label = label\n self.url = url\n self.classes = classes\n self.icon_name = icon_name\n self.attrs = attrs.copy()\n self.priority = priority\n\n def render(self):\n attrs = {\n \"href\": self.url,\n \"class\": \" \".join(sorted(self.classes)),\n \"title\": self.label,\n }\n attrs.update(self.attrs)\n return format_html(\"<a{}>{}</a>\", flatatt(attrs), self.label)\n\n def __str__(self):\n return self.render()\n\n def __repr__(self):\n return f\"<Button: {self.label}>\"\n\n def __lt__(self, other):\n if not isinstance(other, Button):\n return NotImplemented\n return (self.priority, self.label) < (other.priority, other.label)\n\n def __le__(self, other):\n if not isinstance(other, Button):\n return NotImplemented\n return (self.priority, self.label) <= (other.priority, other.label)\n\n def __gt__(self, other):\n if not isinstance(other, Button):\n return NotImplemented\n return (self.priority, self.label) > (other.priority, other.label)\n\n def __ge__(self, other):\n if not isinstance(other, Button):\n 
return NotImplemented\n return (self.priority, self.label) >= (other.priority, other.label)\n\n def __eq__(self, other):\n if not isinstance(other, Button):\n return NotImplemented\n return (\n self.label == other.label\n and self.url == other.url\n and self.classes == other.classes\n and self.attrs == other.attrs\n and self.priority == other.priority\n )\n\n\n# Base class for all listing buttons\n# This is also used by SnippetListingButton defined in wagtail.snippets.widgets\nclass ListingButton(Button):\n def __init__(self, label, url, classes=set(), **kwargs):\n classes = {\"button\", \"button-small\", \"button-secondary\"} | set(classes)\n super().__init__(label, url, classes=classes, **kwargs)\n\n\nclass PageListingButton(ListingButton):\n pass\n\n\nclass BaseDropdownMenuButton(Button):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, url=None, **kwargs)\n\n @cached_property\n def dropdown_buttons(self):\n raise NotImplementedError\n\n def get_context_data(self):\n return {\n \"buttons\": self.dropdown_buttons,\n \"label\": self.label,\n \"title\": self.attrs.get(\"title\"),\n \"classes\": self.classes,\n }\n\n def render(self):\n return render_to_string(self.template_name, self.get_context_data())\n\n\nclass ButtonWithDropdown(BaseDropdownMenuButton):\n template_name = \"wagtailadmin/pages/listing/_button_with_dropdown.html\"\n\n def __init__(self, *args, **kwargs):\n self.button_classes = kwargs.pop(\"button_classes\", set())\n self.buttons_data = kwargs.pop(\"buttons_data\", [])\n super().__init__(*args, **kwargs)\n\n def get_context_data(self):\n context = super().get_context_data()\n context[\"button_classes\"] = self.button_classes\n return context\n\n @cached_property\n def dropdown_buttons(self):\n return [Button(**button) for button in self.buttons_data]\n\n\nclass ButtonWithDropdownFromHook(BaseDropdownMenuButton):\n template_name = \"wagtailadmin/pages/listing/_button_with_dropdown.html\"\n\n def __init__(self, label, hook_name, page, page_perms, next_url=None, **kwargs):\n self.hook_name = hook_name\n self.page = page\n self.page_perms = page_perms\n self.next_url = next_url\n\n super().__init__(label, **kwargs)\n\n @property\n def show(self):\n return bool(self.dropdown_buttons)\n\n @cached_property\n def dropdown_buttons(self):\n button_hooks = hooks.get_hooks(self.hook_name)\n\n buttons = []\n for hook in button_hooks:\n buttons.extend(hook(self.page, self.page_perms, self.next_url))\n\n buttons.sort()\n return buttons\n", "path": "wagtail/admin/widgets/button.py"}, {"content": "from django.forms import Media, MediaDefiningClass\nfrom django.forms.utils import flatatt\nfrom django.template.loader import render_to_string\nfrom django.utils.functional import cached_property\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import slugify\n\nfrom wagtail import hooks\nfrom wagtail.admin.forms.search import SearchForm\n\n\nclass SearchArea(metaclass=MediaDefiningClass):\n template = \"wagtailadmin/shared/search_area.html\"\n\n def __init__(\n self, label, url, name=None, classnames=\"\", icon_name=\"\", attrs=None, order=1000\n ):\n self.label = label\n self.url = url\n self.classnames = classnames\n self.icon_name = icon_name\n self.name = name or slugify(str(label))\n self.order = order\n\n if attrs:\n self.attr_string = flatatt(attrs)\n else:\n self.attr_string = \"\"\n\n def __lt__(self, other):\n if not isinstance(other, SearchArea):\n return NotImplemented\n return (self.order, self.label) < (other.order, other.label)\n\n def 
__le__(self, other):\n if not isinstance(other, SearchArea):\n return NotImplemented\n return (self.order, self.label) <= (other.order, other.label)\n\n def __gt__(self, other):\n if not isinstance(other, SearchArea):\n return NotImplemented\n return (self.order, self.label) > (other.order, other.label)\n\n def __ge__(self, other):\n if not isinstance(other, SearchArea):\n return NotImplemented\n return (self.order, self.label) >= (other.order, other.label)\n\n def __eq__(self, other):\n if not isinstance(other, SearchArea):\n return NotImplemented\n return (self.order, self.label) == (other.order, other.label)\n\n def is_shown(self, request):\n \"\"\"\n Whether this search area should be shown for the given request; permission\n checks etc should go here. By default, search areas are shown all the time\n \"\"\"\n return True\n\n def is_active(self, request, current=None):\n if current is None:\n return request.path.startswith(self.url)\n else:\n return self.name == current\n\n def render_html(self, request, query, current=None):\n return render_to_string(\n self.template,\n {\n \"name\": self.name,\n \"url\": self.url,\n \"classnames\": self.classnames,\n \"icon_name\": self.icon_name,\n \"attr_string\": self.attr_string,\n \"label\": self.label,\n \"active\": self.is_active(request, current),\n \"query_string\": query,\n },\n request=request,\n )\n\n\nclass Search:\n def __init__(self, register_hook_name, construct_hook_name=None):\n self.register_hook_name = register_hook_name\n self.construct_hook_name = construct_hook_name\n\n @cached_property\n def registered_search_areas(self):\n return sorted([fn() for fn in hooks.get_hooks(self.register_hook_name)])\n\n def search_items_for_request(self, request):\n return [item for item in self.registered_search_areas if item.is_shown(request)]\n\n def active_search(self, request, current=None):\n return [\n item\n for item in self.search_items_for_request(request)\n if item.is_active(request, current)\n ]\n\n @property\n def media(self):\n media = Media()\n for item in self.registered_search_areas:\n media += item.media\n return media\n\n def render_html(self, request, current=None):\n search_areas = self.search_items_for_request(request)\n\n # Get query parameter\n form = SearchForm(request.GET)\n query = \"\"\n if form.is_valid():\n query = form.cleaned_data[\"q\"]\n\n # provide a hook for modifying the search area, if construct_hook_name has been set\n if self.construct_hook_name:\n for fn in hooks.get_hooks(self.construct_hook_name):\n fn(request, search_areas)\n\n rendered_search_areas = []\n for item in search_areas:\n rendered_search_areas.append(item.render_html(request, query, current))\n\n return mark_safe(\"\".join(rendered_search_areas))\n\n\nadmin_search_areas = Search(\n register_hook_name=\"register_admin_search_area\",\n construct_hook_name=\"construct_search\",\n)\n", "path": "wagtail/admin/search.py"}]} | 2,960 | 680 |
gh_patches_debug_19799 | rasdani/github-patches | git_diff | mirumee__ariadne-266 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change `make_executable_schema` API to accept multiple bindables args
Currently, the second argument to `make_executable_schema` is a list of `SchemaBindable` objects or a single bindable:
```python
# Single bindable:
schema = make_executable_schema(type_defs, query_type, debug=True)
# Multiple bindables:
schema = make_executable_schema(type_defs, [query_type, mutation_type], debug=True)
```
Looking at how Ariadne is used in the wild, a pattern is starting to emerge where developers create dedicated modules/packages in their projects for `scalars`, `mutations`, or `types`, using their `__init__.py` files to gather all bindables into single lists:
```
from .scalars import scalars
from .types import types
from .mutations import mutations
```
Those are then combined into a single list and passed to `make_executable_schema`:
```
schema = make_executable_schema(type_defs, scalars + types + mutations, debug=True)
```
This looks ugly, but things get uglier when a standalone bindable such as `fallback_resolvers` is involved:
```
schema = make_executable_schema(type_defs, scalars + types + mutations + [fallback_resolvers], debug=True)
```
We can simplify this by changing bindables to `*bindables`:
```
schema = make_executable_schema(type_defs, scalars, types, mutations, fallback_resolvers, debug=True)
```
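A simplified sketch of the corresponding signature change (assuming `graphql-core`'s `parse`/`build_ast_schema`; it omits the extension handling, enum defaults, directives and validation the real function performs):

```python
from graphql import GraphQLSchema, build_ast_schema, parse


def make_executable_schema(type_defs: str, *bindables) -> GraphQLSchema:
    """Accept any number of bindables, each a single object or a list of them."""
    schema = build_ast_schema(parse(type_defs))
    for bindable in bindables:
        if isinstance(bindable, list):
            for obj in bindable:
                obj.bind_to_schema(schema)
        else:
            bindable.bind_to_schema(schema)
    return schema
```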
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/executable_schema.py`
Content:
```
1 from typing import Dict, List, Type, Union
2
3 from graphql import (
4 DocumentNode,
5 GraphQLSchema,
6 assert_valid_schema,
7 build_ast_schema,
8 extend_schema,
9 parse,
10 validate_schema,
11 )
12
13 from .enums import set_default_enum_values_on_schema
14 from .schema_visitor import SchemaDirectiveVisitor
15 from .types import SchemaBindable
16
17
18 def make_executable_schema(
19 type_defs: Union[str, List[str]],
20 bindables: Union[SchemaBindable, List[SchemaBindable], None] = None,
21 *,
22 directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,
23 ) -> GraphQLSchema:
24 if isinstance(type_defs, list):
25 type_defs = join_type_defs(type_defs)
26
27 ast_document = parse(type_defs)
28 schema = build_and_extend_schema(ast_document)
29 validate_schema(schema)
30
31 if isinstance(bindables, list):
32 for obj in bindables:
33 obj.bind_to_schema(schema)
34 elif bindables:
35 bindables.bind_to_schema(schema)
36
37 set_default_enum_values_on_schema(schema)
38
39 if directives:
40 SchemaDirectiveVisitor.visit_schema_directives(schema, directives)
41
42 assert_valid_schema(schema)
43
44 return schema
45
46
47 def join_type_defs(type_defs: List[str]) -> str:
48 return "\n\n".join(t.strip() for t in type_defs)
49
50
51 def build_and_extend_schema(ast: DocumentNode) -> GraphQLSchema:
52 schema = build_ast_schema(ast)
53 extension_ast = extract_extensions(ast)
54
55 if extension_ast.definitions:
56 schema = extend_schema(schema, extension_ast)
57
58 return schema
59
60
61 EXTENSION_KINDS = [
62 "scalar_type_extension",
63 "object_type_extension",
64 "interface_type_extension",
65 "union_type_extension",
66 "enum_type_extension",
67 "input_object_type_extension",
68 ]
69
70
71 def extract_extensions(ast: DocumentNode) -> DocumentNode:
72 extensions = [node for node in ast.definitions if node.kind in EXTENSION_KINDS]
73 return DocumentNode(definitions=extensions)
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py
--- a/ariadne/executable_schema.py
+++ b/ariadne/executable_schema.py
@@ -17,8 +17,7 @@
def make_executable_schema(
type_defs: Union[str, List[str]],
- bindables: Union[SchemaBindable, List[SchemaBindable], None] = None,
- *,
+ *bindables: Union[SchemaBindable, List[SchemaBindable]],
directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,
) -> GraphQLSchema:
if isinstance(type_defs, list):
@@ -28,11 +27,12 @@
schema = build_and_extend_schema(ast_document)
validate_schema(schema)
- if isinstance(bindables, list):
- for obj in bindables:
- obj.bind_to_schema(schema)
- elif bindables:
- bindables.bind_to_schema(schema)
+ for bindable in bindables:
+ if isinstance(bindable, list):
+ for obj in bindable:
+ obj.bind_to_schema(schema)
+ else:
+ bindable.bind_to_schema(schema)
set_default_enum_values_on_schema(schema)
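With the variadic `*bindables` signature, the call from the issue no longer needs list concatenation: each positional argument may be either a single bindable or a list of bindables. Below is a minimal sketch of how the patched API could be used, assuming Ariadne's standard `QueryType` and `fallback_resolvers` exports; it is illustrative only, not part of the diff above.
```python
# Illustrative sketch only; assumes the patched *bindables signature.
from ariadne import QueryType, fallback_resolvers, make_executable_schema

type_defs = """
    type Query {
        hello: String!
    }
"""

query = QueryType()


@query.field("hello")
def resolve_hello(*_):
    return "Hello!"


# Lists and standalone bindables mix freely as positional arguments, so the
# scalars/types/mutations aggregation pattern from the issue keeps working
# without any `+ [fallback_resolvers]` concatenation.
schema = make_executable_schema(type_defs, [query], fallback_resolvers)
```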
| {"golden_diff": "diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py\n--- a/ariadne/executable_schema.py\n+++ b/ariadne/executable_schema.py\n@@ -17,8 +17,7 @@\n \n def make_executable_schema(\n type_defs: Union[str, List[str]],\n- bindables: Union[SchemaBindable, List[SchemaBindable], None] = None,\n- *,\n+ *bindables: Union[SchemaBindable, List[SchemaBindable]],\n directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,\n ) -> GraphQLSchema:\n if isinstance(type_defs, list):\n@@ -28,11 +27,12 @@\n schema = build_and_extend_schema(ast_document)\n validate_schema(schema)\n \n- if isinstance(bindables, list):\n- for obj in bindables:\n- obj.bind_to_schema(schema)\n- elif bindables:\n- bindables.bind_to_schema(schema)\n+ for bindable in bindables:\n+ if isinstance(bindable, list):\n+ for obj in bindable:\n+ obj.bind_to_schema(schema)\n+ else:\n+ bindable.bind_to_schema(schema)\n \n set_default_enum_values_on_schema(schema)\n", "issue": "Change `make_executable_schema` API to accept multiple bindables args\nCurrently, the second argument to `make_executable_schema` is list of `SchemaBindlables` or single bindable:\r\n\r\n```python\r\n# Single bindable:\r\nschema = make_executable_schema(type_defs, query_type, debug=True)\r\n\r\n# Multiple bindables:\r\nschema = make_executable_schema(type_defs, [query_type, mutation_type], debug=True)\r\n```\r\n\r\nLooking at Ariadne uses in the wild, a pattern is starting to emerge where developers create dedicated modules/packages in their project for `scalars`, `mutations` or `types`, that use their `__init__.py`'s to gather all bindables into single lists:\r\n\r\n```\r\nfrom .scalars import scalars\r\nfrom .types import types\r\nfrom .mutations import mutations\r\n```\r\n\r\nThose are then combined into single list and passed to `make_executable_schema`:\r\n\r\n```\r\nschema = make_executable_schema(type_defs, scalars + types + mutations, debug=True)\r\n```\r\n\r\nThis looks ugly, but things get uglier when there's bindable involved:\r\n\r\n```\r\nschema = make_executable_schema(type_defs, scalars + types + mutations + [fallback_resolvers], debug=True)\r\n```\r\n\r\nWe can simplify this by changing bindables to `*bindables`:\r\n\r\n```\r\nschema = make_executable_schema(type_defs, scalars, types, mutations, fallback_resolvers, debug=True)\r\n```\n", "before_files": [{"content": "from typing import Dict, List, Type, Union\n\nfrom graphql import (\n DocumentNode,\n GraphQLSchema,\n assert_valid_schema,\n build_ast_schema,\n extend_schema,\n parse,\n validate_schema,\n)\n\nfrom .enums import set_default_enum_values_on_schema\nfrom .schema_visitor import SchemaDirectiveVisitor\nfrom .types import SchemaBindable\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]],\n bindables: Union[SchemaBindable, List[SchemaBindable], None] = None,\n *,\n directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n ast_document = parse(type_defs)\n schema = build_and_extend_schema(ast_document)\n validate_schema(schema)\n\n if isinstance(bindables, list):\n for obj in bindables:\n obj.bind_to_schema(schema)\n elif bindables:\n bindables.bind_to_schema(schema)\n\n set_default_enum_values_on_schema(schema)\n\n if directives:\n SchemaDirectiveVisitor.visit_schema_directives(schema, directives)\n\n assert_valid_schema(schema)\n\n return schema\n\n\ndef join_type_defs(type_defs: List[str]) -> str:\n return \"\\n\\n\".join(t.strip() for t in 
type_defs)\n\n\ndef build_and_extend_schema(ast: DocumentNode) -> GraphQLSchema:\n schema = build_ast_schema(ast)\n extension_ast = extract_extensions(ast)\n\n if extension_ast.definitions:\n schema = extend_schema(schema, extension_ast)\n\n return schema\n\n\nEXTENSION_KINDS = [\n \"scalar_type_extension\",\n \"object_type_extension\",\n \"interface_type_extension\",\n \"union_type_extension\",\n \"enum_type_extension\",\n \"input_object_type_extension\",\n]\n\n\ndef extract_extensions(ast: DocumentNode) -> DocumentNode:\n extensions = [node for node in ast.definitions if node.kind in EXTENSION_KINDS]\n return DocumentNode(definitions=extensions)\n", "path": "ariadne/executable_schema.py"}], "after_files": [{"content": "from typing import Dict, List, Type, Union\n\nfrom graphql import (\n DocumentNode,\n GraphQLSchema,\n assert_valid_schema,\n build_ast_schema,\n extend_schema,\n parse,\n validate_schema,\n)\n\nfrom .enums import set_default_enum_values_on_schema\nfrom .schema_visitor import SchemaDirectiveVisitor\nfrom .types import SchemaBindable\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]],\n *bindables: Union[SchemaBindable, List[SchemaBindable]],\n directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n ast_document = parse(type_defs)\n schema = build_and_extend_schema(ast_document)\n validate_schema(schema)\n\n for bindable in bindables:\n if isinstance(bindable, list):\n for obj in bindable:\n obj.bind_to_schema(schema)\n else:\n bindable.bind_to_schema(schema)\n\n set_default_enum_values_on_schema(schema)\n\n if directives:\n SchemaDirectiveVisitor.visit_schema_directives(schema, directives)\n\n assert_valid_schema(schema)\n\n return schema\n\n\ndef join_type_defs(type_defs: List[str]) -> str:\n return \"\\n\\n\".join(t.strip() for t in type_defs)\n\n\ndef build_and_extend_schema(ast: DocumentNode) -> GraphQLSchema:\n schema = build_ast_schema(ast)\n extension_ast = extract_extensions(ast)\n\n if extension_ast.definitions:\n schema = extend_schema(schema, extension_ast)\n\n return schema\n\n\nEXTENSION_KINDS = [\n \"scalar_type_extension\",\n \"object_type_extension\",\n \"interface_type_extension\",\n \"union_type_extension\",\n \"enum_type_extension\",\n \"input_object_type_extension\",\n]\n\n\ndef extract_extensions(ast: DocumentNode) -> DocumentNode:\n extensions = [node for node in ast.definitions if node.kind in EXTENSION_KINDS]\n return DocumentNode(definitions=extensions)\n", "path": "ariadne/executable_schema.py"}]} | 1,122 | 267 |
gh_patches_debug_17397 | rasdani/github-patches | git_diff | benoitc__gunicorn-2570 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
threading.Thread.setDaemon has been deprecated in favor of setting daemon attribute directly in Python 3.10
Ref : python/cpython#25174
https://github.com/benoitc/gunicorn/blob/cf55d2cec277f220ebd605989ce78ad1bb553c46/gunicorn/reloader.py#L20
https://github.com/benoitc/gunicorn/blob/cf55d2cec277f220ebd605989ce78ad1bb553c46/gunicorn/reloader.py#L77
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gunicorn/reloader.py`
Content:
```
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5 # pylint: disable=no-else-continue
6
7 import os
8 import os.path
9 import re
10 import sys
11 import time
12 import threading
13
14 COMPILED_EXT_RE = re.compile(r'py[co]$')
15
16
17 class Reloader(threading.Thread):
18 def __init__(self, extra_files=None, interval=1, callback=None):
19 super().__init__()
20 self.setDaemon(True)
21 self._extra_files = set(extra_files or ())
22 self._interval = interval
23 self._callback = callback
24
25 def add_extra_file(self, filename):
26 self._extra_files.add(filename)
27
28 def get_files(self):
29 fnames = [
30 COMPILED_EXT_RE.sub('py', module.__file__)
31 for module in tuple(sys.modules.values())
32 if getattr(module, '__file__', None)
33 ]
34
35 fnames.extend(self._extra_files)
36
37 return fnames
38
39 def run(self):
40 mtimes = {}
41 while True:
42 for filename in self.get_files():
43 try:
44 mtime = os.stat(filename).st_mtime
45 except OSError:
46 continue
47 old_time = mtimes.get(filename)
48 if old_time is None:
49 mtimes[filename] = mtime
50 continue
51 elif mtime > old_time:
52 if self._callback:
53 self._callback(filename)
54 time.sleep(self._interval)
55
56
57 has_inotify = False
58 if sys.platform.startswith('linux'):
59 try:
60 from inotify.adapters import Inotify
61 import inotify.constants
62 has_inotify = True
63 except ImportError:
64 pass
65
66
67 if has_inotify:
68
69 class InotifyReloader(threading.Thread):
70 event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE
71 | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY
72 | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM
73 | inotify.constants.IN_MOVED_TO)
74
75 def __init__(self, extra_files=None, callback=None):
76 super().__init__()
77 self.setDaemon(True)
78 self._callback = callback
79 self._dirs = set()
80 self._watcher = Inotify()
81
82 for extra_file in extra_files:
83 self.add_extra_file(extra_file)
84
85 def add_extra_file(self, filename):
86 dirname = os.path.dirname(filename)
87
88 if dirname in self._dirs:
89 return
90
91 self._watcher.add_watch(dirname, mask=self.event_mask)
92 self._dirs.add(dirname)
93
94 def get_dirs(self):
95 fnames = [
96 os.path.dirname(os.path.abspath(COMPILED_EXT_RE.sub('py', module.__file__)))
97 for module in tuple(sys.modules.values())
98 if getattr(module, '__file__', None)
99 ]
100
101 return set(fnames)
102
103 def run(self):
104 self._dirs = self.get_dirs()
105
106 for dirname in self._dirs:
107 if os.path.isdir(dirname):
108 self._watcher.add_watch(dirname, mask=self.event_mask)
109
110 for event in self._watcher.event_gen():
111 if event is None:
112 continue
113
114 filename = event[3]
115
116 self._callback(filename)
117
118 else:
119
120 class InotifyReloader(object):
121 def __init__(self, callback=None):
122 raise ImportError('You must have the inotify module installed to '
123 'use the inotify reloader')
124
125
126 preferred_reloader = InotifyReloader if has_inotify else Reloader
127
128 reloader_engines = {
129 'auto': preferred_reloader,
130 'poll': Reloader,
131 'inotify': InotifyReloader,
132 }
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gunicorn/reloader.py b/gunicorn/reloader.py
--- a/gunicorn/reloader.py
+++ b/gunicorn/reloader.py
@@ -17,7 +17,7 @@
class Reloader(threading.Thread):
def __init__(self, extra_files=None, interval=1, callback=None):
super().__init__()
- self.setDaemon(True)
+ self.daemon = True
self._extra_files = set(extra_files or ())
self._interval = interval
self._callback = callback
@@ -74,7 +74,7 @@
def __init__(self, extra_files=None, callback=None):
super().__init__()
- self.setDaemon(True)
+ self.daemon = True
self._callback = callback
self._dirs = set()
self._watcher = Inotify()
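The replacement follows the idiom the deprecation points to: assign the `daemon` attribute (or pass `daemon=True` to the `Thread` constructor) instead of calling `setDaemon()`. A minimal sketch, independent of gunicorn's internals (the `Worker` class is hypothetical):
```python
# Minimal sketch of the non-deprecated idiom; Worker is a made-up example class.
import threading


class Worker(threading.Thread):
    def __init__(self):
        super().__init__()
        # Replaces self.setDaemon(True); no DeprecationWarning on Python 3.10+.
        self.daemon = True


# Equivalent spelling for ad-hoc threads:
t = threading.Thread(target=print, args=("tick",), daemon=True)
t.start()
t.join()
```
Both spellings predate Python 3.10 by many releases (the constructor keyword was added in 3.3), so the change does not narrow the versions gunicorn supports.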
| {"golden_diff": "diff --git a/gunicorn/reloader.py b/gunicorn/reloader.py\n--- a/gunicorn/reloader.py\n+++ b/gunicorn/reloader.py\n@@ -17,7 +17,7 @@\n class Reloader(threading.Thread):\n def __init__(self, extra_files=None, interval=1, callback=None):\n super().__init__()\n- self.setDaemon(True)\n+ self.daemon = True\n self._extra_files = set(extra_files or ())\n self._interval = interval\n self._callback = callback\n@@ -74,7 +74,7 @@\n \n def __init__(self, extra_files=None, callback=None):\n super().__init__()\n- self.setDaemon(True)\n+ self.daemon = True\n self._callback = callback\n self._dirs = set()\n self._watcher = Inotify()\n", "issue": "threading.Thread.setDaemon has been deprecated in favor of setting daemon attribute directly in Python 3.10\nRef : python/cpython#25174\r\n\r\nhttps://github.com/benoitc/gunicorn/blob/cf55d2cec277f220ebd605989ce78ad1bb553c46/gunicorn/reloader.py#L20\r\n\r\nhttps://github.com/benoitc/gunicorn/blob/cf55d2cec277f220ebd605989ce78ad1bb553c46/gunicorn/reloader.py#L77\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n# pylint: disable=no-else-continue\n\nimport os\nimport os.path\nimport re\nimport sys\nimport time\nimport threading\n\nCOMPILED_EXT_RE = re.compile(r'py[co]$')\n\n\nclass Reloader(threading.Thread):\n def __init__(self, extra_files=None, interval=1, callback=None):\n super().__init__()\n self.setDaemon(True)\n self._extra_files = set(extra_files or ())\n self._interval = interval\n self._callback = callback\n\n def add_extra_file(self, filename):\n self._extra_files.add(filename)\n\n def get_files(self):\n fnames = [\n COMPILED_EXT_RE.sub('py', module.__file__)\n for module in tuple(sys.modules.values())\n if getattr(module, '__file__', None)\n ]\n\n fnames.extend(self._extra_files)\n\n return fnames\n\n def run(self):\n mtimes = {}\n while True:\n for filename in self.get_files():\n try:\n mtime = os.stat(filename).st_mtime\n except OSError:\n continue\n old_time = mtimes.get(filename)\n if old_time is None:\n mtimes[filename] = mtime\n continue\n elif mtime > old_time:\n if self._callback:\n self._callback(filename)\n time.sleep(self._interval)\n\n\nhas_inotify = False\nif sys.platform.startswith('linux'):\n try:\n from inotify.adapters import Inotify\n import inotify.constants\n has_inotify = True\n except ImportError:\n pass\n\n\nif has_inotify:\n\n class InotifyReloader(threading.Thread):\n event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE\n | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY\n | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM\n | inotify.constants.IN_MOVED_TO)\n\n def __init__(self, extra_files=None, callback=None):\n super().__init__()\n self.setDaemon(True)\n self._callback = callback\n self._dirs = set()\n self._watcher = Inotify()\n\n for extra_file in extra_files:\n self.add_extra_file(extra_file)\n\n def add_extra_file(self, filename):\n dirname = os.path.dirname(filename)\n\n if dirname in self._dirs:\n return\n\n self._watcher.add_watch(dirname, mask=self.event_mask)\n self._dirs.add(dirname)\n\n def get_dirs(self):\n fnames = [\n os.path.dirname(os.path.abspath(COMPILED_EXT_RE.sub('py', module.__file__)))\n for module in tuple(sys.modules.values())\n if getattr(module, '__file__', None)\n ]\n\n return set(fnames)\n\n def run(self):\n self._dirs = self.get_dirs()\n\n for dirname in self._dirs:\n if os.path.isdir(dirname):\n 
self._watcher.add_watch(dirname, mask=self.event_mask)\n\n for event in self._watcher.event_gen():\n if event is None:\n continue\n\n filename = event[3]\n\n self._callback(filename)\n\nelse:\n\n class InotifyReloader(object):\n def __init__(self, callback=None):\n raise ImportError('You must have the inotify module installed to '\n 'use the inotify reloader')\n\n\npreferred_reloader = InotifyReloader if has_inotify else Reloader\n\nreloader_engines = {\n 'auto': preferred_reloader,\n 'poll': Reloader,\n 'inotify': InotifyReloader,\n}\n", "path": "gunicorn/reloader.py"}], "after_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n# pylint: disable=no-else-continue\n\nimport os\nimport os.path\nimport re\nimport sys\nimport time\nimport threading\n\nCOMPILED_EXT_RE = re.compile(r'py[co]$')\n\n\nclass Reloader(threading.Thread):\n def __init__(self, extra_files=None, interval=1, callback=None):\n super().__init__()\n self.daemon = True\n self._extra_files = set(extra_files or ())\n self._interval = interval\n self._callback = callback\n\n def add_extra_file(self, filename):\n self._extra_files.add(filename)\n\n def get_files(self):\n fnames = [\n COMPILED_EXT_RE.sub('py', module.__file__)\n for module in tuple(sys.modules.values())\n if getattr(module, '__file__', None)\n ]\n\n fnames.extend(self._extra_files)\n\n return fnames\n\n def run(self):\n mtimes = {}\n while True:\n for filename in self.get_files():\n try:\n mtime = os.stat(filename).st_mtime\n except OSError:\n continue\n old_time = mtimes.get(filename)\n if old_time is None:\n mtimes[filename] = mtime\n continue\n elif mtime > old_time:\n if self._callback:\n self._callback(filename)\n time.sleep(self._interval)\n\n\nhas_inotify = False\nif sys.platform.startswith('linux'):\n try:\n from inotify.adapters import Inotify\n import inotify.constants\n has_inotify = True\n except ImportError:\n pass\n\n\nif has_inotify:\n\n class InotifyReloader(threading.Thread):\n event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE\n | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY\n | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM\n | inotify.constants.IN_MOVED_TO)\n\n def __init__(self, extra_files=None, callback=None):\n super().__init__()\n self.daemon = True\n self._callback = callback\n self._dirs = set()\n self._watcher = Inotify()\n\n for extra_file in extra_files:\n self.add_extra_file(extra_file)\n\n def add_extra_file(self, filename):\n dirname = os.path.dirname(filename)\n\n if dirname in self._dirs:\n return\n\n self._watcher.add_watch(dirname, mask=self.event_mask)\n self._dirs.add(dirname)\n\n def get_dirs(self):\n fnames = [\n os.path.dirname(os.path.abspath(COMPILED_EXT_RE.sub('py', module.__file__)))\n for module in tuple(sys.modules.values())\n if getattr(module, '__file__', None)\n ]\n\n return set(fnames)\n\n def run(self):\n self._dirs = self.get_dirs()\n\n for dirname in self._dirs:\n if os.path.isdir(dirname):\n self._watcher.add_watch(dirname, mask=self.event_mask)\n\n for event in self._watcher.event_gen():\n if event is None:\n continue\n\n filename = event[3]\n\n self._callback(filename)\n\nelse:\n\n class InotifyReloader(object):\n def __init__(self, callback=None):\n raise ImportError('You must have the inotify module installed to '\n 'use the inotify reloader')\n\n\npreferred_reloader = InotifyReloader if has_inotify else Reloader\n\nreloader_engines = {\n 'auto': 
preferred_reloader,\n 'poll': Reloader,\n 'inotify': InotifyReloader,\n}\n", "path": "gunicorn/reloader.py"}]} | 1,497 | 184 |
gh_patches_debug_9112 | rasdani/github-patches | git_diff | pypi__warehouse-6207 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve webauthn errors
Problems:
- We currently have two pieces of JS that control the display of webauthn errors - some in `index.js`, some in `webauthn.js`
- The errors are not announced to the screenreader (via `role=alert`)
- The errors are not associated with the webauthn label field (on the provisioning page) - we should use `aria-describedby` for this
- The user is able to put text into the label field on the provisioning page - it should be disabled
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/manage/forms.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import json
14
15 import wtforms
16
17 import warehouse.utils.otp as otp
18 import warehouse.utils.webauthn as webauthn
19
20 from warehouse import forms
21 from warehouse.accounts.forms import (
22 NewEmailMixin,
23 NewPasswordMixin,
24 PasswordMixin,
25 TOTPValueMixin,
26 WebAuthnCredentialMixin,
27 )
28
29
30 class RoleNameMixin:
31
32 role_name = wtforms.SelectField(
33 "Select role",
34 choices=[("Maintainer", "Maintainer"), ("Owner", "Owner")],
35 validators=[wtforms.validators.DataRequired(message="Select role")],
36 )
37
38
39 class UsernameMixin:
40
41 username = wtforms.StringField(
42 validators=[wtforms.validators.DataRequired(message="Specify username")]
43 )
44
45 def validate_username(self, field):
46 userid = self.user_service.find_userid(field.data)
47
48 if userid is None:
49 raise wtforms.validators.ValidationError(
50 "No user found with that username. Try again."
51 )
52
53
54 class CreateRoleForm(RoleNameMixin, UsernameMixin, forms.Form):
55 def __init__(self, *args, user_service, **kwargs):
56 super().__init__(*args, **kwargs)
57 self.user_service = user_service
58
59
60 class ChangeRoleForm(RoleNameMixin, forms.Form):
61 pass
62
63
64 class SaveAccountForm(forms.Form):
65
66 __params__ = ["name"]
67
68 name = wtforms.StringField()
69
70
71 class AddEmailForm(NewEmailMixin, forms.Form):
72
73 __params__ = ["email"]
74
75 def __init__(self, *args, user_service, user_id, **kwargs):
76 super().__init__(*args, **kwargs)
77 self.user_service = user_service
78 self.user_id = user_id
79
80
81 class ChangePasswordForm(PasswordMixin, NewPasswordMixin, forms.Form):
82
83 __params__ = ["password", "new_password", "password_confirm"]
84
85 def __init__(self, *args, user_service, **kwargs):
86 super().__init__(*args, **kwargs)
87 self.user_service = user_service
88
89
90 class DeleteTOTPForm(UsernameMixin, forms.Form):
91
92 __params__ = ["confirm_username"]
93
94 def __init__(self, *args, user_service, **kwargs):
95 super().__init__(*args, **kwargs)
96 self.user_service = user_service
97
98
99 class ProvisionTOTPForm(TOTPValueMixin, forms.Form):
100
101 __params__ = ["totp_value"]
102
103 def __init__(self, *args, totp_secret, **kwargs):
104 super().__init__(*args, **kwargs)
105 self.totp_secret = totp_secret
106
107 def validate_totp_value(self, field):
108 totp_value = field.data.encode("utf8")
109 if not otp.verify_totp(self.totp_secret, totp_value):
110 raise wtforms.validators.ValidationError("Invalid TOTP code. Try again?")
111
112
113 class DeleteWebAuthnForm(forms.Form):
114 __params__ = ["confirm_key_name"]
115
116 label = wtforms.StringField(
117 validators=[
118 wtforms.validators.DataRequired(message="Specify a label"),
119 wtforms.validators.Length(
120 max=64, message=("Label must be 64 characters or less")
121 ),
122 ]
123 )
124
125 def __init__(self, *args, user_service, user_id, **kwargs):
126 super().__init__(*args, **kwargs)
127 self.user_service = user_service
128 self.user_id = user_id
129
130 def validate_label(self, field):
131 label = field.data
132
133 webauthn = self.user_service.get_webauthn_by_label(self.user_id, label)
134 if webauthn is None:
135 raise wtforms.validators.ValidationError("No WebAuthn key with given label")
136 self.webauthn = webauthn
137
138
139 class ProvisionWebAuthnForm(WebAuthnCredentialMixin, forms.Form):
140 __params__ = ["label", "credential"]
141
142 label = wtforms.StringField(
143 validators=[
144 wtforms.validators.DataRequired(message="Specify a label"),
145 wtforms.validators.Length(
146 max=64, message=("Label must be 64 characters or less")
147 ),
148 ]
149 )
150
151 def __init__(
152 self, *args, user_service, user_id, challenge, rp_id, origin, **kwargs
153 ):
154 super().__init__(*args, **kwargs)
155 self.user_service = user_service
156 self.user_id = user_id
157 self.challenge = challenge
158 self.rp_id = rp_id
159 self.origin = origin
160
161 def validate_credential(self, field):
162 try:
163 credential_dict = json.loads(field.data.encode("utf8"))
164 except json.JSONDecodeError:
165 raise wtforms.validators.ValidationError(
166 "Invalid WebAuthn credential: Bad payload"
167 )
168
169 try:
170 validated_credential = self.user_service.verify_webauthn_credential(
171 credential_dict,
172 challenge=self.challenge,
173 rp_id=self.rp_id,
174 origin=self.origin,
175 )
176 except webauthn.RegistrationRejectedException as e:
177 raise wtforms.validators.ValidationError(str(e))
178
179 self.validated_credential = validated_credential
180
181 def validate_label(self, field):
182 label = field.data
183
184 if self.user_service.get_webauthn_by_label(self.user_id, label) is not None:
185 raise wtforms.validators.ValidationError(f"Label '{label}' already in use")
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/manage/forms.py b/warehouse/manage/forms.py
--- a/warehouse/manage/forms.py
+++ b/warehouse/manage/forms.py
@@ -111,11 +111,11 @@
class DeleteWebAuthnForm(forms.Form):
- __params__ = ["confirm_key_name"]
+ __params__ = ["confirm_device_name"]
label = wtforms.StringField(
validators=[
- wtforms.validators.DataRequired(message="Specify a label"),
+ wtforms.validators.DataRequired(message="Specify a device name"),
wtforms.validators.Length(
max=64, message=("Label must be 64 characters or less")
),
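The diff only renames the confirmation parameter and its validation message; the accessibility points from the issue (a single source of error markup, `role=alert`, `aria-describedby`, disabling the label input) live in the templates and JS rather than in this form. A self-contained sketch of the renamed message using plain `wtforms` (the real form also mixes in warehouse's own `forms.Form` base and a user-service lookup, which are omitted here):
```python
# Standalone sketch with plain wtforms; field and message mirror the patched form.
import wtforms


class DeleteWebAuthnForm(wtforms.Form):
    label = wtforms.StringField(
        validators=[
            wtforms.validators.DataRequired(message="Specify a device name"),
            wtforms.validators.Length(max=64, message="Label must be 64 characters or less"),
        ]
    )


form = DeleteWebAuthnForm()   # no form data submitted
form.validate()
print(form.label.errors)      # ['Specify a device name']
```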
| {"golden_diff": "diff --git a/warehouse/manage/forms.py b/warehouse/manage/forms.py\n--- a/warehouse/manage/forms.py\n+++ b/warehouse/manage/forms.py\n@@ -111,11 +111,11 @@\n \n \n class DeleteWebAuthnForm(forms.Form):\n- __params__ = [\"confirm_key_name\"]\n+ __params__ = [\"confirm_device_name\"]\n \n label = wtforms.StringField(\n validators=[\n- wtforms.validators.DataRequired(message=\"Specify a label\"),\n+ wtforms.validators.DataRequired(message=\"Specify a device name\"),\n wtforms.validators.Length(\n max=64, message=(\"Label must be 64 characters or less\")\n ),\n", "issue": "Improve webauthn errors\nProblems:\r\n\r\n- We currently have two pieces of JS that control the display of webauthn errors - some in `index.js`, some in `webauthn.js`\r\n- The errors are not announced to the screenreader (via `role=alert`)\r\n- The errors are not associated with the webauthn label field (on the provisioning page) - we should use `aria-describedby` for this\r\n- The user is able to put text into the label field on the provisioning page - it should be disabled\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport wtforms\n\nimport warehouse.utils.otp as otp\nimport warehouse.utils.webauthn as webauthn\n\nfrom warehouse import forms\nfrom warehouse.accounts.forms import (\n NewEmailMixin,\n NewPasswordMixin,\n PasswordMixin,\n TOTPValueMixin,\n WebAuthnCredentialMixin,\n)\n\n\nclass RoleNameMixin:\n\n role_name = wtforms.SelectField(\n \"Select role\",\n choices=[(\"Maintainer\", \"Maintainer\"), (\"Owner\", \"Owner\")],\n validators=[wtforms.validators.DataRequired(message=\"Select role\")],\n )\n\n\nclass UsernameMixin:\n\n username = wtforms.StringField(\n validators=[wtforms.validators.DataRequired(message=\"Specify username\")]\n )\n\n def validate_username(self, field):\n userid = self.user_service.find_userid(field.data)\n\n if userid is None:\n raise wtforms.validators.ValidationError(\n \"No user found with that username. 
Try again.\"\n )\n\n\nclass CreateRoleForm(RoleNameMixin, UsernameMixin, forms.Form):\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\nclass ChangeRoleForm(RoleNameMixin, forms.Form):\n pass\n\n\nclass SaveAccountForm(forms.Form):\n\n __params__ = [\"name\"]\n\n name = wtforms.StringField()\n\n\nclass AddEmailForm(NewEmailMixin, forms.Form):\n\n __params__ = [\"email\"]\n\n def __init__(self, *args, user_service, user_id, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n self.user_id = user_id\n\n\nclass ChangePasswordForm(PasswordMixin, NewPasswordMixin, forms.Form):\n\n __params__ = [\"password\", \"new_password\", \"password_confirm\"]\n\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\nclass DeleteTOTPForm(UsernameMixin, forms.Form):\n\n __params__ = [\"confirm_username\"]\n\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\nclass ProvisionTOTPForm(TOTPValueMixin, forms.Form):\n\n __params__ = [\"totp_value\"]\n\n def __init__(self, *args, totp_secret, **kwargs):\n super().__init__(*args, **kwargs)\n self.totp_secret = totp_secret\n\n def validate_totp_value(self, field):\n totp_value = field.data.encode(\"utf8\")\n if not otp.verify_totp(self.totp_secret, totp_value):\n raise wtforms.validators.ValidationError(\"Invalid TOTP code. Try again?\")\n\n\nclass DeleteWebAuthnForm(forms.Form):\n __params__ = [\"confirm_key_name\"]\n\n label = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(message=\"Specify a label\"),\n wtforms.validators.Length(\n max=64, message=(\"Label must be 64 characters or less\")\n ),\n ]\n )\n\n def __init__(self, *args, user_service, user_id, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n self.user_id = user_id\n\n def validate_label(self, field):\n label = field.data\n\n webauthn = self.user_service.get_webauthn_by_label(self.user_id, label)\n if webauthn is None:\n raise wtforms.validators.ValidationError(\"No WebAuthn key with given label\")\n self.webauthn = webauthn\n\n\nclass ProvisionWebAuthnForm(WebAuthnCredentialMixin, forms.Form):\n __params__ = [\"label\", \"credential\"]\n\n label = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(message=\"Specify a label\"),\n wtforms.validators.Length(\n max=64, message=(\"Label must be 64 characters or less\")\n ),\n ]\n )\n\n def __init__(\n self, *args, user_service, user_id, challenge, rp_id, origin, **kwargs\n ):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n self.user_id = user_id\n self.challenge = challenge\n self.rp_id = rp_id\n self.origin = origin\n\n def validate_credential(self, field):\n try:\n credential_dict = json.loads(field.data.encode(\"utf8\"))\n except json.JSONDecodeError:\n raise wtforms.validators.ValidationError(\n \"Invalid WebAuthn credential: Bad payload\"\n )\n\n try:\n validated_credential = self.user_service.verify_webauthn_credential(\n credential_dict,\n challenge=self.challenge,\n rp_id=self.rp_id,\n origin=self.origin,\n )\n except webauthn.RegistrationRejectedException as e:\n raise wtforms.validators.ValidationError(str(e))\n\n self.validated_credential = validated_credential\n\n def validate_label(self, field):\n label = field.data\n\n if self.user_service.get_webauthn_by_label(self.user_id, label) is not 
None:\n raise wtforms.validators.ValidationError(f\"Label '{label}' already in use\")\n", "path": "warehouse/manage/forms.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport wtforms\n\nimport warehouse.utils.otp as otp\nimport warehouse.utils.webauthn as webauthn\n\nfrom warehouse import forms\nfrom warehouse.accounts.forms import (\n NewEmailMixin,\n NewPasswordMixin,\n PasswordMixin,\n TOTPValueMixin,\n WebAuthnCredentialMixin,\n)\n\n\nclass RoleNameMixin:\n\n role_name = wtforms.SelectField(\n \"Select role\",\n choices=[(\"Maintainer\", \"Maintainer\"), (\"Owner\", \"Owner\")],\n validators=[wtforms.validators.DataRequired(message=\"Select role\")],\n )\n\n\nclass UsernameMixin:\n\n username = wtforms.StringField(\n validators=[wtforms.validators.DataRequired(message=\"Specify username\")]\n )\n\n def validate_username(self, field):\n userid = self.user_service.find_userid(field.data)\n\n if userid is None:\n raise wtforms.validators.ValidationError(\n \"No user found with that username. Try again.\"\n )\n\n\nclass CreateRoleForm(RoleNameMixin, UsernameMixin, forms.Form):\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\nclass ChangeRoleForm(RoleNameMixin, forms.Form):\n pass\n\n\nclass SaveAccountForm(forms.Form):\n\n __params__ = [\"name\"]\n\n name = wtforms.StringField()\n\n\nclass AddEmailForm(NewEmailMixin, forms.Form):\n\n __params__ = [\"email\"]\n\n def __init__(self, *args, user_service, user_id, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n self.user_id = user_id\n\n\nclass ChangePasswordForm(PasswordMixin, NewPasswordMixin, forms.Form):\n\n __params__ = [\"password\", \"new_password\", \"password_confirm\"]\n\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\nclass DeleteTOTPForm(UsernameMixin, forms.Form):\n\n __params__ = [\"confirm_username\"]\n\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\nclass ProvisionTOTPForm(TOTPValueMixin, forms.Form):\n\n __params__ = [\"totp_value\"]\n\n def __init__(self, *args, totp_secret, **kwargs):\n super().__init__(*args, **kwargs)\n self.totp_secret = totp_secret\n\n def validate_totp_value(self, field):\n totp_value = field.data.encode(\"utf8\")\n if not otp.verify_totp(self.totp_secret, totp_value):\n raise wtforms.validators.ValidationError(\"Invalid TOTP code. 
Try again?\")\n\n\nclass DeleteWebAuthnForm(forms.Form):\n __params__ = [\"confirm_device_name\"]\n\n label = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(message=\"Specify a device name\"),\n wtforms.validators.Length(\n max=64, message=(\"Label must be 64 characters or less\")\n ),\n ]\n )\n\n def __init__(self, *args, user_service, user_id, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n self.user_id = user_id\n\n def validate_label(self, field):\n label = field.data\n\n webauthn = self.user_service.get_webauthn_by_label(self.user_id, label)\n if webauthn is None:\n raise wtforms.validators.ValidationError(\"No WebAuthn key with given label\")\n self.webauthn = webauthn\n\n\nclass ProvisionWebAuthnForm(WebAuthnCredentialMixin, forms.Form):\n __params__ = [\"label\", \"credential\"]\n\n label = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(message=\"Specify a label\"),\n wtforms.validators.Length(\n max=64, message=(\"Label must be 64 characters or less\")\n ),\n ]\n )\n\n def __init__(\n self, *args, user_service, user_id, challenge, rp_id, origin, **kwargs\n ):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n self.user_id = user_id\n self.challenge = challenge\n self.rp_id = rp_id\n self.origin = origin\n\n def validate_credential(self, field):\n try:\n credential_dict = json.loads(field.data.encode(\"utf8\"))\n except json.JSONDecodeError:\n raise wtforms.validators.ValidationError(\n \"Invalid WebAuthn credential: Bad payload\"\n )\n\n try:\n validated_credential = self.user_service.verify_webauthn_credential(\n credential_dict,\n challenge=self.challenge,\n rp_id=self.rp_id,\n origin=self.origin,\n )\n except webauthn.RegistrationRejectedException as e:\n raise wtforms.validators.ValidationError(str(e))\n\n self.validated_credential = validated_credential\n\n def validate_label(self, field):\n label = field.data\n\n if self.user_service.get_webauthn_by_label(self.user_id, label) is not None:\n raise wtforms.validators.ValidationError(f\"Label '{label}' already in use\")\n", "path": "warehouse/manage/forms.py"}]} | 2,082 | 145 |
gh_patches_debug_8487 | rasdani/github-patches | git_diff | pytorch__ignite-785 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tqdm_logger does not take epoch_length into account
## 🐛 `tqdm_logger` does not take `epoch_length` into account
When calling `Engine.run()` with a custom `epoch_length`,
the tqdm progress bar does not adapt and displays the full number of batches in the data.
Here is a minimal example:
```python
from ignite.contrib.handlers.tqdm_logger import ProgressBar
from ignite.engine import Engine
from torch import nn
from torch.utils.data import DataLoader
data = list(range(100))
model = nn.Identity()
engine = Engine(lambda engine, batch: model(batch))
ProgressBar(persist=True).attach(engine)
engine.run(data, epoch_length=50)
```
We have 100 items in `data` but the true end of the epoch is at 50 iterations, yet the progress is displayed over a range of 100 and simply stops midway, while I expect it to be displayed over a range of 50 so that the bar is full when the epoch ends.
One cannot override tqdm's `total` argument by replacing
```python
ProgressBar(persist=True).attach(engine)
```
by
```python
ProgressBar(persist=True, total=50).attach(engine)
```
for it raises `TypeError: type object got multiple values for keyword argument 'total'`.
## Environment
- PyTorch Version : 1.4.0
- Ignite Version : 0.3.0
- OS : Ubuntu 19.04
- Ignite installation method : `pip`
- Python version: 3.7.3
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/contrib/handlers/tqdm_logger.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import warnings
3
4 import torch
5
6 from ignite.engine import Events
7 from ignite.engine.engine import EventWithFilter
8 from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler
9
10
11 class ProgressBar(BaseLogger):
12 """
13 TQDM progress bar handler to log training progress and computed metrics.
14
15 Args:
16 persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)
17 bar_format (str, optional): Specify a custom bar string formatting. May impact performance.
18 [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].
19 Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where
20 l_bar='{desc}: {percentage:3.0f}%|' and
21 r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the
22 formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.
23 **tqdm_kwargs: kwargs passed to tqdm progress bar.
24 By default, progress bar description displays "Epoch [5/10]" where 5 is the current epoch and 10 is the
25 number of epochs. If tqdm_kwargs defines `desc`, e.g. "Predictions", than the description is
26 "Predictions [5/10]" if number of epochs is more than one otherwise it is simply "Predictions".
27
28 Examples:
29
30 Simple progress bar
31
32 .. code-block:: python
33
34 trainer = create_supervised_trainer(model, optimizer, loss)
35
36 pbar = ProgressBar()
37 pbar.attach(trainer)
38
39 # Progress bar will looks like
40 # Epoch [2/50]: [64/128] 50%|█████ [06:17<12:34]
41
42 Log output to a file instead of stderr (tqdm's default output)
43
44 .. code-block:: python
45
46 trainer = create_supervised_trainer(model, optimizer, loss)
47
48 log_file = open("output.log", "w")
49 pbar = ProgressBar(file=log_file)
50 pbar.attach(trainer)
51
52 Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`
53 (such as :class:`~ignite.metrics.RunningAverage`)
54
55 .. code-block:: python
56
57 trainer = create_supervised_trainer(model, optimizer, loss)
58
59 RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')
60
61 pbar = ProgressBar()
62 pbar.attach(trainer, ['loss'])
63
64 # Progress bar will looks like
65 # Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34]
66
67 Directly attach the engine's output
68
69 .. code-block:: python
70
71 trainer = create_supervised_trainer(model, optimizer, loss)
72
73 pbar = ProgressBar()
74 pbar.attach(trainer, output_transform=lambda x: {'loss': x})
75
76 # Progress bar will looks like
77 # Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34]
78
79 Note:
80 When adding attaching the progress bar to an engine, it is recommend that you replace
81 every print operation in the engine's handlers triggered every iteration with
82 ``pbar.log_message`` to guarantee the correct format of the stdout.
83
84 Note:
85 When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,
86 please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.
87 Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set
88 to an empty string value.
89
90 """
91
92 _events_order = [
93 Events.STARTED,
94 Events.EPOCH_STARTED,
95 Events.ITERATION_STARTED,
96 Events.ITERATION_COMPLETED,
97 Events.EPOCH_COMPLETED,
98 Events.COMPLETED
99 ]
100
101 def __init__(self, persist=False,
102 bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',
103 **tqdm_kwargs):
104
105 try:
106 from tqdm.autonotebook import tqdm
107 except ImportError:
108 raise RuntimeError("This contrib module requires tqdm to be installed. "
109 "Please install it with command: \n pip install tqdm")
110
111 self.pbar_cls = tqdm
112 self.pbar = None
113 self.persist = persist
114 self.bar_format = bar_format
115 self.tqdm_kwargs = tqdm_kwargs
116
117 def _reset(self, pbar_total):
118 self.pbar = self.pbar_cls(
119 total=pbar_total,
120 leave=self.persist,
121 bar_format=self.bar_format,
122 initial=1,
123 **self.tqdm_kwargs
124 )
125
126 def _close(self, engine):
127 if self.pbar:
128 self.pbar.close()
129 self.pbar = None
130
131 @staticmethod
132 def _compare_lt(event1, event2):
133 if isinstance(event1, EventWithFilter):
134 event1 = event1.event
135 if isinstance(event2, EventWithFilter):
136 event2 = event2.event
137 i1 = ProgressBar._events_order.index(event1)
138 i2 = ProgressBar._events_order.index(event2)
139 return i1 < i2
140
141 def log_message(self, message):
142 """
143 Logs a message, preserving the progress bar correct output format.
144
145 Args:
146 message (str): string you wish to log.
147 """
148 from tqdm import tqdm
149
150 tqdm.write(message, file=self.tqdm_kwargs.get("file", None))
151
152 def attach(self, engine, metric_names=None, output_transform=None,
153 event_name=Events.ITERATION_COMPLETED,
154 closing_event_name=Events.EPOCH_COMPLETED):
155 """
156 Attaches the progress bar to an engine object.
157
158 Args:
159 engine (Engine): engine object.
160 metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
161 metrics.
162 output_transform (callable, optional): a function to select what you want to print from the engine's
163 output. This function may return either a dictionary with entries in the format of ``{name: value}``,
164 or a single scalar, which will be displayed with the default name `output`.
165 event_name: event's name on which the progress bar advances. Valid events are from
166 :class:`~ignite.engine.Events`.
167 closing_event_name: event's name on which the progress bar is closed. Valid events are from
168 :class:`~ignite.engine.Events`.
169
170 Note: accepted output value types are numbers, 0d and 1d torch tensors and strings
171
172 """
173 desc = self.tqdm_kwargs.get("desc", "Epoch")
174
175 if not isinstance(event_name, (Events, EventWithFilter)):
176 raise ValueError("Logging event should be only `ignite.engine.Events`")
177
178 if isinstance(closing_event_name, EventWithFilter):
179 raise ValueError("Closing event should not use any event filter")
180
181 if not self._compare_lt(event_name, closing_event_name):
182 raise ValueError("Logging event {} should be called before closing event {}"
183 .format(event_name, closing_event_name))
184
185 log_handler = _OutputHandler(desc, metric_names, output_transform,
186 closing_event_name=closing_event_name)
187 # if event_name is EventWithFilter, filter is passed here
188 super(ProgressBar, self).attach(engine, log_handler, event_name)
189 engine.add_event_handler(closing_event_name, self._close)
190
191
192 class _OutputHandler(BaseOutputHandler):
193 """Helper handler to log engine's output and/or metrics
194
195 Args:
196 description (str): progress bar description.
197 metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
198 metrics.
199 output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
200 For example, `output_transform = lambda output: output`
201 This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot
202 with corresponding keys.
203 closing_event_name: event's name on which the progress bar is closed. Valid events are from
204 :class:`~ignite.engine.Events` or any `event_name` added by
205 :meth:`~ignite.engine.Engine.register_events`.
206
207 """
208
209 def __init__(self, description, metric_names=None, output_transform=None,
210 closing_event_name=Events.EPOCH_COMPLETED):
211 if metric_names is None and output_transform is None:
212 # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler
213 metric_names = []
214 super(_OutputHandler, self).__init__(description, metric_names, output_transform,
215 another_engine=None, global_step_transform=None)
216 self.closing_event_name = closing_event_name
217
218 @staticmethod
219 def get_max_number_events(event_name, engine):
220 if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):
221 return len(engine.state.dataloader)
222 if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):
223 return engine.state.max_epochs
224 return 1
225
226 def __call__(self, engine, logger, event_name):
227
228 pbar_total = self.get_max_number_events(event_name, engine)
229 if logger.pbar is None:
230 logger._reset(pbar_total=pbar_total)
231
232 desc = self.tag
233 max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)
234 if max_num_of_closing_events > 1:
235 global_step = engine.state.get_event_attrib_value(self.closing_event_name)
236 desc += " [{}/{}]".format(global_step, max_num_of_closing_events)
237 logger.pbar.set_description(desc)
238
239 metrics = self._setup_output_metrics(engine)
240
241 rendered_metrics = {}
242 for key, value in metrics.items():
243 if isinstance(value, torch.Tensor):
244 if value.ndimension() == 0:
245 rendered_metrics[key] = value.item()
246 elif value.ndimension() == 1:
247 for i, v in enumerate(value):
248 k = "{}_{}".format(key, i)
249 rendered_metrics[k] = v.item()
250 else:
251 warnings.warn("ProgressBar can not log "
252 "tensor with {} dimensions".format(value.ndimension()))
253 else:
254 rendered_metrics[key] = value
255
256 if rendered_metrics:
257 logger.pbar.set_postfix(**rendered_metrics)
258
259 global_step = engine.state.get_event_attrib_value(event_name)
260 global_step = (global_step - 1) % pbar_total + 1
261 logger.pbar.update(global_step - logger.pbar.n)
262
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py
--- a/ignite/contrib/handlers/tqdm_logger.py
+++ b/ignite/contrib/handlers/tqdm_logger.py
@@ -218,7 +218,7 @@
@staticmethod
def get_max_number_events(event_name, engine):
if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):
- return len(engine.state.dataloader)
+ return engine.state.epoch_length
if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):
return engine.state.max_epochs
return 1
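Because `get_max_number_events` now reads `engine.state.epoch_length`, the bar's `total` follows a custom `epoch_length` instead of the dataloader size. Below is a sketch of the expected behaviour, based on the reproduction from the issue but with the model replaced by a plain lambda so it carries no torch dependency:
```python
# Sketch of the post-patch behaviour: the bar should run 1..50, not 1..100.
from ignite.contrib.handlers.tqdm_logger import ProgressBar
from ignite.engine import Engine

data = list(range(100))
engine = Engine(lambda engine, batch: batch)  # stand-in for nn.Identity()

ProgressBar(persist=True).attach(engine)
engine.run(data, epoch_length=50)  # total now comes from engine.state.epoch_length
```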
| {"golden_diff": "diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py\n--- a/ignite/contrib/handlers/tqdm_logger.py\n+++ b/ignite/contrib/handlers/tqdm_logger.py\n@@ -218,7 +218,7 @@\n @staticmethod\n def get_max_number_events(event_name, engine):\n if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):\n- return len(engine.state.dataloader)\n+ return engine.state.epoch_length\n if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):\n return engine.state.max_epochs\n return 1\n", "issue": "tqdm_logger does not take epoch_length into account\n## \ud83d\udc1b `tqdm_logger` does not take `epoch_length` into account\r\n\r\nWhen calling `Engine.run()` with a custom `epoch_length`,\r\nthe tqdm progress bar does not adapt and displays the full number of batches in the data.\r\nHere is a minimal example:\r\n```python\r\nfrom ignite.contrib.handlers.tqdm_logger import ProgressBar\r\nfrom ignite.engine import Engine\r\nfrom torch import nn\r\nfrom torch.utils.data import DataLoader\r\n\r\ndata = list(range(100))\r\nmodel = nn.Identity()\r\nengine = Engine(lambda engine, batch: model(batch))\r\n\r\nProgressBar(persist=True).attach(engine)\r\nengine.run(data, epoch_length=50)\r\n```\r\nWe have 100 items in `data` but the true end of the epoch is at 50 iterations, yet the progress is displayed over the range of 100 and just ends midway, when I expect it to be displayed over the range of 50, thus ending when the bar is full.\r\nOne can not overwrite tqdm's `total` argument by replacing\r\n```python\r\nProgressBar(persist=True).attach(engine)\r\n```\r\nby\r\n```python\r\nProgressBar(persist=True, total=50).attach(engine)\r\n```\r\nfor it raises `TypeError: type object got multiple values for keyword argument 'total'`.\r\n\r\n## Environment\r\n - PyTorch Version : 1.4.0 \r\n - Ignite Version : 0.3.0\r\n - OS : Ubuntu 19.04\r\n - Ignite installation method : `pip`\r\n - Python version: 3.7.3\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\nfrom ignite.engine.engine import EventWithFilter\nfrom ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler\n\n\nclass ProgressBar(BaseLogger):\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Args:\n persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)\n bar_format (str, optional): Specify a custom bar string formatting. May impact performance.\n [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].\n Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the\n formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.\n **tqdm_kwargs: kwargs passed to tqdm progress bar.\n By default, progress bar description displays \"Epoch [5/10]\" where 5 is the current epoch and 10 is the\n number of epochs. If tqdm_kwargs defines `desc`, e.g. \"Predictions\", than the description is\n \"Predictions [5/10]\" if number of epochs is more than one otherwise it is simply \"Predictions\".\n\n Examples:\n\n Simple progress bar\n\n .. 
code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer)\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 [06:17<12:34]\n\n Log output to a file instead of stderr (tqdm's default output)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n log_file = open(\"output.log\", \"w\")\n pbar = ProgressBar(file=log_file)\n pbar.attach(trainer)\n\n Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`\n (such as :class:`~ignite.metrics.RunningAverage`)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Directly attach the engine's output\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer, output_transform=lambda x: {'loss': x})\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n\n Note:\n When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,\n please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.\n Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set\n to an empty string value.\n\n \"\"\"\n\n _events_order = [\n Events.STARTED,\n Events.EPOCH_STARTED,\n Events.ITERATION_STARTED,\n Events.ITERATION_COMPLETED,\n Events.EPOCH_COMPLETED,\n Events.COMPLETED\n ]\n\n def __init__(self, persist=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',\n **tqdm_kwargs):\n\n try:\n from tqdm.autonotebook import tqdm\n except ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed. 
\"\n \"Please install it with command: \\n pip install tqdm\")\n\n self.pbar_cls = tqdm\n self.pbar = None\n self.persist = persist\n self.bar_format = bar_format\n self.tqdm_kwargs = tqdm_kwargs\n\n def _reset(self, pbar_total):\n self.pbar = self.pbar_cls(\n total=pbar_total,\n leave=self.persist,\n bar_format=self.bar_format,\n initial=1,\n **self.tqdm_kwargs\n )\n\n def _close(self, engine):\n if self.pbar:\n self.pbar.close()\n self.pbar = None\n\n @staticmethod\n def _compare_lt(event1, event2):\n if isinstance(event1, EventWithFilter):\n event1 = event1.event\n if isinstance(event2, EventWithFilter):\n event2 = event2.event\n i1 = ProgressBar._events_order.index(event1)\n i2 = ProgressBar._events_order.index(event2)\n return i1 < i2\n\n def log_message(self, message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format.\n\n Args:\n message (str): string you wish to log.\n \"\"\"\n from tqdm import tqdm\n\n tqdm.write(message, file=self.tqdm_kwargs.get(\"file\", None))\n\n def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n \"\"\"\n Attaches the progress bar to an engine object.\n\n Args:\n engine (Engine): engine object.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): a function to select what you want to print from the engine's\n output. This function may return either a dictionary with entries in the format of ``{name: value}``,\n or a single scalar, which will be displayed with the default name `output`.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events`.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events`.\n\n Note: accepted output value types are numbers, 0d and 1d torch tensors and strings\n\n \"\"\"\n desc = self.tqdm_kwargs.get(\"desc\", \"Epoch\")\n\n if not isinstance(event_name, (Events, EventWithFilter)):\n raise ValueError(\"Logging event should be only `ignite.engine.Events`\")\n\n if isinstance(closing_event_name, EventWithFilter):\n raise ValueError(\"Closing event should not use any event filter\")\n\n if not self._compare_lt(event_name, closing_event_name):\n raise ValueError(\"Logging event {} should be called before closing event {}\"\n .format(event_name, closing_event_name))\n\n log_handler = _OutputHandler(desc, metric_names, output_transform,\n closing_event_name=closing_event_name)\n # if event_name is EventWithFilter, filter is passed here\n super(ProgressBar, self).attach(engine, log_handler, event_name)\n engine.add_event_handler(closing_event_name, self._close)\n\n\nclass _OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output and/or metrics\n\n Args:\n description (str): progress bar description.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot\n with corresponding keys.\n closing_event_name: event's name on which the progress bar is closed. 
Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n\n \"\"\"\n\n def __init__(self, description, metric_names=None, output_transform=None,\n closing_event_name=Events.EPOCH_COMPLETED):\n if metric_names is None and output_transform is None:\n # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler\n metric_names = []\n super(_OutputHandler, self).__init__(description, metric_names, output_transform,\n another_engine=None, global_step_transform=None)\n self.closing_event_name = closing_event_name\n\n @staticmethod\n def get_max_number_events(event_name, engine):\n if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):\n return len(engine.state.dataloader)\n if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):\n return engine.state.max_epochs\n return 1\n\n def __call__(self, engine, logger, event_name):\n\n pbar_total = self.get_max_number_events(event_name, engine)\n if logger.pbar is None:\n logger._reset(pbar_total=pbar_total)\n\n desc = self.tag\n max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)\n if max_num_of_closing_events > 1:\n global_step = engine.state.get_event_attrib_value(self.closing_event_name)\n desc += \" [{}/{}]\".format(global_step, max_num_of_closing_events)\n logger.pbar.set_description(desc)\n\n metrics = self._setup_output_metrics(engine)\n\n rendered_metrics = {}\n for key, value in metrics.items():\n if isinstance(value, torch.Tensor):\n if value.ndimension() == 0:\n rendered_metrics[key] = value.item()\n elif value.ndimension() == 1:\n for i, v in enumerate(value):\n k = \"{}_{}\".format(key, i)\n rendered_metrics[k] = v.item()\n else:\n warnings.warn(\"ProgressBar can not log \"\n \"tensor with {} dimensions\".format(value.ndimension()))\n else:\n rendered_metrics[key] = value\n\n if rendered_metrics:\n logger.pbar.set_postfix(**rendered_metrics)\n\n global_step = engine.state.get_event_attrib_value(event_name)\n global_step = (global_step - 1) % pbar_total + 1\n logger.pbar.update(global_step - logger.pbar.n)\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\nfrom ignite.engine.engine import EventWithFilter\nfrom ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler\n\n\nclass ProgressBar(BaseLogger):\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Args:\n persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)\n bar_format (str, optional): Specify a custom bar string formatting. May impact performance.\n [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].\n Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the\n formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.\n **tqdm_kwargs: kwargs passed to tqdm progress bar.\n By default, progress bar description displays \"Epoch [5/10]\" where 5 is the current epoch and 10 is the\n number of epochs. If tqdm_kwargs defines `desc`, e.g. 
\"Predictions\", than the description is\n \"Predictions [5/10]\" if number of epochs is more than one otherwise it is simply \"Predictions\".\n\n Examples:\n\n Simple progress bar\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer)\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 [06:17<12:34]\n\n Log output to a file instead of stderr (tqdm's default output)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n log_file = open(\"output.log\", \"w\")\n pbar = ProgressBar(file=log_file)\n pbar.attach(trainer)\n\n Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`\n (such as :class:`~ignite.metrics.RunningAverage`)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Directly attach the engine's output\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer, output_transform=lambda x: {'loss': x})\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n\n Note:\n When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,\n please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.\n Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set\n to an empty string value.\n\n \"\"\"\n\n _events_order = [\n Events.STARTED,\n Events.EPOCH_STARTED,\n Events.ITERATION_STARTED,\n Events.ITERATION_COMPLETED,\n Events.EPOCH_COMPLETED,\n Events.COMPLETED\n ]\n\n def __init__(self, persist=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',\n **tqdm_kwargs):\n\n try:\n from tqdm.autonotebook import tqdm\n except ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed. 
\"\n \"Please install it with command: \\n pip install tqdm\")\n\n self.pbar_cls = tqdm\n self.pbar = None\n self.persist = persist\n self.bar_format = bar_format\n self.tqdm_kwargs = tqdm_kwargs\n\n def _reset(self, pbar_total):\n self.pbar = self.pbar_cls(\n total=pbar_total,\n leave=self.persist,\n bar_format=self.bar_format,\n initial=1,\n **self.tqdm_kwargs\n )\n\n def _close(self, engine):\n if self.pbar:\n self.pbar.close()\n self.pbar = None\n\n @staticmethod\n def _compare_lt(event1, event2):\n if isinstance(event1, EventWithFilter):\n event1 = event1.event\n if isinstance(event2, EventWithFilter):\n event2 = event2.event\n i1 = ProgressBar._events_order.index(event1)\n i2 = ProgressBar._events_order.index(event2)\n return i1 < i2\n\n def log_message(self, message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format.\n\n Args:\n message (str): string you wish to log.\n \"\"\"\n from tqdm import tqdm\n\n tqdm.write(message, file=self.tqdm_kwargs.get(\"file\", None))\n\n def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n \"\"\"\n Attaches the progress bar to an engine object.\n\n Args:\n engine (Engine): engine object.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): a function to select what you want to print from the engine's\n output. This function may return either a dictionary with entries in the format of ``{name: value}``,\n or a single scalar, which will be displayed with the default name `output`.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events`.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events`.\n\n Note: accepted output value types are numbers, 0d and 1d torch tensors and strings\n\n \"\"\"\n desc = self.tqdm_kwargs.get(\"desc\", \"Epoch\")\n\n if not isinstance(event_name, (Events, EventWithFilter)):\n raise ValueError(\"Logging event should be only `ignite.engine.Events`\")\n\n if isinstance(closing_event_name, EventWithFilter):\n raise ValueError(\"Closing event should not use any event filter\")\n\n if not self._compare_lt(event_name, closing_event_name):\n raise ValueError(\"Logging event {} should be called before closing event {}\"\n .format(event_name, closing_event_name))\n\n log_handler = _OutputHandler(desc, metric_names, output_transform,\n closing_event_name=closing_event_name)\n # if event_name is EventWithFilter, filter is passed here\n super(ProgressBar, self).attach(engine, log_handler, event_name)\n engine.add_event_handler(closing_event_name, self._close)\n\n\nclass _OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output and/or metrics\n\n Args:\n description (str): progress bar description.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot\n with corresponding keys.\n closing_event_name: event's name on which the progress bar is closed. 
Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n\n \"\"\"\n\n def __init__(self, description, metric_names=None, output_transform=None,\n closing_event_name=Events.EPOCH_COMPLETED):\n if metric_names is None and output_transform is None:\n # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler\n metric_names = []\n super(_OutputHandler, self).__init__(description, metric_names, output_transform,\n another_engine=None, global_step_transform=None)\n self.closing_event_name = closing_event_name\n\n @staticmethod\n def get_max_number_events(event_name, engine):\n if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):\n return engine.state.epoch_length\n if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):\n return engine.state.max_epochs\n return 1\n\n def __call__(self, engine, logger, event_name):\n\n pbar_total = self.get_max_number_events(event_name, engine)\n if logger.pbar is None:\n logger._reset(pbar_total=pbar_total)\n\n desc = self.tag\n max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)\n if max_num_of_closing_events > 1:\n global_step = engine.state.get_event_attrib_value(self.closing_event_name)\n desc += \" [{}/{}]\".format(global_step, max_num_of_closing_events)\n logger.pbar.set_description(desc)\n\n metrics = self._setup_output_metrics(engine)\n\n rendered_metrics = {}\n for key, value in metrics.items():\n if isinstance(value, torch.Tensor):\n if value.ndimension() == 0:\n rendered_metrics[key] = value.item()\n elif value.ndimension() == 1:\n for i, v in enumerate(value):\n k = \"{}_{}\".format(key, i)\n rendered_metrics[k] = v.item()\n else:\n warnings.warn(\"ProgressBar can not log \"\n \"tensor with {} dimensions\".format(value.ndimension()))\n else:\n rendered_metrics[key] = value\n\n if rendered_metrics:\n logger.pbar.set_postfix(**rendered_metrics)\n\n global_step = engine.state.get_event_attrib_value(event_name)\n global_step = (global_step - 1) % pbar_total + 1\n logger.pbar.update(global_step - logger.pbar.n)\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}]} | 3,730 | 152 |
gh_patches_debug_3191 | rasdani/github-patches | git_diff | weecology__retriever-663 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stop bad scripts from causing errors
Currently, when `compile_json` runs and something goes wrong, the retriever errors out even on commands that do not run the script (e.g., `retriever ls`). What it should do instead is ignore the bad script, possibly report back that there is an issue with it, and otherwise keep running normally.
--- END ISSUE ---
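A minimal sketch of the behavior the issue asks for, independent of the specific fix chosen later: the script-loading loop could catch per-script failures, report them, and continue, so unrelated commands such as `retriever ls` keep working. The `load_scripts` wrapper and its import path below are illustrative assumptions, not the retriever's actual module layout.

```python
from lib.compile import compile_json  # import path is an assumption based on the file shown below


def load_scripts(script_files):
    """Illustrative only: guard each script so one bad definition
    cannot break unrelated commands such as `retriever ls`."""
    loaded = []
    for script_file in script_files:
        try:
            compile_json(script_file)  # may raise on a malformed .json script
            loaded.append(script_file)
        except Exception as error:  # report and move on instead of aborting
            print("Skipping broken script {}: {}".format(script_file, error))
    return loaded
```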
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/compile.py`
Content:
```
1 from builtins import str
2 import json
3 import sys
4 if sys.version_info[0] < 3:
5 from codecs import open
6
7 script_templates = {
8 "default": """#retriever
9 from retriever.lib.templates import BasicTextTemplate
10 from retriever.lib.models import Table, Cleanup, correct_invalid_value
11
12 SCRIPT = BasicTextTemplate(%s)""",
13
14 "html_table": """#retriever
15 from retriever.lib.templates import HtmlTableTemplate
16 from retriever.lib.models import Table, Cleanup, correct_invalid_value
17
18 SCRIPT = HtmlTableTemplate(%s)""",
19 }
20
21
22 def compile_script(script_file):
23 definition = open(script_file + ".script", 'r')
24
25 values = {}
26 urls = {}
27 tables = {}
28 last_table = ""
29 replace = []
30 keys_to_ignore = ["template"]
31
32 for line in [line.strip() for line in definition]:
33 if line and ':' in line and not line[0] == '#':
34 split_line = [a.strip() for a in line.split(":")]
35 key = split_line[0].lower()
36 value = ':'.join(split_line[1:])
37 if key == "table":
38 table_name = value.split(',')[0].strip()
39 last_table = table_name
40 table_url = ','.join(value.split(',')[1:]).strip()
41 urls[table_name] = table_url
42 if replace:
43 try:
44 tables[last_table]
45 except:
46 tables[table_name] = {'replace_columns': str(replace)}
47 elif key == "*nulls":
48 if last_table:
49 nulls = [eval(v) for v in [v.strip()
50 for v in value.split(',')]]
51 try:
52 tables[last_table]
53 except KeyError:
54 if replace:
55 tables[last_table] = {'replace_columns': str(replace)}
56 else:
57 tables[last_table] = {}
58 tables[last_table]['cleanup'] = "Cleanup(correct_invalid_value, nulls=" + str(nulls) + ")"
59 elif key == "replace":
60 replace = [(v.split(',')[0].strip(), v.split(',')[1].strip())
61 for v in [v.strip() for v in value.split(';')]]
62 elif key == "tags":
63 values["tags"] = [v.strip() for v in value.split(',')]
64 elif key == "*ct_names":
65 tables[last_table]["ct_names"] = [v.strip()
66 for v in value.split(',')]
67 elif key == "*column":
68 if last_table:
69 vs = [v.strip() for v in value.split(',')]
70 column = [(vs[0], (vs[1], vs[2]) if len(vs) > 2 else (vs[1],))]
71 try:
72 tables[last_table]
73 except KeyError:
74 tables[last_table] = {}
75
76 try:
77 tables[last_table]['columns'] += column
78 except KeyError:
79 tables[last_table]['columns'] = column
80 elif key[0] == "*":
81 # attribute that should be applied to the most recently
82 # declared table
83 if key[0] == "*":
84 key = key[1:]
85 if last_table:
86 try:
87 tables[last_table]
88 except KeyError:
89 tables[last_table] = {}
90
91 try:
92 e = eval(value)
93 except:
94 e = str(value)
95
96 tables[last_table][key] = "'" + str(e) + "'"
97 else:
98 # general script attributes
99 values[key] = '"' + str(value) + '"'
100
101 if 'shortname' not in list(values.keys()):
102 try:
103 values['shortname'] = values['name']
104 except:
105 pass
106 values['urls'] = str(urls)
107
108 def get_value(key):
109 try:
110 return values[key]
111 except KeyError:
112 return ""
113
114 table_desc = "{"
115 for (key, value) in list(tables.items()):
116 table_desc += "'" + key + "': Table('" + key + "', "
117 table_desc += ','.join([key + "=" + str(value)
118 for key, value, in list(value.items())])
119 table_desc += "),"
120 if table_desc != '{':
121 table_desc = table_desc[:-1]
122 table_desc += "}"
123
124 values['tables'] = table_desc
125
126 script_desc = []
127 for key, value in list(values.items()):
128 if key == "url":
129 key = "ref"
130 if key not in keys_to_ignore:
131 script_desc.append(key + "=" + str(value))
132 script_desc = (',\n' + ' ' * 27).join(script_desc)
133
134 if 'template' in list(values.keys()):
135 template = values["template"]
136 else:
137 template = "default"
138 script_contents = (script_templates[template] % script_desc)
139
140 new_script = open(script_file + '.py', 'w')
141 new_script.write(script_contents)
142 new_script.close()
143
144 definition.close()
145
146
147 def add_dialect(table_dict, table):
148 """
149 Reads dialect key of JSON script and extracts key-value pairs to store them
150 in python script
151
152 Contains properties such 'nulls', delimiter', etc
153 """
154 for (key, val) in table['dialect'].items():
155 # dialect related key-value pairs
156 # copied as is
157 if key == "nulls":
158 table_dict[
159 'cleanup'] = "Cleanup(correct_invalid_value, nulls=" + str(val) + ")"
160
161 elif key == "delimiter":
162 table_dict[key] = "'" + str(val) + "'"
163 else:
164 table_dict[key] = val
165
166
167 def add_schema(table_dict, table):
168 """
169 Reads schema key of JSON script and extracts values to store them in
170 python script
171
172 Contains properties related to table schema, such as 'fields' and cross-tab
173 column name ('ct_column').
174 """
175 for (key, val) in table['schema'].items():
176 # schema related key-value pairs
177
178 if key == "fields":
179 # fields = columns of the table
180
181 # list of column tuples
182 column_list = []
183 for obj in val:
184 # fields is a collection of JSON objects
185 # (similar to a list of dicts in python)
186
187 if "size" in obj:
188 column_list.append((obj["name"],
189 (obj["type"], obj["size"])))
190 else:
191 column_list.append((obj["name"],
192 (obj["type"],)))
193
194 table_dict["columns"] = column_list
195
196 elif key == "ct_column":
197 table_dict[key] = "'" + val + "'"
198
199 else:
200 table_dict[key] = val
201
202
203 def compile_json(json_file):
204 """
205 Function to compile JSON script files to python scripts
206 The scripts are created with `retriever create_json <script_name` using
207 command line
208 """
209 json_object = json.load(open(json_file + ".json", "r"))
210
211 if "retriever" not in json_object.keys():
212 # Compile only files that have retriever key
213 return
214
215 values = {}
216 values['urls'] = {}
217
218 keys_to_ignore = ["template"]
219
220 for (key, value) in json_object.items():
221
222 if key == "title":
223 values["name"] = "\"" + str(value) + "\""
224
225 elif key == "name":
226 values["shortname"] = "\"" + str(value) + "\""
227
228 elif key == "description":
229 values["description"] = "\"" + str(value) + "\""
230
231 elif key == "homepage":
232 values["ref"] = "\"" + str(value) + "\""
233
234 elif key == "citation":
235 values["citation"] = "\"" + str(value) + "\""
236
237 elif key == "keywords":
238 values["tags"] = value
239
240 elif key == "retriever_minimum_version":
241 values["retriever_minimum_version"] = "\"" + value + "\""
242
243 elif key == "resources":
244 # Array of table objects
245 tables = {}
246 for table in value:
247 # Maintain a dict for table keys and values
248 table_dict = {}
249
250 try:
251 values['urls'][table['name']] = table['url']
252 except Exception as e:
253 print(e, "\nError in reading table: " + table)
254 continue
255
256 if table["schema"] == {} and table["dialect"] == {}:
257 continue
258
259 for (t_key, t_val) in table.items():
260
261 if t_key == "dialect":
262 add_dialect(table_dict, table)
263
264 elif t_key == "schema":
265 add_schema(table_dict, table)
266
267 tables[table["name"]] = table_dict
268
269 else:
270 values[key] = value
271
272 # Create a Table object string using the tables dict
273 table_desc = "{"
274 for (key, value) in tables.items():
275 table_desc += "'" + key + "': Table('" + key + "', "
276 table_desc += ','.join([key + "=" + str(value)
277 for key, value, in value.items()])
278 table_desc += "),"
279 if table_desc != '{':
280 table_desc = table_desc[:-1]
281 table_desc += "}"
282
283 values["tables"] = table_desc
284
285 script_desc = []
286 for key, value in values.items():
287 if key not in keys_to_ignore:
288 script_desc.append(key + "=" + str(value))
289 script_desc = (',\n' + ' ' * 27).join(script_desc)
290
291 if 'template' in values.keys():
292 template = values["template"]
293 else:
294 template = "default"
295 script_contents = (script_templates[template] % script_desc)
296
297 new_script = open(json_file + '.py', 'w', encoding='utf-8')
298 new_script.write('# -*- coding: latin-1 -*-\n')
299 new_script.write(script_contents)
300 new_script.close()
301
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/compile.py b/lib/compile.py
--- a/lib/compile.py
+++ b/lib/compile.py
@@ -238,7 +238,7 @@
values["tags"] = value
elif key == "retriever_minimum_version":
- values["retriever_minimum_version"] = "\"" + value + "\""
+ values["retriever_minimum_version"] = "\"" + str(value) + "\""
elif key == "resources":
# Array of table objects
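The one-line change above suggests the failure mode behind the issue: if a script's JSON gives `retriever_minimum_version` as a number rather than a string, concatenating it directly raises a `TypeError` during compilation and takes down the whole command, while `str(value)` keeps it harmless. A standalone illustration (the `2.0` value is a hypothetical script entry):

```python
value = 2.0  # hypothetical: "retriever_minimum_version": 2.0 in a script's JSON

try:
    quoted = "\"" + value + "\""  # old code path: TypeError, aborts the run
except TypeError:
    quoted = "\"" + str(value) + "\""  # patched code path
print(quoted)  # -> "2.0"
```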
| {"golden_diff": "diff --git a/lib/compile.py b/lib/compile.py\n--- a/lib/compile.py\n+++ b/lib/compile.py\n@@ -238,7 +238,7 @@\n values[\"tags\"] = value\n \n elif key == \"retriever_minimum_version\":\n- values[\"retriever_minimum_version\"] = \"\\\"\" + value + \"\\\"\"\n+ values[\"retriever_minimum_version\"] = \"\\\"\" + str(value) + \"\\\"\"\n \n elif key == \"resources\":\n # Array of table objects\n", "issue": "Stop bad scripts from causing errors\nCurrently when `compile_json` gets run if something goes wrong the retriever errors out even on commands not running the script (e.g., `retriever ls`). What it should do is ignore the bad script and possibly report back that there is an issue with the script but keep running normally otherwise.\n\n", "before_files": [{"content": "from builtins import str\nimport json\nimport sys\nif sys.version_info[0] < 3:\n from codecs import open\n\nscript_templates = {\n \"default\": \"\"\"#retriever\nfrom retriever.lib.templates import BasicTextTemplate\nfrom retriever.lib.models import Table, Cleanup, correct_invalid_value\n\nSCRIPT = BasicTextTemplate(%s)\"\"\",\n\n \"html_table\": \"\"\"#retriever\nfrom retriever.lib.templates import HtmlTableTemplate\nfrom retriever.lib.models import Table, Cleanup, correct_invalid_value\n\nSCRIPT = HtmlTableTemplate(%s)\"\"\",\n}\n\n\ndef compile_script(script_file):\n definition = open(script_file + \".script\", 'r')\n\n values = {}\n urls = {}\n tables = {}\n last_table = \"\"\n replace = []\n keys_to_ignore = [\"template\"]\n\n for line in [line.strip() for line in definition]:\n if line and ':' in line and not line[0] == '#':\n split_line = [a.strip() for a in line.split(\":\")]\n key = split_line[0].lower()\n value = ':'.join(split_line[1:])\n if key == \"table\":\n table_name = value.split(',')[0].strip()\n last_table = table_name\n table_url = ','.join(value.split(',')[1:]).strip()\n urls[table_name] = table_url\n if replace:\n try:\n tables[last_table]\n except:\n tables[table_name] = {'replace_columns': str(replace)}\n elif key == \"*nulls\":\n if last_table:\n nulls = [eval(v) for v in [v.strip()\n for v in value.split(',')]]\n try:\n tables[last_table]\n except KeyError:\n if replace:\n tables[last_table] = {'replace_columns': str(replace)}\n else:\n tables[last_table] = {}\n tables[last_table]['cleanup'] = \"Cleanup(correct_invalid_value, nulls=\" + str(nulls) + \")\"\n elif key == \"replace\":\n replace = [(v.split(',')[0].strip(), v.split(',')[1].strip())\n for v in [v.strip() for v in value.split(';')]]\n elif key == \"tags\":\n values[\"tags\"] = [v.strip() for v in value.split(',')]\n elif key == \"*ct_names\":\n tables[last_table][\"ct_names\"] = [v.strip()\n for v in value.split(',')]\n elif key == \"*column\":\n if last_table:\n vs = [v.strip() for v in value.split(',')]\n column = [(vs[0], (vs[1], vs[2]) if len(vs) > 2 else (vs[1],))]\n try:\n tables[last_table]\n except KeyError:\n tables[last_table] = {}\n\n try:\n tables[last_table]['columns'] += column\n except KeyError:\n tables[last_table]['columns'] = column\n elif key[0] == \"*\":\n # attribute that should be applied to the most recently\n # declared table\n if key[0] == \"*\":\n key = key[1:]\n if last_table:\n try:\n tables[last_table]\n except KeyError:\n tables[last_table] = {}\n\n try:\n e = eval(value)\n except:\n e = str(value)\n\n tables[last_table][key] = \"'\" + str(e) + \"'\"\n else:\n # general script attributes\n values[key] = '\"' + str(value) + '\"'\n\n if 'shortname' not in list(values.keys()):\n try:\n 
values['shortname'] = values['name']\n except:\n pass\n values['urls'] = str(urls)\n\n def get_value(key):\n try:\n return values[key]\n except KeyError:\n return \"\"\n\n table_desc = \"{\"\n for (key, value) in list(tables.items()):\n table_desc += \"'\" + key + \"': Table('\" + key + \"', \"\n table_desc += ','.join([key + \"=\" + str(value)\n for key, value, in list(value.items())])\n table_desc += \"),\"\n if table_desc != '{':\n table_desc = table_desc[:-1]\n table_desc += \"}\"\n\n values['tables'] = table_desc\n\n script_desc = []\n for key, value in list(values.items()):\n if key == \"url\":\n key = \"ref\"\n if key not in keys_to_ignore:\n script_desc.append(key + \"=\" + str(value))\n script_desc = (',\\n' + ' ' * 27).join(script_desc)\n\n if 'template' in list(values.keys()):\n template = values[\"template\"]\n else:\n template = \"default\"\n script_contents = (script_templates[template] % script_desc)\n\n new_script = open(script_file + '.py', 'w')\n new_script.write(script_contents)\n new_script.close()\n\n definition.close()\n\n\ndef add_dialect(table_dict, table):\n \"\"\"\n Reads dialect key of JSON script and extracts key-value pairs to store them\n in python script\n\n Contains properties such 'nulls', delimiter', etc\n \"\"\"\n for (key, val) in table['dialect'].items():\n # dialect related key-value pairs\n # copied as is\n if key == \"nulls\":\n table_dict[\n 'cleanup'] = \"Cleanup(correct_invalid_value, nulls=\" + str(val) + \")\"\n\n elif key == \"delimiter\":\n table_dict[key] = \"'\" + str(val) + \"'\"\n else:\n table_dict[key] = val\n\n\ndef add_schema(table_dict, table):\n \"\"\"\n Reads schema key of JSON script and extracts values to store them in\n python script\n\n Contains properties related to table schema, such as 'fields' and cross-tab\n column name ('ct_column').\n \"\"\"\n for (key, val) in table['schema'].items():\n # schema related key-value pairs\n\n if key == \"fields\":\n # fields = columns of the table\n\n # list of column tuples\n column_list = []\n for obj in val:\n # fields is a collection of JSON objects\n # (similar to a list of dicts in python)\n\n if \"size\" in obj:\n column_list.append((obj[\"name\"],\n (obj[\"type\"], obj[\"size\"])))\n else:\n column_list.append((obj[\"name\"],\n (obj[\"type\"],)))\n\n table_dict[\"columns\"] = column_list\n\n elif key == \"ct_column\":\n table_dict[key] = \"'\" + val + \"'\"\n\n else:\n table_dict[key] = val\n\n\ndef compile_json(json_file):\n \"\"\"\n Function to compile JSON script files to python scripts\n The scripts are created with `retriever create_json <script_name` using\n command line\n \"\"\"\n json_object = json.load(open(json_file + \".json\", \"r\"))\n\n if \"retriever\" not in json_object.keys():\n # Compile only files that have retriever key\n return\n\n values = {}\n values['urls'] = {}\n\n keys_to_ignore = [\"template\"]\n\n for (key, value) in json_object.items():\n\n if key == \"title\":\n values[\"name\"] = \"\\\"\" + str(value) + \"\\\"\"\n\n elif key == \"name\":\n values[\"shortname\"] = \"\\\"\" + str(value) + \"\\\"\"\n\n elif key == \"description\":\n values[\"description\"] = \"\\\"\" + str(value) + \"\\\"\"\n\n elif key == \"homepage\":\n values[\"ref\"] = \"\\\"\" + str(value) + \"\\\"\"\n\n elif key == \"citation\":\n values[\"citation\"] = \"\\\"\" + str(value) + \"\\\"\"\n\n elif key == \"keywords\":\n values[\"tags\"] = value\n\n elif key == \"retriever_minimum_version\":\n values[\"retriever_minimum_version\"] = \"\\\"\" + value + \"\\\"\"\n\n elif key == 
\"resources\":\n # Array of table objects\n tables = {}\n for table in value:\n # Maintain a dict for table keys and values\n table_dict = {}\n\n try:\n values['urls'][table['name']] = table['url']\n except Exception as e:\n print(e, \"\\nError in reading table: \" + table)\n continue\n\n if table[\"schema\"] == {} and table[\"dialect\"] == {}:\n continue\n\n for (t_key, t_val) in table.items():\n\n if t_key == \"dialect\":\n add_dialect(table_dict, table)\n\n elif t_key == \"schema\":\n add_schema(table_dict, table)\n\n tables[table[\"name\"]] = table_dict\n\n else:\n values[key] = value\n\n # Create a Table object string using the tables dict\n table_desc = \"{\"\n for (key, value) in tables.items():\n table_desc += \"'\" + key + \"': Table('\" + key + \"', \"\n table_desc += ','.join([key + \"=\" + str(value)\n for key, value, in value.items()])\n table_desc += \"),\"\n if table_desc != '{':\n table_desc = table_desc[:-1]\n table_desc += \"}\"\n\n values[\"tables\"] = table_desc\n\n script_desc = []\n for key, value in values.items():\n if key not in keys_to_ignore:\n script_desc.append(key + \"=\" + str(value))\n script_desc = (',\\n' + ' ' * 27).join(script_desc)\n\n if 'template' in values.keys():\n template = values[\"template\"]\n else:\n template = \"default\"\n script_contents = (script_templates[template] % script_desc)\n\n new_script = open(json_file + '.py', 'w', encoding='utf-8')\n new_script.write('# -*- coding: latin-1 -*-\\n')\n new_script.write(script_contents)\n new_script.close()\n", "path": "lib/compile.py"}], "after_files": [{"content": "from builtins import str\nimport json\nimport sys\nif sys.version_info[0] < 3:\n from codecs import open\n\nscript_templates = {\n \"default\": \"\"\"#retriever\nfrom retriever.lib.templates import BasicTextTemplate\nfrom retriever.lib.models import Table, Cleanup, correct_invalid_value\n\nSCRIPT = BasicTextTemplate(%s)\"\"\",\n\n \"html_table\": \"\"\"#retriever\nfrom retriever.lib.templates import HtmlTableTemplate\nfrom retriever.lib.models import Table, Cleanup, correct_invalid_value\n\nSCRIPT = HtmlTableTemplate(%s)\"\"\",\n}\n\n\ndef compile_script(script_file):\n definition = open(script_file + \".script\", 'r')\n\n values = {}\n urls = {}\n tables = {}\n last_table = \"\"\n replace = []\n keys_to_ignore = [\"template\"]\n\n for line in [line.strip() for line in definition]:\n if line and ':' in line and not line[0] == '#':\n split_line = [a.strip() for a in line.split(\":\")]\n key = split_line[0].lower()\n value = ':'.join(split_line[1:])\n if key == \"table\":\n table_name = value.split(',')[0].strip()\n last_table = table_name\n table_url = ','.join(value.split(',')[1:]).strip()\n urls[table_name] = table_url\n if replace:\n try:\n tables[last_table]\n except:\n tables[table_name] = {'replace_columns': str(replace)}\n elif key == \"*nulls\":\n if last_table:\n nulls = [eval(v) for v in [v.strip()\n for v in value.split(',')]]\n try:\n tables[last_table]\n except KeyError:\n if replace:\n tables[last_table] = {'replace_columns': str(replace)}\n else:\n tables[last_table] = {}\n tables[last_table]['cleanup'] = \"Cleanup(correct_invalid_value, nulls=\" + str(nulls) + \")\"\n elif key == \"replace\":\n replace = [(v.split(',')[0].strip(), v.split(',')[1].strip())\n for v in [v.strip() for v in value.split(';')]]\n elif key == \"tags\":\n values[\"tags\"] = [v.strip() for v in value.split(',')]\n elif key == \"*ct_names\":\n tables[last_table][\"ct_names\"] = [v.strip()\n for v in value.split(',')]\n elif key == \"*column\":\n 
if last_table:\n vs = [v.strip() for v in value.split(',')]\n column = [(vs[0], (vs[1], vs[2]) if len(vs) > 2 else (vs[1],))]\n try:\n tables[last_table]\n except KeyError:\n tables[last_table] = {}\n\n try:\n tables[last_table]['columns'] += column\n except KeyError:\n tables[last_table]['columns'] = column\n elif key[0] == \"*\":\n # attribute that should be applied to the most recently\n # declared table\n if key[0] == \"*\":\n key = key[1:]\n if last_table:\n try:\n tables[last_table]\n except KeyError:\n tables[last_table] = {}\n\n try:\n e = eval(value)\n except:\n e = str(value)\n\n tables[last_table][key] = \"'\" + str(e) + \"'\"\n else:\n # general script attributes\n values[key] = '\"' + str(value) + '\"'\n\n if 'shortname' not in list(values.keys()):\n try:\n values['shortname'] = values['name']\n except:\n pass\n values['urls'] = str(urls)\n\n def get_value(key):\n try:\n return values[key]\n except KeyError:\n return \"\"\n\n table_desc = \"{\"\n for (key, value) in list(tables.items()):\n table_desc += \"'\" + key + \"': Table('\" + key + \"', \"\n table_desc += ','.join([key + \"=\" + str(value)\n for key, value, in list(value.items())])\n table_desc += \"),\"\n if table_desc != '{':\n table_desc = table_desc[:-1]\n table_desc += \"}\"\n\n values['tables'] = table_desc\n\n script_desc = []\n for key, value in list(values.items()):\n if key == \"url\":\n key = \"ref\"\n if key not in keys_to_ignore:\n script_desc.append(key + \"=\" + str(value))\n script_desc = (',\\n' + ' ' * 27).join(script_desc)\n\n if 'template' in list(values.keys()):\n template = values[\"template\"]\n else:\n template = \"default\"\n script_contents = (script_templates[template] % script_desc)\n\n new_script = open(script_file + '.py', 'w')\n new_script.write(script_contents)\n new_script.close()\n\n definition.close()\n\n\ndef add_dialect(table_dict, table):\n \"\"\"\n Reads dialect key of JSON script and extracts key-value pairs to store them\n in python script\n\n Contains properties such 'nulls', delimiter', etc\n \"\"\"\n for (key, val) in table['dialect'].items():\n # dialect related key-value pairs\n # copied as is\n if key == \"nulls\":\n table_dict[\n 'cleanup'] = \"Cleanup(correct_invalid_value, nulls=\" + str(val) + \")\"\n\n elif key == \"delimiter\":\n table_dict[key] = \"'\" + str(val) + \"'\"\n else:\n table_dict[key] = val\n\n\ndef add_schema(table_dict, table):\n \"\"\"\n Reads schema key of JSON script and extracts values to store them in\n python script\n\n Contains properties related to table schema, such as 'fields' and cross-tab\n column name ('ct_column').\n \"\"\"\n for (key, val) in table['schema'].items():\n # schema related key-value pairs\n\n if key == \"fields\":\n # fields = columns of the table\n\n # list of column tuples\n column_list = []\n for obj in val:\n # fields is a collection of JSON objects\n # (similar to a list of dicts in python)\n\n if \"size\" in obj:\n column_list.append((obj[\"name\"],\n (obj[\"type\"], obj[\"size\"])))\n else:\n column_list.append((obj[\"name\"],\n (obj[\"type\"],)))\n\n table_dict[\"columns\"] = column_list\n\n elif key == \"ct_column\":\n table_dict[key] = \"'\" + val + \"'\"\n\n else:\n table_dict[key] = val\n\n\ndef compile_json(json_file):\n \"\"\"\n Function to compile JSON script files to python scripts\n The scripts are created with `retriever create_json <script_name` using\n command line\n \"\"\"\n json_object = json.load(open(json_file + \".json\", \"r\"))\n\n if \"retriever\" not in json_object.keys():\n # Compile only files 
that have retriever key\n return\n\n values = {}\n values['urls'] = {}\n\n keys_to_ignore = [\"template\"]\n\n for (key, value) in json_object.items():\n\n if key == \"title\":\n values[\"name\"] = \"\\\"\" + str(value) + \"\\\"\"\n\n elif key == \"name\":\n values[\"shortname\"] = \"\\\"\" + str(value) + \"\\\"\"\n\n elif key == \"description\":\n values[\"description\"] = \"\\\"\" + str(value) + \"\\\"\"\n\n elif key == \"homepage\":\n values[\"ref\"] = \"\\\"\" + str(value) + \"\\\"\"\n\n elif key == \"citation\":\n values[\"citation\"] = \"\\\"\" + str(value) + \"\\\"\"\n\n elif key == \"keywords\":\n values[\"tags\"] = value\n\n elif key == \"retriever_minimum_version\":\n values[\"retriever_minimum_version\"] = \"\\\"\" + str(value) + \"\\\"\"\n\n elif key == \"resources\":\n # Array of table objects\n tables = {}\n for table in value:\n # Maintain a dict for table keys and values\n table_dict = {}\n\n try:\n values['urls'][table['name']] = table['url']\n except Exception as e:\n print(e, \"\\nError in reading table: \" + table)\n continue\n\n if table[\"schema\"] == {} and table[\"dialect\"] == {}:\n continue\n\n for (t_key, t_val) in table.items():\n\n if t_key == \"dialect\":\n add_dialect(table_dict, table)\n\n elif t_key == \"schema\":\n add_schema(table_dict, table)\n\n tables[table[\"name\"]] = table_dict\n\n else:\n values[key] = value\n\n # Create a Table object string using the tables dict\n table_desc = \"{\"\n for (key, value) in tables.items():\n table_desc += \"'\" + key + \"': Table('\" + key + \"', \"\n table_desc += ','.join([key + \"=\" + str(value)\n for key, value, in value.items()])\n table_desc += \"),\"\n if table_desc != '{':\n table_desc = table_desc[:-1]\n table_desc += \"}\"\n\n values[\"tables\"] = table_desc\n\n script_desc = []\n for key, value in values.items():\n if key not in keys_to_ignore:\n script_desc.append(key + \"=\" + str(value))\n script_desc = (',\\n' + ' ' * 27).join(script_desc)\n\n if 'template' in values.keys():\n template = values[\"template\"]\n else:\n template = \"default\"\n script_contents = (script_templates[template] % script_desc)\n\n new_script = open(json_file + '.py', 'w', encoding='utf-8')\n new_script.write('# -*- coding: latin-1 -*-\\n')\n new_script.write(script_contents)\n new_script.close()\n", "path": "lib/compile.py"}]} | 3,250 | 112 |
gh_patches_debug_20304 | rasdani/github-patches | git_diff | frappe__hrms-1583 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IFSC Code showing wrong value in Bank Remittance Report
### Information about bug
IFSC Code showing wrong value in Bank Remittance Report. It is showing the same IFSC Code for all the employees in the list.
### Module
Payroll
### Version
ERPNext: v14.52.1 (HEAD)
Frappe Framework: v14.57.0 (HEAD)
Frappe HR: v14.18.1 (HEAD)
### Installation method
FrappeCloud
### Relevant log output / Stack trace / Full Error Message.
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hrms/payroll/report/bank_remittance/bank_remittance.py`
Content:
```
1 # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
2 # For license information, please see license.txt
3
4
5 import frappe
6 from frappe import _, get_all
7
8
9 def execute(filters=None):
10 columns = [
11 {
12 "label": _("Payroll Number"),
13 "fieldtype": "Link",
14 "fieldname": "payroll_no",
15 "options": "Payroll Entry",
16 "width": 150,
17 },
18 {
19 "label": _("Debit A/C Number"),
20 "fieldtype": "Int",
21 "fieldname": "debit_account",
22 "hidden": 1,
23 "width": 200,
24 },
25 {"label": _("Payment Date"), "fieldtype": "Data", "fieldname": "payment_date", "width": 100},
26 {
27 "label": _("Employee Name"),
28 "fieldtype": "Link",
29 "fieldname": "employee_name",
30 "options": "Employee",
31 "width": 200,
32 },
33 {"label": _("Bank Name"), "fieldtype": "Data", "fieldname": "bank_name", "width": 50},
34 {
35 "label": _("Employee A/C Number"),
36 "fieldtype": "Int",
37 "fieldname": "employee_account_no",
38 "width": 50,
39 },
40 ]
41
42 if frappe.db.has_column("Employee", "ifsc_code"):
43 columns.append(
44 {"label": _("IFSC Code"), "fieldtype": "Data", "fieldname": "bank_code", "width": 100}
45 )
46
47 columns += [
48 {"label": _("Currency"), "fieldtype": "Data", "fieldname": "currency", "width": 50},
49 {
50 "label": _("Net Salary Amount"),
51 "fieldtype": "Currency",
52 "options": "currency",
53 "fieldname": "amount",
54 "width": 100,
55 },
56 ]
57
58 data = []
59
60 accounts = get_bank_accounts()
61 payroll_entries = get_payroll_entries(accounts, filters)
62 salary_slips = get_salary_slips(payroll_entries)
63
64 if frappe.db.has_column("Employee", "ifsc_code"):
65 get_emp_bank_ifsc_code(salary_slips)
66
67 for salary in salary_slips:
68 if (
69 salary.bank_name
70 and salary.bank_account_no
71 and salary.debit_acc_no
72 and salary.status in ["Submitted", "Paid"]
73 ):
74 row = {
75 "payroll_no": salary.payroll_entry,
76 "debit_account": salary.debit_acc_no,
77 "payment_date": frappe.utils.formatdate(salary.modified.strftime("%Y-%m-%d")),
78 "bank_name": salary.bank_name,
79 "employee_account_no": salary.bank_account_no,
80 "bank_code": salary.ifsc_code,
81 "employee_name": salary.employee + ": " + salary.employee_name,
82 "currency": frappe.get_cached_value("Company", filters.company, "default_currency"),
83 "amount": salary.net_pay,
84 }
85 data.append(row)
86
87 return columns, data
88
89
90 def get_bank_accounts():
91 accounts = [d.name for d in get_all("Account", filters={"account_type": "Bank"})]
92 return accounts
93
94
95 def get_payroll_entries(accounts, filters):
96 payroll_filter = [
97 ("payment_account", "IN", accounts),
98 ("number_of_employees", ">", 0),
99 ("Company", "=", filters.company),
100 ]
101 if filters.to_date:
102 payroll_filter.append(("posting_date", "<", filters.to_date))
103
104 if filters.from_date:
105 payroll_filter.append(("posting_date", ">", filters.from_date))
106
107 entries = get_all("Payroll Entry", payroll_filter, ["name", "payment_account"])
108
109 payment_accounts = [d.payment_account for d in entries]
110 entries = set_company_account(payment_accounts, entries)
111 return entries
112
113
114 def get_salary_slips(payroll_entries):
115 payroll = [d.name for d in payroll_entries]
116 salary_slips = get_all(
117 "Salary Slip",
118 filters=[("payroll_entry", "IN", payroll)],
119 fields=[
120 "modified",
121 "net_pay",
122 "bank_name",
123 "bank_account_no",
124 "payroll_entry",
125 "employee",
126 "employee_name",
127 "status",
128 ],
129 )
130
131 payroll_entry_map = {}
132 for entry in payroll_entries:
133 payroll_entry_map[entry.name] = entry
134
135 # appending company debit accounts
136 for slip in salary_slips:
137 if slip.payroll_entry:
138 slip["debit_acc_no"] = payroll_entry_map[slip.payroll_entry]["company_account"]
139 else:
140 slip["debit_acc_no"] = None
141
142 return salary_slips
143
144
145 def get_emp_bank_ifsc_code(salary_slips):
146 emp_names = [d.employee for d in salary_slips]
147 ifsc_codes = get_all("Employee", [("name", "IN", emp_names)], ["ifsc_code", "name"])
148
149 ifsc_codes_map = {}
150 for code in ifsc_codes:
151 ifsc_codes_map[code.name] = code
152
153 for slip in salary_slips:
154 slip["ifsc_code"] = ifsc_codes_map[code.name]["ifsc_code"]
155
156 return salary_slips
157
158
159 def set_company_account(payment_accounts, payroll_entries):
160 company_accounts = get_all(
161 "Bank Account", [("account", "in", payment_accounts)], ["account", "bank_account_no"]
162 )
163 company_accounts_map = {}
164 for acc in company_accounts:
165 company_accounts_map[acc.account] = acc
166
167 for entry in payroll_entries:
168 company_account = ""
169 if entry.payment_account in company_accounts_map:
170 company_account = company_accounts_map[entry.payment_account]["bank_account_no"]
171 entry["company_account"] = company_account
172
173 return payroll_entries
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hrms/payroll/report/bank_remittance/bank_remittance.py b/hrms/payroll/report/bank_remittance/bank_remittance.py
--- a/hrms/payroll/report/bank_remittance/bank_remittance.py
+++ b/hrms/payroll/report/bank_remittance/bank_remittance.py
@@ -22,7 +22,12 @@
"hidden": 1,
"width": 200,
},
- {"label": _("Payment Date"), "fieldtype": "Data", "fieldname": "payment_date", "width": 100},
+ {
+ "label": _("Payment Date"),
+ "fieldtype": "Data",
+ "fieldname": "payment_date",
+ "width": 100,
+ },
{
"label": _("Employee Name"),
"fieldtype": "Link",
@@ -146,12 +151,10 @@
emp_names = [d.employee for d in salary_slips]
ifsc_codes = get_all("Employee", [("name", "IN", emp_names)], ["ifsc_code", "name"])
- ifsc_codes_map = {}
- for code in ifsc_codes:
- ifsc_codes_map[code.name] = code
+ ifsc_codes_map = {code.name: code.ifsc_code for code in ifsc_codes}
for slip in salary_slips:
- slip["ifsc_code"] = ifsc_codes_map[code.name]["ifsc_code"]
+ slip["ifsc_code"] = ifsc_codes_map[slip.employee]
return salary_slips
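The hunk above pins down the symptom from the issue: `ifsc_codes_map` was keyed by employee name, but each slip was then filled using `code.name`, the variable left over from the previous loop, so every row received the last employee's IFSC code. The patch builds the map as a comprehension and looks it up with `slip.employee`. A stripped-down sketch with hypothetical rows:

```python
ifsc_codes = [
    {"name": "EMP-0001", "ifsc_code": "AAAA0000001"},  # hypothetical data
    {"name": "EMP-0002", "ifsc_code": "BBBB0000002"},
]
ifsc_codes_map = {row["name"]: row["ifsc_code"] for row in ifsc_codes}

salary_slips = [{"employee": "EMP-0001"}, {"employee": "EMP-0002"}]
for slip in salary_slips:
    # Keying by the slip's own employee restores per-employee values;
    # the old lookup reused the stale loop variable and repeated one code.
    slip["ifsc_code"] = ifsc_codes_map[slip["employee"]]

print([s["ifsc_code"] for s in salary_slips])  # two distinct codes
```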
| {"golden_diff": "diff --git a/hrms/payroll/report/bank_remittance/bank_remittance.py b/hrms/payroll/report/bank_remittance/bank_remittance.py\n--- a/hrms/payroll/report/bank_remittance/bank_remittance.py\n+++ b/hrms/payroll/report/bank_remittance/bank_remittance.py\n@@ -22,7 +22,12 @@\n \t\t\t\"hidden\": 1,\n \t\t\t\"width\": 200,\n \t\t},\n-\t\t{\"label\": _(\"Payment Date\"), \"fieldtype\": \"Data\", \"fieldname\": \"payment_date\", \"width\": 100},\n+\t\t{\n+\t\t\t\"label\": _(\"Payment Date\"),\n+\t\t\t\"fieldtype\": \"Data\",\n+\t\t\t\"fieldname\": \"payment_date\",\n+\t\t\t\"width\": 100,\n+\t\t},\n \t\t{\n \t\t\t\"label\": _(\"Employee Name\"),\n \t\t\t\"fieldtype\": \"Link\",\n@@ -146,12 +151,10 @@\n \temp_names = [d.employee for d in salary_slips]\n \tifsc_codes = get_all(\"Employee\", [(\"name\", \"IN\", emp_names)], [\"ifsc_code\", \"name\"])\n \n-\tifsc_codes_map = {}\n-\tfor code in ifsc_codes:\n-\t\tifsc_codes_map[code.name] = code\n+\tifsc_codes_map = {code.name: code.ifsc_code for code in ifsc_codes}\n \n \tfor slip in salary_slips:\n-\t\tslip[\"ifsc_code\"] = ifsc_codes_map[code.name][\"ifsc_code\"]\n+\t\tslip[\"ifsc_code\"] = ifsc_codes_map[slip.employee]\n \n \treturn salary_slips\n", "issue": "IFSC Code showing wrong value in Bank Remittance Report\n### Information about bug\n\nIFSC Code showing wrong value in Bank Remittance Report. It is showing the same IFSC Code for all the employee in the list.\n\n### Module\n\nPayroll\n\n### Version\n\nERPNext: v14.52.1 (HEAD)\r\nFrappe Framework: v14.57.0 (HEAD)\r\nFrappe HR: v14.18.1 (HEAD)\n\n### Installation method\n\nFrappeCloud\n\n### Relevant log output / Stack trace / Full Error Message.\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. 
and contributors\n# For license information, please see license.txt\n\n\nimport frappe\nfrom frappe import _, get_all\n\n\ndef execute(filters=None):\n\tcolumns = [\n\t\t{\n\t\t\t\"label\": _(\"Payroll Number\"),\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"fieldname\": \"payroll_no\",\n\t\t\t\"options\": \"Payroll Entry\",\n\t\t\t\"width\": 150,\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"Debit A/C Number\"),\n\t\t\t\"fieldtype\": \"Int\",\n\t\t\t\"fieldname\": \"debit_account\",\n\t\t\t\"hidden\": 1,\n\t\t\t\"width\": 200,\n\t\t},\n\t\t{\"label\": _(\"Payment Date\"), \"fieldtype\": \"Data\", \"fieldname\": \"payment_date\", \"width\": 100},\n\t\t{\n\t\t\t\"label\": _(\"Employee Name\"),\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"fieldname\": \"employee_name\",\n\t\t\t\"options\": \"Employee\",\n\t\t\t\"width\": 200,\n\t\t},\n\t\t{\"label\": _(\"Bank Name\"), \"fieldtype\": \"Data\", \"fieldname\": \"bank_name\", \"width\": 50},\n\t\t{\n\t\t\t\"label\": _(\"Employee A/C Number\"),\n\t\t\t\"fieldtype\": \"Int\",\n\t\t\t\"fieldname\": \"employee_account_no\",\n\t\t\t\"width\": 50,\n\t\t},\n\t]\n\n\tif frappe.db.has_column(\"Employee\", \"ifsc_code\"):\n\t\tcolumns.append(\n\t\t\t{\"label\": _(\"IFSC Code\"), \"fieldtype\": \"Data\", \"fieldname\": \"bank_code\", \"width\": 100}\n\t\t)\n\n\tcolumns += [\n\t\t{\"label\": _(\"Currency\"), \"fieldtype\": \"Data\", \"fieldname\": \"currency\", \"width\": 50},\n\t\t{\n\t\t\t\"label\": _(\"Net Salary Amount\"),\n\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\"options\": \"currency\",\n\t\t\t\"fieldname\": \"amount\",\n\t\t\t\"width\": 100,\n\t\t},\n\t]\n\n\tdata = []\n\n\taccounts = get_bank_accounts()\n\tpayroll_entries = get_payroll_entries(accounts, filters)\n\tsalary_slips = get_salary_slips(payroll_entries)\n\n\tif frappe.db.has_column(\"Employee\", \"ifsc_code\"):\n\t\tget_emp_bank_ifsc_code(salary_slips)\n\n\tfor salary in salary_slips:\n\t\tif (\n\t\t\tsalary.bank_name\n\t\t\tand salary.bank_account_no\n\t\t\tand salary.debit_acc_no\n\t\t\tand salary.status in [\"Submitted\", \"Paid\"]\n\t\t):\n\t\t\trow = {\n\t\t\t\t\"payroll_no\": salary.payroll_entry,\n\t\t\t\t\"debit_account\": salary.debit_acc_no,\n\t\t\t\t\"payment_date\": frappe.utils.formatdate(salary.modified.strftime(\"%Y-%m-%d\")),\n\t\t\t\t\"bank_name\": salary.bank_name,\n\t\t\t\t\"employee_account_no\": salary.bank_account_no,\n\t\t\t\t\"bank_code\": salary.ifsc_code,\n\t\t\t\t\"employee_name\": salary.employee + \": \" + salary.employee_name,\n\t\t\t\t\"currency\": frappe.get_cached_value(\"Company\", filters.company, \"default_currency\"),\n\t\t\t\t\"amount\": salary.net_pay,\n\t\t\t}\n\t\t\tdata.append(row)\n\n\treturn columns, data\n\n\ndef get_bank_accounts():\n\taccounts = [d.name for d in get_all(\"Account\", filters={\"account_type\": \"Bank\"})]\n\treturn accounts\n\n\ndef get_payroll_entries(accounts, filters):\n\tpayroll_filter = [\n\t\t(\"payment_account\", \"IN\", accounts),\n\t\t(\"number_of_employees\", \">\", 0),\n\t\t(\"Company\", \"=\", filters.company),\n\t]\n\tif filters.to_date:\n\t\tpayroll_filter.append((\"posting_date\", \"<\", filters.to_date))\n\n\tif filters.from_date:\n\t\tpayroll_filter.append((\"posting_date\", \">\", filters.from_date))\n\n\tentries = get_all(\"Payroll Entry\", payroll_filter, [\"name\", \"payment_account\"])\n\n\tpayment_accounts = [d.payment_account for d in entries]\n\tentries = set_company_account(payment_accounts, entries)\n\treturn entries\n\n\ndef get_salary_slips(payroll_entries):\n\tpayroll = [d.name for d in 
payroll_entries]\n\tsalary_slips = get_all(\n\t\t\"Salary Slip\",\n\t\tfilters=[(\"payroll_entry\", \"IN\", payroll)],\n\t\tfields=[\n\t\t\t\"modified\",\n\t\t\t\"net_pay\",\n\t\t\t\"bank_name\",\n\t\t\t\"bank_account_no\",\n\t\t\t\"payroll_entry\",\n\t\t\t\"employee\",\n\t\t\t\"employee_name\",\n\t\t\t\"status\",\n\t\t],\n\t)\n\n\tpayroll_entry_map = {}\n\tfor entry in payroll_entries:\n\t\tpayroll_entry_map[entry.name] = entry\n\n\t# appending company debit accounts\n\tfor slip in salary_slips:\n\t\tif slip.payroll_entry:\n\t\t\tslip[\"debit_acc_no\"] = payroll_entry_map[slip.payroll_entry][\"company_account\"]\n\t\telse:\n\t\t\tslip[\"debit_acc_no\"] = None\n\n\treturn salary_slips\n\n\ndef get_emp_bank_ifsc_code(salary_slips):\n\temp_names = [d.employee for d in salary_slips]\n\tifsc_codes = get_all(\"Employee\", [(\"name\", \"IN\", emp_names)], [\"ifsc_code\", \"name\"])\n\n\tifsc_codes_map = {}\n\tfor code in ifsc_codes:\n\t\tifsc_codes_map[code.name] = code\n\n\tfor slip in salary_slips:\n\t\tslip[\"ifsc_code\"] = ifsc_codes_map[code.name][\"ifsc_code\"]\n\n\treturn salary_slips\n\n\ndef set_company_account(payment_accounts, payroll_entries):\n\tcompany_accounts = get_all(\n\t\t\"Bank Account\", [(\"account\", \"in\", payment_accounts)], [\"account\", \"bank_account_no\"]\n\t)\n\tcompany_accounts_map = {}\n\tfor acc in company_accounts:\n\t\tcompany_accounts_map[acc.account] = acc\n\n\tfor entry in payroll_entries:\n\t\tcompany_account = \"\"\n\t\tif entry.payment_account in company_accounts_map:\n\t\t\tcompany_account = company_accounts_map[entry.payment_account][\"bank_account_no\"]\n\t\tentry[\"company_account\"] = company_account\n\n\treturn payroll_entries\n", "path": "hrms/payroll/report/bank_remittance/bank_remittance.py"}], "after_files": [{"content": "# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. 
and contributors\n# For license information, please see license.txt\n\n\nimport frappe\nfrom frappe import _, get_all\n\n\ndef execute(filters=None):\n\tcolumns = [\n\t\t{\n\t\t\t\"label\": _(\"Payroll Number\"),\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"fieldname\": \"payroll_no\",\n\t\t\t\"options\": \"Payroll Entry\",\n\t\t\t\"width\": 150,\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"Debit A/C Number\"),\n\t\t\t\"fieldtype\": \"Int\",\n\t\t\t\"fieldname\": \"debit_account\",\n\t\t\t\"hidden\": 1,\n\t\t\t\"width\": 200,\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"Payment Date\"),\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"fieldname\": \"payment_date\",\n\t\t\t\"width\": 100,\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"Employee Name\"),\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"fieldname\": \"employee_name\",\n\t\t\t\"options\": \"Employee\",\n\t\t\t\"width\": 200,\n\t\t},\n\t\t{\"label\": _(\"Bank Name\"), \"fieldtype\": \"Data\", \"fieldname\": \"bank_name\", \"width\": 50},\n\t\t{\n\t\t\t\"label\": _(\"Employee A/C Number\"),\n\t\t\t\"fieldtype\": \"Int\",\n\t\t\t\"fieldname\": \"employee_account_no\",\n\t\t\t\"width\": 50,\n\t\t},\n\t]\n\n\tif frappe.db.has_column(\"Employee\", \"ifsc_code\"):\n\t\tcolumns.append(\n\t\t\t{\"label\": _(\"IFSC Code\"), \"fieldtype\": \"Data\", \"fieldname\": \"bank_code\", \"width\": 100}\n\t\t)\n\n\tcolumns += [\n\t\t{\"label\": _(\"Currency\"), \"fieldtype\": \"Data\", \"fieldname\": \"currency\", \"width\": 50},\n\t\t{\n\t\t\t\"label\": _(\"Net Salary Amount\"),\n\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\"options\": \"currency\",\n\t\t\t\"fieldname\": \"amount\",\n\t\t\t\"width\": 100,\n\t\t},\n\t]\n\n\tdata = []\n\n\taccounts = get_bank_accounts()\n\tpayroll_entries = get_payroll_entries(accounts, filters)\n\tsalary_slips = get_salary_slips(payroll_entries)\n\n\tif frappe.db.has_column(\"Employee\", \"ifsc_code\"):\n\t\tget_emp_bank_ifsc_code(salary_slips)\n\n\tfor salary in salary_slips:\n\t\tif (\n\t\t\tsalary.bank_name\n\t\t\tand salary.bank_account_no\n\t\t\tand salary.debit_acc_no\n\t\t\tand salary.status in [\"Submitted\", \"Paid\"]\n\t\t):\n\t\t\trow = {\n\t\t\t\t\"payroll_no\": salary.payroll_entry,\n\t\t\t\t\"debit_account\": salary.debit_acc_no,\n\t\t\t\t\"payment_date\": frappe.utils.formatdate(salary.modified.strftime(\"%Y-%m-%d\")),\n\t\t\t\t\"bank_name\": salary.bank_name,\n\t\t\t\t\"employee_account_no\": salary.bank_account_no,\n\t\t\t\t\"bank_code\": salary.ifsc_code,\n\t\t\t\t\"employee_name\": salary.employee + \": \" + salary.employee_name,\n\t\t\t\t\"currency\": frappe.get_cached_value(\"Company\", filters.company, \"default_currency\"),\n\t\t\t\t\"amount\": salary.net_pay,\n\t\t\t}\n\t\t\tdata.append(row)\n\n\treturn columns, data\n\n\ndef get_bank_accounts():\n\taccounts = [d.name for d in get_all(\"Account\", filters={\"account_type\": \"Bank\"})]\n\treturn accounts\n\n\ndef get_payroll_entries(accounts, filters):\n\tpayroll_filter = [\n\t\t(\"payment_account\", \"IN\", accounts),\n\t\t(\"number_of_employees\", \">\", 0),\n\t\t(\"Company\", \"=\", filters.company),\n\t]\n\tif filters.to_date:\n\t\tpayroll_filter.append((\"posting_date\", \"<\", filters.to_date))\n\n\tif filters.from_date:\n\t\tpayroll_filter.append((\"posting_date\", \">\", filters.from_date))\n\n\tentries = get_all(\"Payroll Entry\", payroll_filter, [\"name\", \"payment_account\"])\n\n\tpayment_accounts = [d.payment_account for d in entries]\n\tentries = set_company_account(payment_accounts, entries)\n\treturn entries\n\n\ndef get_salary_slips(payroll_entries):\n\tpayroll = [d.name 
for d in payroll_entries]\n\tsalary_slips = get_all(\n\t\t\"Salary Slip\",\n\t\tfilters=[(\"payroll_entry\", \"IN\", payroll)],\n\t\tfields=[\n\t\t\t\"modified\",\n\t\t\t\"net_pay\",\n\t\t\t\"bank_name\",\n\t\t\t\"bank_account_no\",\n\t\t\t\"payroll_entry\",\n\t\t\t\"employee\",\n\t\t\t\"employee_name\",\n\t\t\t\"status\",\n\t\t],\n\t)\n\n\tpayroll_entry_map = {}\n\tfor entry in payroll_entries:\n\t\tpayroll_entry_map[entry.name] = entry\n\n\t# appending company debit accounts\n\tfor slip in salary_slips:\n\t\tif slip.payroll_entry:\n\t\t\tslip[\"debit_acc_no\"] = payroll_entry_map[slip.payroll_entry][\"company_account\"]\n\t\telse:\n\t\t\tslip[\"debit_acc_no\"] = None\n\n\treturn salary_slips\n\n\ndef get_emp_bank_ifsc_code(salary_slips):\n\temp_names = [d.employee for d in salary_slips]\n\tifsc_codes = get_all(\"Employee\", [(\"name\", \"IN\", emp_names)], [\"ifsc_code\", \"name\"])\n\n\tifsc_codes_map = {code.name: code.ifsc_code for code in ifsc_codes}\n\n\tfor slip in salary_slips:\n\t\tslip[\"ifsc_code\"] = ifsc_codes_map[slip.employee]\n\n\treturn salary_slips\n\n\ndef set_company_account(payment_accounts, payroll_entries):\n\tcompany_accounts = get_all(\n\t\t\"Bank Account\", [(\"account\", \"in\", payment_accounts)], [\"account\", \"bank_account_no\"]\n\t)\n\tcompany_accounts_map = {}\n\tfor acc in company_accounts:\n\t\tcompany_accounts_map[acc.account] = acc\n\n\tfor entry in payroll_entries:\n\t\tcompany_account = \"\"\n\t\tif entry.payment_account in company_accounts_map:\n\t\t\tcompany_account = company_accounts_map[entry.payment_account][\"bank_account_no\"]\n\t\tentry[\"company_account\"] = company_account\n\n\treturn payroll_entries\n", "path": "hrms/payroll/report/bank_remittance/bank_remittance.py"}]} | 2,193 | 365 |
gh_patches_debug_6470 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-2306 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix enum_value handling for inputs
A clean and easy solution for fixing the broken enum_value handling for inputs
Closes https://github.com/strawberry-graphql/strawberry/issues/2305
Closes https://github.com/strawberry-graphql/strawberry/pull/2203
Closes https://github.com/strawberry-graphql/strawberry/pull/2185
Closes https://github.com/strawberry-graphql/strawberry/pull/2306
@patrick91 sorry for stealing your release file and tests
--- END ISSUE ---
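For orientation, here is a minimal sketch of the kind of enum the report concerns. It is not part of the original issue: the enum name and values are hypothetical, and the lookup behaviour noted in the comments reflects what the fix below is meant to restore.

```python
# Hypothetical sketch of an enum mixing enum_value with a plain member value.
from enum import Enum

import strawberry


@strawberry.enum
class IceCreamFlavour(Enum):
    # GraphQL keeps the member name VANILLA; the serialized value becomes "vanilla".
    VANILLA = strawberry.enum_value("vanilla")
    CHOCOLATE = "chocolate"


# With the broken handling, coercing the custom value back to a member fails when
# the enum is used as an input; after the fix, lookups by value work as expected:
#     IceCreamFlavour("vanilla") is IceCreamFlavour.VANILLA
#     IceCreamFlavour.VANILLA.value == "vanilla"
```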
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/enum.py`
Content:
```
1 import dataclasses
2 from enum import EnumMeta
3 from typing import (
4 Any,
5 Callable,
6 Iterable,
7 List,
8 Mapping,
9 Optional,
10 TypeVar,
11 Union,
12 overload,
13 )
14
15 from strawberry.type import StrawberryType
16
17 from .exceptions import ObjectIsNotAnEnumError
18
19
20 @dataclasses.dataclass
21 class EnumValue:
22 name: str
23 value: Any
24 deprecation_reason: Optional[str] = None
25 directives: Iterable[object] = ()
26 description: Optional[str] = None
27
28
29 @dataclasses.dataclass
30 class EnumDefinition(StrawberryType):
31 wrapped_cls: EnumMeta
32 name: str
33 values: List[EnumValue]
34 description: Optional[str]
35 directives: Iterable[object] = ()
36
37 def __hash__(self) -> int:
38 # TODO: Is this enough for unique-ness?
39 return hash(self.name)
40
41 def copy_with(
42 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
43 ) -> Union[StrawberryType, type]:
44 return super().copy_with(type_var_map)
45
46 @property
47 def is_generic(self) -> bool:
48 return False
49
50
51 # TODO: remove duplication of EnumValueDefinition and EnumValue
52 @dataclasses.dataclass
53 class EnumValueDefinition:
54 value: Any
55 deprecation_reason: Optional[str] = None
56 directives: Iterable[object] = ()
57 description: Optional[str] = None
58
59
60 def enum_value(
61 value: Any,
62 deprecation_reason: Optional[str] = None,
63 directives: Iterable[object] = (),
64 description: Optional[str] = None,
65 ) -> EnumValueDefinition:
66 return EnumValueDefinition(
67 value=value,
68 deprecation_reason=deprecation_reason,
69 directives=directives,
70 description=description,
71 )
72
73
74 EnumType = TypeVar("EnumType", bound=EnumMeta)
75
76
77 def _process_enum(
78 cls: EnumType,
79 name: Optional[str] = None,
80 description: Optional[str] = None,
81 directives: Iterable[object] = (),
82 ) -> EnumType:
83 if not isinstance(cls, EnumMeta):
84 raise ObjectIsNotAnEnumError(cls)
85
86 if not name:
87 name = cls.__name__
88
89 description = description
90
91 values = []
92 for item in cls: # type: ignore
93 item_value = item.value
94 item_name = item.name
95 deprecation_reason = None
96 item_directives: Iterable[object] = ()
97 enum_value_description = None
98
99 if isinstance(item_value, EnumValueDefinition):
100 item_directives = item_value.directives
101 enum_value_description = item_value.description
102 deprecation_reason = item_value.deprecation_reason
103 item_value = item_value.value
104
105 value = EnumValue(
106 item_name,
107 item_value,
108 deprecation_reason=deprecation_reason,
109 directives=item_directives,
110 description=enum_value_description,
111 )
112 values.append(value)
113
114 cls._enum_definition = EnumDefinition( # type: ignore
115 wrapped_cls=cls,
116 name=name,
117 values=values,
118 description=description,
119 directives=directives,
120 )
121
122 return cls
123
124
125 @overload
126 def enum(
127 _cls: EnumType,
128 *,
129 name: Optional[str] = None,
130 description: Optional[str] = None,
131 directives: Iterable[object] = ()
132 ) -> EnumType:
133 ...
134
135
136 @overload
137 def enum(
138 _cls: None = None,
139 *,
140 name: Optional[str] = None,
141 description: Optional[str] = None,
142 directives: Iterable[object] = ()
143 ) -> Callable[[EnumType], EnumType]:
144 ...
145
146
147 def enum(
148 _cls: Optional[EnumType] = None,
149 *,
150 name: Optional[str] = None,
151 description: Optional[str] = None,
152 directives: Iterable[object] = ()
153 ) -> Union[EnumType, Callable[[EnumType], EnumType]]:
154 """Registers the enum in the GraphQL type system.
155
156 If name is passed, the name of the GraphQL type will be
157 the value passed of name instead of the Enum class name.
158 """
159
160 def wrap(cls: EnumType) -> EnumType:
161 return _process_enum(cls, name, description, directives=directives)
162
163 if not _cls:
164 return wrap
165
166 return wrap(_cls)
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/enum.py b/strawberry/enum.py
--- a/strawberry/enum.py
+++ b/strawberry/enum.py
@@ -102,6 +102,11 @@
deprecation_reason = item_value.deprecation_reason
item_value = item_value.value
+ # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and
+ # `MyEnum['MY_VALUE']` both work
+ cls._value2member_map_[item_value] = item
+ cls._member_map_[item_name]._value_ = item_value
+
value = EnumValue(
item_name,
item_value,
| {"golden_diff": "diff --git a/strawberry/enum.py b/strawberry/enum.py\n--- a/strawberry/enum.py\n+++ b/strawberry/enum.py\n@@ -102,6 +102,11 @@\n deprecation_reason = item_value.deprecation_reason\n item_value = item_value.value\n \n+ # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and\n+ # `MyEnum['MY_VALUE']` both work\n+ cls._value2member_map_[item_value] = item\n+ cls._member_map_[item_name]._value_ = item_value\n+\n value = EnumValue(\n item_name,\n item_value,\n", "issue": "Fix enum_value handling for inputs\nA clean and easy solution for fixing the broken enum_value handling for inputs\r\n\r\nCloses https://github.com/strawberry-graphql/strawberry/issues/2305\r\nCloses https://github.com/strawberry-graphql/strawberry/pull/2203\r\nCloses https://github.com/strawberry-graphql/strawberry/pull/2185\r\nCloses https://github.com/strawberry-graphql/strawberry/pull/2306\r\n\r\n@patrick91 sorry for stealing your release file and tests\n", "before_files": [{"content": "import dataclasses\nfrom enum import EnumMeta\nfrom typing import (\n Any,\n Callable,\n Iterable,\n List,\n Mapping,\n Optional,\n TypeVar,\n Union,\n overload,\n)\n\nfrom strawberry.type import StrawberryType\n\nfrom .exceptions import ObjectIsNotAnEnumError\n\n\[email protected]\nclass EnumValue:\n name: str\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\[email protected]\nclass EnumDefinition(StrawberryType):\n wrapped_cls: EnumMeta\n name: str\n values: List[EnumValue]\n description: Optional[str]\n directives: Iterable[object] = ()\n\n def __hash__(self) -> int:\n # TODO: Is this enough for unique-ness?\n return hash(self.name)\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n return super().copy_with(type_var_map)\n\n @property\n def is_generic(self) -> bool:\n return False\n\n\n# TODO: remove duplication of EnumValueDefinition and EnumValue\[email protected]\nclass EnumValueDefinition:\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\ndef enum_value(\n value: Any,\n deprecation_reason: Optional[str] = None,\n directives: Iterable[object] = (),\n description: Optional[str] = None,\n) -> EnumValueDefinition:\n return EnumValueDefinition(\n value=value,\n deprecation_reason=deprecation_reason,\n directives=directives,\n description=description,\n )\n\n\nEnumType = TypeVar(\"EnumType\", bound=EnumMeta)\n\n\ndef _process_enum(\n cls: EnumType,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = (),\n) -> EnumType:\n if not isinstance(cls, EnumMeta):\n raise ObjectIsNotAnEnumError(cls)\n\n if not name:\n name = cls.__name__\n\n description = description\n\n values = []\n for item in cls: # type: ignore\n item_value = item.value\n item_name = item.name\n deprecation_reason = None\n item_directives: Iterable[object] = ()\n enum_value_description = None\n\n if isinstance(item_value, EnumValueDefinition):\n item_directives = item_value.directives\n enum_value_description = item_value.description\n deprecation_reason = item_value.deprecation_reason\n item_value = item_value.value\n\n value = EnumValue(\n item_name,\n item_value,\n deprecation_reason=deprecation_reason,\n directives=item_directives,\n description=enum_value_description,\n )\n values.append(value)\n\n cls._enum_definition = EnumDefinition( # type: ignore\n 
wrapped_cls=cls,\n name=name,\n values=values,\n description=description,\n directives=directives,\n )\n\n return cls\n\n\n@overload\ndef enum(\n _cls: EnumType,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> EnumType:\n ...\n\n\n@overload\ndef enum(\n _cls: None = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Callable[[EnumType], EnumType]:\n ...\n\n\ndef enum(\n _cls: Optional[EnumType] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Union[EnumType, Callable[[EnumType], EnumType]]:\n \"\"\"Registers the enum in the GraphQL type system.\n\n If name is passed, the name of the GraphQL type will be\n the value passed of name instead of the Enum class name.\n \"\"\"\n\n def wrap(cls: EnumType) -> EnumType:\n return _process_enum(cls, name, description, directives=directives)\n\n if not _cls:\n return wrap\n\n return wrap(_cls)\n", "path": "strawberry/enum.py"}], "after_files": [{"content": "import dataclasses\nfrom enum import EnumMeta\nfrom typing import (\n Any,\n Callable,\n Iterable,\n List,\n Mapping,\n Optional,\n TypeVar,\n Union,\n overload,\n)\n\nfrom strawberry.type import StrawberryType\n\nfrom .exceptions import ObjectIsNotAnEnumError\n\n\[email protected]\nclass EnumValue:\n name: str\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\[email protected]\nclass EnumDefinition(StrawberryType):\n wrapped_cls: EnumMeta\n name: str\n values: List[EnumValue]\n description: Optional[str]\n directives: Iterable[object] = ()\n\n def __hash__(self) -> int:\n # TODO: Is this enough for unique-ness?\n return hash(self.name)\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n return super().copy_with(type_var_map)\n\n @property\n def is_generic(self) -> bool:\n return False\n\n\n# TODO: remove duplication of EnumValueDefinition and EnumValue\[email protected]\nclass EnumValueDefinition:\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\ndef enum_value(\n value: Any,\n deprecation_reason: Optional[str] = None,\n directives: Iterable[object] = (),\n description: Optional[str] = None,\n) -> EnumValueDefinition:\n return EnumValueDefinition(\n value=value,\n deprecation_reason=deprecation_reason,\n directives=directives,\n description=description,\n )\n\n\nEnumType = TypeVar(\"EnumType\", bound=EnumMeta)\n\n\ndef _process_enum(\n cls: EnumType,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = (),\n) -> EnumType:\n if not isinstance(cls, EnumMeta):\n raise ObjectIsNotAnEnumError(cls)\n\n if not name:\n name = cls.__name__\n\n description = description\n\n values = []\n for item in cls: # type: ignore\n item_value = item.value\n item_name = item.name\n deprecation_reason = None\n item_directives: Iterable[object] = ()\n enum_value_description = None\n\n if isinstance(item_value, EnumValueDefinition):\n item_directives = item_value.directives\n enum_value_description = item_value.description\n deprecation_reason = item_value.deprecation_reason\n item_value = item_value.value\n\n # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and\n # `MyEnum['MY_VALUE']` both work\n 
cls._value2member_map_[item_value] = item\n cls._member_map_[item_name]._value_ = item_value\n\n value = EnumValue(\n item_name,\n item_value,\n deprecation_reason=deprecation_reason,\n directives=item_directives,\n description=enum_value_description,\n )\n values.append(value)\n\n cls._enum_definition = EnumDefinition( # type: ignore\n wrapped_cls=cls,\n name=name,\n values=values,\n description=description,\n directives=directives,\n )\n\n return cls\n\n\n@overload\ndef enum(\n _cls: EnumType,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> EnumType:\n ...\n\n\n@overload\ndef enum(\n _cls: None = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Callable[[EnumType], EnumType]:\n ...\n\n\ndef enum(\n _cls: Optional[EnumType] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Union[EnumType, Callable[[EnumType], EnumType]]:\n \"\"\"Registers the enum in the GraphQL type system.\n\n If name is passed, the name of the GraphQL type will be\n the value passed of name instead of the Enum class name.\n \"\"\"\n\n def wrap(cls: EnumType) -> EnumType:\n return _process_enum(cls, name, description, directives=directives)\n\n if not _cls:\n return wrap\n\n return wrap(_cls)\n", "path": "strawberry/enum.py"}]} | 1,739 | 156 |
gh_patches_debug_29962 | rasdani/github-patches | git_diff | AUTOMATIC1111__stable-diffusion-webui-3628 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Portrait mode images generate in landscape mode in img2img [Bug]: 
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
Image in portrait mode shows up fine in the preview, but when the alternative image is generated it is rotated to landscape mode.
### Steps to reproduce the problem
1. Load an image that was taken using a phone in portrait mode.
2. Set a prompt and press generate.
### What should have happened?
It should have generated the output image in portrait mode as well.
### Commit where the problem happens
6bd6154a92eb05c80d66df661a38f8b70cc13729
### What platforms do you use to access UI ?
Windows
### What browsers do you use to access the UI ?
Microsoft Edge
### Command Line Arguments
```Shell
--xformers
```
### Additional information, context and logs
When images are taken in portrait mode, they are often stored as landscape but carry orientation metadata indicating that they are portrait, so that image viewers can display them correctly. This metadata should be used to determine how the output image is generated.
--- END ISSUE ---
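The EXIF behaviour described in the report can be illustrated with a short Pillow snippet. This is an illustration only, not code from the repository; the file name and pixel sizes are made up.

```python
# Illustration: honour the EXIF Orientation tag before any processing.
from PIL import Image, ImageOps

img = Image.open("phone_photo.jpg")   # portrait shot, stored landscape + Orientation tag
print(img.size)                       # e.g. (4032, 3024) despite being a portrait photo

img = ImageOps.exif_transpose(img)    # rotate/flip the pixel data to match the tag
print(img.size)                       # now (3024, 4032), i.e. true portrait orientation
```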
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modules/img2img.py`
Content:
```
1 import math
2 import os
3 import sys
4 import traceback
5
6 import numpy as np
7 from PIL import Image, ImageOps, ImageChops
8
9 from modules import devices
10 from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
11 from modules.shared import opts, state
12 import modules.shared as shared
13 import modules.processing as processing
14 from modules.ui import plaintext_to_html
15 import modules.images as images
16 import modules.scripts
17
18
19 def process_batch(p, input_dir, output_dir, args):
20 processing.fix_seed(p)
21
22 images = [file for file in [os.path.join(input_dir, x) for x in os.listdir(input_dir)] if os.path.isfile(file)]
23
24 print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
25
26 save_normally = output_dir == ''
27
28 p.do_not_save_grid = True
29 p.do_not_save_samples = not save_normally
30
31 state.job_count = len(images) * p.n_iter
32
33 for i, image in enumerate(images):
34 state.job = f"{i+1} out of {len(images)}"
35 if state.skipped:
36 state.skipped = False
37
38 if state.interrupted:
39 break
40
41 img = Image.open(image)
42 p.init_images = [img] * p.batch_size
43
44 proc = modules.scripts.scripts_img2img.run(p, *args)
45 if proc is None:
46 proc = process_images(p)
47
48 for n, processed_image in enumerate(proc.images):
49 filename = os.path.basename(image)
50
51 if n > 0:
52 left, right = os.path.splitext(filename)
53 filename = f"{left}-{n}{right}"
54
55 if not save_normally:
56 processed_image.save(os.path.join(output_dir, filename))
57
58
59 def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_img_inpaint, init_mask_inpaint, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args):
60 is_inpaint = mode == 1
61 is_batch = mode == 2
62
63 if is_inpaint:
64 if mask_mode == 0:
65 image = init_img_with_mask['image']
66 mask = init_img_with_mask['mask']
67 alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
68 mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')
69 image = image.convert('RGB')
70 else:
71 image = init_img_inpaint
72 mask = init_mask_inpaint
73 else:
74 image = init_img
75 mask = None
76
77 assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
78
79 p = StableDiffusionProcessingImg2Img(
80 sd_model=shared.sd_model,
81 outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,
82 outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,
83 prompt=prompt,
84 negative_prompt=negative_prompt,
85 styles=[prompt_style, prompt_style2],
86 seed=seed,
87 subseed=subseed,
88 subseed_strength=subseed_strength,
89 seed_resize_from_h=seed_resize_from_h,
90 seed_resize_from_w=seed_resize_from_w,
91 seed_enable_extras=seed_enable_extras,
92 sampler_index=sampler_index,
93 batch_size=batch_size,
94 n_iter=n_iter,
95 steps=steps,
96 cfg_scale=cfg_scale,
97 width=width,
98 height=height,
99 restore_faces=restore_faces,
100 tiling=tiling,
101 init_images=[image],
102 mask=mask,
103 mask_blur=mask_blur,
104 inpainting_fill=inpainting_fill,
105 resize_mode=resize_mode,
106 denoising_strength=denoising_strength,
107 inpaint_full_res=inpaint_full_res,
108 inpaint_full_res_padding=inpaint_full_res_padding,
109 inpainting_mask_invert=inpainting_mask_invert,
110 )
111
112 p.scripts = modules.scripts.scripts_txt2img
113 p.script_args = args
114
115 if shared.cmd_opts.enable_console_prompts:
116 print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
117
118 p.extra_generation_params["Mask blur"] = mask_blur
119
120 if is_batch:
121 assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
122
123 process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, args)
124
125 processed = Processed(p, [], p.seed, "")
126 else:
127 processed = modules.scripts.scripts_img2img.run(p, *args)
128 if processed is None:
129 processed = process_images(p)
130
131 shared.total_tqdm.clear()
132
133 generation_info_js = processed.js()
134 if opts.samples_log_stdout:
135 print(generation_info_js)
136
137 if opts.do_not_show_images:
138 processed.images = []
139
140 return processed.images, generation_info_js, plaintext_to_html(processed.info)
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modules/img2img.py b/modules/img2img.py
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -39,6 +39,8 @@
break
img = Image.open(image)
+ # Use the EXIF orientation of photos taken by smartphones.
+ img = ImageOps.exif_transpose(img)
p.init_images = [img] * p.batch_size
proc = modules.scripts.scripts_img2img.run(p, *args)
@@ -61,19 +63,25 @@
is_batch = mode == 2
if is_inpaint:
+ # Drawn mask
if mask_mode == 0:
image = init_img_with_mask['image']
mask = init_img_with_mask['mask']
alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')
image = image.convert('RGB')
+ # Uploaded mask
else:
image = init_img_inpaint
mask = init_mask_inpaint
+ # No mask
else:
image = init_img
mask = None
+ # Use the EXIF orientation of photos taken by smartphones.
+ image = ImageOps.exif_transpose(image)
+
assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
p = StableDiffusionProcessingImg2Img(
| {"golden_diff": "diff --git a/modules/img2img.py b/modules/img2img.py\n--- a/modules/img2img.py\n+++ b/modules/img2img.py\n@@ -39,6 +39,8 @@\n break\r\n \r\n img = Image.open(image)\r\n+ # Use the EXIF orientation of photos taken by smartphones.\r\n+ img = ImageOps.exif_transpose(img) \r\n p.init_images = [img] * p.batch_size\r\n \r\n proc = modules.scripts.scripts_img2img.run(p, *args)\r\n@@ -61,19 +63,25 @@\n is_batch = mode == 2\r\n \r\n if is_inpaint:\r\n+ # Drawn mask\r\n if mask_mode == 0:\r\n image = init_img_with_mask['image']\r\n mask = init_img_with_mask['mask']\r\n alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')\r\n mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')\r\n image = image.convert('RGB')\r\n+ # Uploaded mask\r\n else:\r\n image = init_img_inpaint\r\n mask = init_mask_inpaint\r\n+ # No mask\r\n else:\r\n image = init_img\r\n mask = None\r\n \r\n+ # Use the EXIF orientation of photos taken by smartphones.\r\n+ image = ImageOps.exif_transpose(image) \r\n+\r\n assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'\r\n \r\n p = StableDiffusionProcessingImg2Img(\n", "issue": "Portrait mode images generates in landscape mode in img2img [Bug]: \n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues and checked the recent builds/commits\n\n### What happened?\n\nImage in portrait mode shows up fine in the preview, but when the alternative image is generated it is rotated to landscape mode.\n\n### Steps to reproduce the problem\n\n1. Load a image that was taken using a phone in portrait mode.\r\n2. Set a prompt and press generate.\r\n\n\n### What should have happened?\n\nIt should have generated the output image in portrait mode as well.\n\n### Commit where the problem happens\n\n6bd6154a92eb05c80d66df661a38f8b70cc13729\n\n### What platforms do you use to access UI ?\n\nWindows\n\n### What browsers do you use to access the UI ?\n\nMicrosoft Edge\n\n### Command Line Arguments\n\n```Shell\n--xformers\n```\n\n\n### Additional information, context and logs\n\nWhen images are taken in portrait mode, they are often stored as landscape, but have information that it is portrait so that they can be displayed correctly in image viewers, this should be used to determine how the output image should be generated.\n", "before_files": [{"content": "import math\r\nimport os\r\nimport sys\r\nimport traceback\r\n\r\nimport numpy as np\r\nfrom PIL import Image, ImageOps, ImageChops\r\n\r\nfrom modules import devices\r\nfrom modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images\r\nfrom modules.shared import opts, state\r\nimport modules.shared as shared\r\nimport modules.processing as processing\r\nfrom modules.ui import plaintext_to_html\r\nimport modules.images as images\r\nimport modules.scripts\r\n\r\n\r\ndef process_batch(p, input_dir, output_dir, args):\r\n processing.fix_seed(p)\r\n\r\n images = [file for file in [os.path.join(input_dir, x) for x in os.listdir(input_dir)] if os.path.isfile(file)]\r\n\r\n print(f\"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.\")\r\n\r\n save_normally = output_dir == ''\r\n\r\n p.do_not_save_grid = True\r\n p.do_not_save_samples = not save_normally\r\n\r\n state.job_count = len(images) * p.n_iter\r\n\r\n for i, image in enumerate(images):\r\n state.job = f\"{i+1} out of {len(images)}\"\r\n if state.skipped:\r\n state.skipped = False\r\n\r\n if 
state.interrupted:\r\n break\r\n\r\n img = Image.open(image)\r\n p.init_images = [img] * p.batch_size\r\n\r\n proc = modules.scripts.scripts_img2img.run(p, *args)\r\n if proc is None:\r\n proc = process_images(p)\r\n\r\n for n, processed_image in enumerate(proc.images):\r\n filename = os.path.basename(image)\r\n\r\n if n > 0:\r\n left, right = os.path.splitext(filename)\r\n filename = f\"{left}-{n}{right}\"\r\n\r\n if not save_normally:\r\n processed_image.save(os.path.join(output_dir, filename))\r\n\r\n\r\ndef img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_img_inpaint, init_mask_inpaint, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args):\r\n is_inpaint = mode == 1\r\n is_batch = mode == 2\r\n\r\n if is_inpaint:\r\n if mask_mode == 0:\r\n image = init_img_with_mask['image']\r\n mask = init_img_with_mask['mask']\r\n alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')\r\n mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')\r\n image = image.convert('RGB')\r\n else:\r\n image = init_img_inpaint\r\n mask = init_mask_inpaint\r\n else:\r\n image = init_img\r\n mask = None\r\n\r\n assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'\r\n\r\n p = StableDiffusionProcessingImg2Img(\r\n sd_model=shared.sd_model,\r\n outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,\r\n outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,\r\n prompt=prompt,\r\n negative_prompt=negative_prompt,\r\n styles=[prompt_style, prompt_style2],\r\n seed=seed,\r\n subseed=subseed,\r\n subseed_strength=subseed_strength,\r\n seed_resize_from_h=seed_resize_from_h,\r\n seed_resize_from_w=seed_resize_from_w,\r\n seed_enable_extras=seed_enable_extras,\r\n sampler_index=sampler_index,\r\n batch_size=batch_size,\r\n n_iter=n_iter,\r\n steps=steps,\r\n cfg_scale=cfg_scale,\r\n width=width,\r\n height=height,\r\n restore_faces=restore_faces,\r\n tiling=tiling,\r\n init_images=[image],\r\n mask=mask,\r\n mask_blur=mask_blur,\r\n inpainting_fill=inpainting_fill,\r\n resize_mode=resize_mode,\r\n denoising_strength=denoising_strength,\r\n inpaint_full_res=inpaint_full_res,\r\n inpaint_full_res_padding=inpaint_full_res_padding,\r\n inpainting_mask_invert=inpainting_mask_invert,\r\n )\r\n\r\n p.scripts = modules.scripts.scripts_txt2img\r\n p.script_args = args\r\n\r\n if shared.cmd_opts.enable_console_prompts:\r\n print(f\"\\nimg2img: {prompt}\", file=shared.progress_print_out)\r\n\r\n p.extra_generation_params[\"Mask blur\"] = mask_blur\r\n\r\n if is_batch:\r\n assert not shared.cmd_opts.hide_ui_dir_config, \"Launched with --hide-ui-dir-config, batch img2img disabled\"\r\n\r\n process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, args)\r\n\r\n processed = Processed(p, [], p.seed, \"\")\r\n else:\r\n processed = modules.scripts.scripts_img2img.run(p, *args)\r\n if processed is None:\r\n processed = process_images(p)\r\n\r\n shared.total_tqdm.clear()\r\n\r\n 
generation_info_js = processed.js()\r\n if opts.samples_log_stdout:\r\n print(generation_info_js)\r\n\r\n if opts.do_not_show_images:\r\n processed.images = []\r\n\r\n return processed.images, generation_info_js, plaintext_to_html(processed.info)\r\n", "path": "modules/img2img.py"}], "after_files": [{"content": "import math\r\nimport os\r\nimport sys\r\nimport traceback\r\n\r\nimport numpy as np\r\nfrom PIL import Image, ImageOps, ImageChops\r\n\r\nfrom modules import devices\r\nfrom modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images\r\nfrom modules.shared import opts, state\r\nimport modules.shared as shared\r\nimport modules.processing as processing\r\nfrom modules.ui import plaintext_to_html\r\nimport modules.images as images\r\nimport modules.scripts\r\n\r\n\r\ndef process_batch(p, input_dir, output_dir, args):\r\n processing.fix_seed(p)\r\n\r\n images = [file for file in [os.path.join(input_dir, x) for x in os.listdir(input_dir)] if os.path.isfile(file)]\r\n\r\n print(f\"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.\")\r\n\r\n save_normally = output_dir == ''\r\n\r\n p.do_not_save_grid = True\r\n p.do_not_save_samples = not save_normally\r\n\r\n state.job_count = len(images) * p.n_iter\r\n\r\n for i, image in enumerate(images):\r\n state.job = f\"{i+1} out of {len(images)}\"\r\n if state.skipped:\r\n state.skipped = False\r\n\r\n if state.interrupted:\r\n break\r\n\r\n img = Image.open(image)\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n img = ImageOps.exif_transpose(img) \r\n p.init_images = [img] * p.batch_size\r\n\r\n proc = modules.scripts.scripts_img2img.run(p, *args)\r\n if proc is None:\r\n proc = process_images(p)\r\n\r\n for n, processed_image in enumerate(proc.images):\r\n filename = os.path.basename(image)\r\n\r\n if n > 0:\r\n left, right = os.path.splitext(filename)\r\n filename = f\"{left}-{n}{right}\"\r\n\r\n if not save_normally:\r\n processed_image.save(os.path.join(output_dir, filename))\r\n\r\n\r\ndef img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_img_inpaint, init_mask_inpaint, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args):\r\n is_inpaint = mode == 1\r\n is_batch = mode == 2\r\n\r\n if is_inpaint:\r\n # Drawn mask\r\n if mask_mode == 0:\r\n image = init_img_with_mask['image']\r\n mask = init_img_with_mask['mask']\r\n alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')\r\n mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')\r\n image = image.convert('RGB')\r\n # Uploaded mask\r\n else:\r\n image = init_img_inpaint\r\n mask = init_mask_inpaint\r\n # No mask\r\n else:\r\n image = init_img\r\n mask = None\r\n\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n image = ImageOps.exif_transpose(image) \r\n\r\n assert 0. 
<= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'\r\n\r\n p = StableDiffusionProcessingImg2Img(\r\n sd_model=shared.sd_model,\r\n outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,\r\n outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,\r\n prompt=prompt,\r\n negative_prompt=negative_prompt,\r\n styles=[prompt_style, prompt_style2],\r\n seed=seed,\r\n subseed=subseed,\r\n subseed_strength=subseed_strength,\r\n seed_resize_from_h=seed_resize_from_h,\r\n seed_resize_from_w=seed_resize_from_w,\r\n seed_enable_extras=seed_enable_extras,\r\n sampler_index=sampler_index,\r\n batch_size=batch_size,\r\n n_iter=n_iter,\r\n steps=steps,\r\n cfg_scale=cfg_scale,\r\n width=width,\r\n height=height,\r\n restore_faces=restore_faces,\r\n tiling=tiling,\r\n init_images=[image],\r\n mask=mask,\r\n mask_blur=mask_blur,\r\n inpainting_fill=inpainting_fill,\r\n resize_mode=resize_mode,\r\n denoising_strength=denoising_strength,\r\n inpaint_full_res=inpaint_full_res,\r\n inpaint_full_res_padding=inpaint_full_res_padding,\r\n inpainting_mask_invert=inpainting_mask_invert,\r\n )\r\n\r\n p.scripts = modules.scripts.scripts_txt2img\r\n p.script_args = args\r\n\r\n if shared.cmd_opts.enable_console_prompts:\r\n print(f\"\\nimg2img: {prompt}\", file=shared.progress_print_out)\r\n\r\n p.extra_generation_params[\"Mask blur\"] = mask_blur\r\n\r\n if is_batch:\r\n assert not shared.cmd_opts.hide_ui_dir_config, \"Launched with --hide-ui-dir-config, batch img2img disabled\"\r\n\r\n process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, args)\r\n\r\n processed = Processed(p, [], p.seed, \"\")\r\n else:\r\n processed = modules.scripts.scripts_img2img.run(p, *args)\r\n if processed is None:\r\n processed = process_images(p)\r\n\r\n shared.total_tqdm.clear()\r\n\r\n generation_info_js = processed.js()\r\n if opts.samples_log_stdout:\r\n print(generation_info_js)\r\n\r\n if opts.do_not_show_images:\r\n processed.images = []\r\n\r\n return processed.images, generation_info_js, plaintext_to_html(processed.info)\r\n", "path": "modules/img2img.py"}]} | 2,119 | 364 |
gh_patches_debug_5348 | rasdani/github-patches | git_diff | localstack__localstack-536 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix Java Lambda Handler Static Imports
By mistake, the auto-import functionality imported the wrong static methods. This fixes the issues reported in #534.
It is unrelated to JavaFX, which was only imported by mistake. I will prepare another PR with a few more tests; I don't understand how it could have passed the CI/CD build with the wrong imports.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/constants.py`
Content:
```
1 import os
2 import localstack_client.config
3
4 # LocalStack version
5 VERSION = '0.8.4'
6
7 # default AWS region
8 if 'DEFAULT_REGION' not in os.environ:
9 os.environ['DEFAULT_REGION'] = 'us-east-1'
10 DEFAULT_REGION = os.environ['DEFAULT_REGION']
11
12 # constant to represent the "local" region, i.e., local machine
13 REGION_LOCAL = 'local'
14
15 # dev environment
16 ENV_DEV = 'dev'
17
18 # backend service ports, for services that are behind a proxy (counting down from 4566)
19 DEFAULT_PORT_APIGATEWAY_BACKEND = 4566
20 DEFAULT_PORT_KINESIS_BACKEND = 4565
21 DEFAULT_PORT_DYNAMODB_BACKEND = 4564
22 DEFAULT_PORT_S3_BACKEND = 4563
23 DEFAULT_PORT_SNS_BACKEND = 4562
24 DEFAULT_PORT_SQS_BACKEND = 4561
25 DEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560
26 DEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559
27
28 DEFAULT_PORT_WEB_UI = 8080
29
30 LOCALHOST = 'localhost'
31
32 # version of the Maven dependency with Java utility code
33 LOCALSTACK_MAVEN_VERSION = '0.1.9'
34
35 # map of default service APIs and ports to be spun up (fetch map from localstack_client)
36 DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()
37
38 # host to bind to when starting the services
39 BIND_HOST = '0.0.0.0'
40
41 # AWS user account ID used for tests
42 TEST_AWS_ACCOUNT_ID = '000000000000'
43 os.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID
44
45 # root code folder
46 LOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
47
48 # virtualenv folder
49 LOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')
50 if not os.path.isdir(LOCALSTACK_VENV_FOLDER):
51 # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/
52 LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))
53
54 # API Gateway path to indicate a user request sent to the gateway
55 PATH_USER_REQUEST = '_user_request_'
56
57 # name of LocalStack Docker image
58 DOCKER_IMAGE_NAME = 'localstack/localstack'
59
60 # environment variable name to tag local test runs
61 ENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'
62
63 # content types
64 APPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'
65 APPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'
66 APPLICATION_JSON = 'application/json'
67
68 # Lambda defaults
69 LAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID
70
71 # installation constants
72 ELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.3.0.zip'
73 DYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'
74 ELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.13.8.jar'
75 STS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'
76
77 # API endpoint for analytics events
78 API_ENDPOINT = 'https://api.localstack.cloud/v1'
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/localstack/constants.py b/localstack/constants.py
--- a/localstack/constants.py
+++ b/localstack/constants.py
@@ -30,7 +30,7 @@
LOCALHOST = 'localhost'
# version of the Maven dependency with Java utility code
-LOCALSTACK_MAVEN_VERSION = '0.1.9'
+LOCALSTACK_MAVEN_VERSION = '0.1.10'
# map of default service APIs and ports to be spun up (fetch map from localstack_client)
DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()
| {"golden_diff": "diff --git a/localstack/constants.py b/localstack/constants.py\n--- a/localstack/constants.py\n+++ b/localstack/constants.py\n@@ -30,7 +30,7 @@\n LOCALHOST = 'localhost'\n \n # version of the Maven dependency with Java utility code\n-LOCALSTACK_MAVEN_VERSION = '0.1.9'\n+LOCALSTACK_MAVEN_VERSION = '0.1.10'\n \n # map of default service APIs and ports to be spun up (fetch map from localstack_client)\n DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()\n", "issue": "Fix Java Lambda Handler Static Imports\nBy mistake - autoimport functionality wrong static methods has been imported. This fix the issues reported by in #534 \r\n\r\nIt is unrelated to the JavaFX as that has been imported by mistake. Will prepare another PR with a bit more tests don't understand how it could have passed the CI / CD build with the wrong imports.\r\n\n", "before_files": [{"content": "import os\nimport localstack_client.config\n\n# LocalStack version\nVERSION = '0.8.4'\n\n# default AWS region\nif 'DEFAULT_REGION' not in os.environ:\n os.environ['DEFAULT_REGION'] = 'us-east-1'\nDEFAULT_REGION = os.environ['DEFAULT_REGION']\n\n# constant to represent the \"local\" region, i.e., local machine\nREGION_LOCAL = 'local'\n\n# dev environment\nENV_DEV = 'dev'\n\n# backend service ports, for services that are behind a proxy (counting down from 4566)\nDEFAULT_PORT_APIGATEWAY_BACKEND = 4566\nDEFAULT_PORT_KINESIS_BACKEND = 4565\nDEFAULT_PORT_DYNAMODB_BACKEND = 4564\nDEFAULT_PORT_S3_BACKEND = 4563\nDEFAULT_PORT_SNS_BACKEND = 4562\nDEFAULT_PORT_SQS_BACKEND = 4561\nDEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560\nDEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559\n\nDEFAULT_PORT_WEB_UI = 8080\n\nLOCALHOST = 'localhost'\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = '0.1.9'\n\n# map of default service APIs and ports to be spun up (fetch map from localstack_client)\nDEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()\n\n# host to bind to when starting the services\nBIND_HOST = '0.0.0.0'\n\n# AWS user account ID used for tests\nTEST_AWS_ACCOUNT_ID = '000000000000'\nos.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID\n\n# root code folder\nLOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\n\n# virtualenv folder\nLOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')\nif not os.path.isdir(LOCALSTACK_VENV_FOLDER):\n # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/\n LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))\n\n# API Gateway path to indicate a user request sent to the gateway\nPATH_USER_REQUEST = '_user_request_'\n\n# name of LocalStack Docker image\nDOCKER_IMAGE_NAME = 'localstack/localstack'\n\n# environment variable name to tag local test runs\nENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'\n\n# content types\nAPPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'\nAPPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'\nAPPLICATION_JSON = 'application/json'\n\n# Lambda defaults\nLAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID\n\n# installation constants\nELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.3.0.zip'\nDYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'\nELASTICMQ_JAR_URL = 
'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.13.8.jar'\nSTS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'\n\n# API endpoint for analytics events\nAPI_ENDPOINT = 'https://api.localstack.cloud/v1'\n", "path": "localstack/constants.py"}], "after_files": [{"content": "import os\nimport localstack_client.config\n\n# LocalStack version\nVERSION = '0.8.4'\n\n# default AWS region\nif 'DEFAULT_REGION' not in os.environ:\n os.environ['DEFAULT_REGION'] = 'us-east-1'\nDEFAULT_REGION = os.environ['DEFAULT_REGION']\n\n# constant to represent the \"local\" region, i.e., local machine\nREGION_LOCAL = 'local'\n\n# dev environment\nENV_DEV = 'dev'\n\n# backend service ports, for services that are behind a proxy (counting down from 4566)\nDEFAULT_PORT_APIGATEWAY_BACKEND = 4566\nDEFAULT_PORT_KINESIS_BACKEND = 4565\nDEFAULT_PORT_DYNAMODB_BACKEND = 4564\nDEFAULT_PORT_S3_BACKEND = 4563\nDEFAULT_PORT_SNS_BACKEND = 4562\nDEFAULT_PORT_SQS_BACKEND = 4561\nDEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560\nDEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559\n\nDEFAULT_PORT_WEB_UI = 8080\n\nLOCALHOST = 'localhost'\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = '0.1.10'\n\n# map of default service APIs and ports to be spun up (fetch map from localstack_client)\nDEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()\n\n# host to bind to when starting the services\nBIND_HOST = '0.0.0.0'\n\n# AWS user account ID used for tests\nTEST_AWS_ACCOUNT_ID = '000000000000'\nos.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID\n\n# root code folder\nLOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\n\n# virtualenv folder\nLOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')\nif not os.path.isdir(LOCALSTACK_VENV_FOLDER):\n # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/\n LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))\n\n# API Gateway path to indicate a user request sent to the gateway\nPATH_USER_REQUEST = '_user_request_'\n\n# name of LocalStack Docker image\nDOCKER_IMAGE_NAME = 'localstack/localstack'\n\n# environment variable name to tag local test runs\nENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'\n\n# content types\nAPPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'\nAPPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'\nAPPLICATION_JSON = 'application/json'\n\n# Lambda defaults\nLAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID\n\n# installation constants\nELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.3.0.zip'\nDYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'\nELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.13.8.jar'\nSTS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'\n\n# API endpoint for analytics events\nAPI_ENDPOINT = 'https://api.localstack.cloud/v1'\n", "path": "localstack/constants.py"}]} | 1,281 | 122 |
gh_patches_debug_4064 | rasdani/github-patches | git_diff | dmlc__dgl-490 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Data missing for JTNN
## ❓ Questions and Help
Hello, I have come across the problem that the website where you put your jtnn.zip is missing. Can you offer me a new website?
thanks!
--- END ISSUE ---
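For context, the loader shown below fetches the archive with DGL's download helpers. A minimal sketch of pointing it at an alternative mirror follows; the mirror URL is a placeholder, not an official location.

```python
# Sketch only: download the JTNN archive from a mirror and unpack it where
# JTNNDataset expects it.  The URL is a placeholder to be replaced.
from dgl.data.utils import download, extract_archive, get_download_dir

mirror_url = "https://example.com/jtnn.zip"  # placeholder mirror
data_dir = get_download_dir()
zip_path = "{}/jtnn.zip".format(data_dir)

download(mirror_url, path=zip_path)
extract_archive(zip_path, "{}/jtnn".format(data_dir))
```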
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/pytorch/jtnn/jtnn/datautils.py`
Content:
```
1 import torch
2 from torch.utils.data import Dataset
3 import numpy as np
4
5 import dgl
6 from dgl.data.utils import download, extract_archive, get_download_dir
7 from .mol_tree_nx import DGLMolTree
8 from .mol_tree import Vocab
9
10 from .mpn import mol2dgl_single as mol2dgl_enc
11 from .jtmpn import mol2dgl_single as mol2dgl_dec
12 from .jtmpn import ATOM_FDIM as ATOM_FDIM_DEC
13 from .jtmpn import BOND_FDIM as BOND_FDIM_DEC
14
15 _url = 'https://www.dropbox.com/s/4ypr0e0abcbsvoh/jtnn.zip?dl=1'
16
17 def _unpack_field(examples, field):
18 return [e[field] for e in examples]
19
20 def _set_node_id(mol_tree, vocab):
21 wid = []
22 for i, node in enumerate(mol_tree.nodes_dict):
23 mol_tree.nodes_dict[node]['idx'] = i
24 wid.append(vocab.get_index(mol_tree.nodes_dict[node]['smiles']))
25
26 return wid
27
28 class JTNNDataset(Dataset):
29 def __init__(self, data, vocab, training=True):
30 self.dir = get_download_dir()
31 self.zip_file_path='{}/jtnn.zip'.format(self.dir)
32 download(_url, path=self.zip_file_path)
33 extract_archive(self.zip_file_path, '{}/jtnn'.format(self.dir))
34 print('Loading data...')
35 data_file = '{}/jtnn/{}.txt'.format(self.dir, data)
36 with open(data_file) as f:
37 self.data = [line.strip("\r\n ").split()[0] for line in f]
38 self.vocab_file = '{}/jtnn/{}.txt'.format(self.dir, vocab)
39 print('Loading finished.')
40 print('\tNum samples:', len(self.data))
41 print('\tVocab file:', self.vocab_file)
42 self.training = training
43 self.vocab = Vocab([x.strip("\r\n ") for x in open(self.vocab_file)])
44
45 def __len__(self):
46 return len(self.data)
47
48 def __getitem__(self, idx):
49 smiles = self.data[idx]
50 mol_tree = DGLMolTree(smiles)
51 mol_tree.recover()
52 mol_tree.assemble()
53
54 wid = _set_node_id(mol_tree, self.vocab)
55
56 # prebuild the molecule graph
57 mol_graph, atom_x_enc, bond_x_enc = mol2dgl_enc(mol_tree.smiles)
58
59 result = {
60 'mol_tree': mol_tree,
61 'mol_graph': mol_graph,
62 'atom_x_enc': atom_x_enc,
63 'bond_x_enc': bond_x_enc,
64 'wid': wid,
65 }
66
67 if not self.training:
68 return result
69
70 # prebuild the candidate graph list
71 cands = []
72 for node_id, node in mol_tree.nodes_dict.items():
73 # fill in ground truth
74 if node['label'] not in node['cands']:
75 node['cands'].append(node['label'])
76 node['cand_mols'].append(node['label_mol'])
77
78 if node['is_leaf'] or len(node['cands']) == 1:
79 continue
80 cands.extend([(cand, mol_tree, node_id)
81 for cand in node['cand_mols']])
82 if len(cands) > 0:
83 cand_graphs, atom_x_dec, bond_x_dec, tree_mess_src_e, \
84 tree_mess_tgt_e, tree_mess_tgt_n = mol2dgl_dec(cands)
85 else:
86 cand_graphs = []
87 atom_x_dec = torch.zeros(0, ATOM_FDIM_DEC)
88 bond_x_dec = torch.zeros(0, BOND_FDIM_DEC)
89 tree_mess_src_e = torch.zeros(0, 2).long()
90 tree_mess_tgt_e = torch.zeros(0, 2).long()
91 tree_mess_tgt_n = torch.zeros(0).long()
92
93 # prebuild the stereoisomers
94 cands = mol_tree.stereo_cands
95 if len(cands) > 1:
96 if mol_tree.smiles3D not in cands:
97 cands.append(mol_tree.smiles3D)
98
99 stereo_graphs = [mol2dgl_enc(c) for c in cands]
100 stereo_cand_graphs, stereo_atom_x_enc, stereo_bond_x_enc = \
101 zip(*stereo_graphs)
102 stereo_atom_x_enc = torch.cat(stereo_atom_x_enc)
103 stereo_bond_x_enc = torch.cat(stereo_bond_x_enc)
104 stereo_cand_label = [(cands.index(mol_tree.smiles3D), len(cands))]
105 else:
106 stereo_cand_graphs = []
107 stereo_atom_x_enc = torch.zeros(0, atom_x_enc.shape[1])
108 stereo_bond_x_enc = torch.zeros(0, bond_x_enc.shape[1])
109 stereo_cand_label = []
110
111 result.update({
112 'cand_graphs': cand_graphs,
113 'atom_x_dec': atom_x_dec,
114 'bond_x_dec': bond_x_dec,
115 'tree_mess_src_e': tree_mess_src_e,
116 'tree_mess_tgt_e': tree_mess_tgt_e,
117 'tree_mess_tgt_n': tree_mess_tgt_n,
118 'stereo_cand_graphs': stereo_cand_graphs,
119 'stereo_atom_x_enc': stereo_atom_x_enc,
120 'stereo_bond_x_enc': stereo_bond_x_enc,
121 'stereo_cand_label': stereo_cand_label,
122 })
123
124 return result
125
126 class JTNNCollator(object):
127 def __init__(self, vocab, training):
128 self.vocab = vocab
129 self.training = training
130
131 @staticmethod
132 def _batch_and_set(graphs, atom_x, bond_x, flatten):
133 if flatten:
134 graphs = [g for f in graphs for g in f]
135 graph_batch = dgl.batch(graphs)
136 graph_batch.ndata['x'] = atom_x
137 graph_batch.edata.update({
138 'x': bond_x,
139 'src_x': atom_x.new(bond_x.shape[0], atom_x.shape[1]).zero_(),
140 })
141 return graph_batch
142
143 def __call__(self, examples):
144 # get list of trees
145 mol_trees = _unpack_field(examples, 'mol_tree')
146 wid = _unpack_field(examples, 'wid')
147 for _wid, mol_tree in zip(wid, mol_trees):
148 mol_tree.ndata['wid'] = torch.LongTensor(_wid)
149
150 # TODO: either support pickling or get around ctypes pointers using scipy
151 # batch molecule graphs
152 mol_graphs = _unpack_field(examples, 'mol_graph')
153 atom_x = torch.cat(_unpack_field(examples, 'atom_x_enc'))
154 bond_x = torch.cat(_unpack_field(examples, 'bond_x_enc'))
155 mol_graph_batch = self._batch_and_set(mol_graphs, atom_x, bond_x, False)
156
157 result = {
158 'mol_trees': mol_trees,
159 'mol_graph_batch': mol_graph_batch,
160 }
161
162 if not self.training:
163 return result
164
165 # batch candidate graphs
166 cand_graphs = _unpack_field(examples, 'cand_graphs')
167 cand_batch_idx = []
168 atom_x = torch.cat(_unpack_field(examples, 'atom_x_dec'))
169 bond_x = torch.cat(_unpack_field(examples, 'bond_x_dec'))
170 tree_mess_src_e = _unpack_field(examples, 'tree_mess_src_e')
171 tree_mess_tgt_e = _unpack_field(examples, 'tree_mess_tgt_e')
172 tree_mess_tgt_n = _unpack_field(examples, 'tree_mess_tgt_n')
173
174 n_graph_nodes = 0
175 n_tree_nodes = 0
176 for i in range(len(cand_graphs)):
177 tree_mess_tgt_e[i] += n_graph_nodes
178 tree_mess_src_e[i] += n_tree_nodes
179 tree_mess_tgt_n[i] += n_graph_nodes
180 n_graph_nodes += sum(g.number_of_nodes() for g in cand_graphs[i])
181 n_tree_nodes += mol_trees[i].number_of_nodes()
182 cand_batch_idx.extend([i] * len(cand_graphs[i]))
183 tree_mess_tgt_e = torch.cat(tree_mess_tgt_e)
184 tree_mess_src_e = torch.cat(tree_mess_src_e)
185 tree_mess_tgt_n = torch.cat(tree_mess_tgt_n)
186
187 cand_graph_batch = self._batch_and_set(cand_graphs, atom_x, bond_x, True)
188
189 # batch stereoisomers
190 stereo_cand_graphs = _unpack_field(examples, 'stereo_cand_graphs')
191 atom_x = torch.cat(_unpack_field(examples, 'stereo_atom_x_enc'))
192 bond_x = torch.cat(_unpack_field(examples, 'stereo_bond_x_enc'))
193 stereo_cand_batch_idx = []
194 for i in range(len(stereo_cand_graphs)):
195 stereo_cand_batch_idx.extend([i] * len(stereo_cand_graphs[i]))
196
197 if len(stereo_cand_batch_idx) > 0:
198 stereo_cand_labels = [
199 (label, length)
200 for ex in _unpack_field(examples, 'stereo_cand_label')
201 for label, length in ex
202 ]
203 stereo_cand_labels, stereo_cand_lengths = zip(*stereo_cand_labels)
204 stereo_cand_graph_batch = self._batch_and_set(
205 stereo_cand_graphs, atom_x, bond_x, True)
206 else:
207 stereo_cand_labels = []
208 stereo_cand_lengths = []
209 stereo_cand_graph_batch = None
210 stereo_cand_batch_idx = []
211
212 result.update({
213 'cand_graph_batch': cand_graph_batch,
214 'cand_batch_idx': cand_batch_idx,
215 'tree_mess_tgt_e': tree_mess_tgt_e,
216 'tree_mess_src_e': tree_mess_src_e,
217 'tree_mess_tgt_n': tree_mess_tgt_n,
218 'stereo_cand_graph_batch': stereo_cand_graph_batch,
219 'stereo_cand_batch_idx': stereo_cand_batch_idx,
220 'stereo_cand_labels': stereo_cand_labels,
221 'stereo_cand_lengths': stereo_cand_lengths,
222 })
223
224 return result
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/pytorch/jtnn/jtnn/datautils.py b/examples/pytorch/jtnn/jtnn/datautils.py
--- a/examples/pytorch/jtnn/jtnn/datautils.py
+++ b/examples/pytorch/jtnn/jtnn/datautils.py
@@ -12,7 +12,7 @@
from .jtmpn import ATOM_FDIM as ATOM_FDIM_DEC
from .jtmpn import BOND_FDIM as BOND_FDIM_DEC
-_url = 'https://www.dropbox.com/s/4ypr0e0abcbsvoh/jtnn.zip?dl=1'
+_url = 'https://s3-ap-southeast-1.amazonaws.com/dgl-data-cn/dataset/jtnn.zip'
def _unpack_field(examples, field):
return [e[field] for e in examples]
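
Note on the patch above: the only change is the hard-coded `_url` constant, which now points at an alternative hosting location for `jtnn.zip`. A small standalone probe such as the sketch below can be used to check which candidate mirror actually responds before editing `datautils.py`; the mirror list is illustrative, not an official DGL endpoint list.

```
# Illustrative helper (not part of DGL): probe candidate locations for jtnn.zip
# before hard-coding one of them into datautils.py.
import urllib.request

CANDIDATE_URLS = [
    'https://www.dropbox.com/s/4ypr0e0abcbsvoh/jtnn.zip?dl=1',               # old value
    'https://s3-ap-southeast-1.amazonaws.com/dgl-data-cn/dataset/jtnn.zip',  # value from the patch
]


def first_reachable(urls, timeout=10):
    """Return the first URL that answers an HTTP HEAD request, or None."""
    for url in urls:
        try:
            request = urllib.request.Request(url, method='HEAD')
            with urllib.request.urlopen(request, timeout=timeout) as response:
                if response.getcode() < 400:
                    return url
        except Exception:
            continue
    return None


if __name__ == '__main__':
    print(first_reachable(CANDIDATE_URLS))
```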
| {"golden_diff": "diff --git a/examples/pytorch/jtnn/jtnn/datautils.py b/examples/pytorch/jtnn/jtnn/datautils.py\n--- a/examples/pytorch/jtnn/jtnn/datautils.py\n+++ b/examples/pytorch/jtnn/jtnn/datautils.py\n@@ -12,7 +12,7 @@\n from .jtmpn import ATOM_FDIM as ATOM_FDIM_DEC\n from .jtmpn import BOND_FDIM as BOND_FDIM_DEC\n \n-_url = 'https://www.dropbox.com/s/4ypr0e0abcbsvoh/jtnn.zip?dl=1'\n+_url = 'https://s3-ap-southeast-1.amazonaws.com/dgl-data-cn/dataset/jtnn.zip'\n \n def _unpack_field(examples, field):\n return [e[field] for e in examples]\n", "issue": "Data missing for JTNN\n## \u2753 Questions and Help\r\n\r\n Hello,I come across the problem that the website you put your jtnn.zip is missing.Can you offer me a new website?\r\n thanks!\r\n\n", "before_files": [{"content": "import torch\nfrom torch.utils.data import Dataset\nimport numpy as np\n\nimport dgl\nfrom dgl.data.utils import download, extract_archive, get_download_dir\nfrom .mol_tree_nx import DGLMolTree\nfrom .mol_tree import Vocab\n\nfrom .mpn import mol2dgl_single as mol2dgl_enc\nfrom .jtmpn import mol2dgl_single as mol2dgl_dec\nfrom .jtmpn import ATOM_FDIM as ATOM_FDIM_DEC\nfrom .jtmpn import BOND_FDIM as BOND_FDIM_DEC\n\n_url = 'https://www.dropbox.com/s/4ypr0e0abcbsvoh/jtnn.zip?dl=1'\n\ndef _unpack_field(examples, field):\n return [e[field] for e in examples]\n\ndef _set_node_id(mol_tree, vocab):\n wid = []\n for i, node in enumerate(mol_tree.nodes_dict):\n mol_tree.nodes_dict[node]['idx'] = i\n wid.append(vocab.get_index(mol_tree.nodes_dict[node]['smiles']))\n\n return wid\n\nclass JTNNDataset(Dataset):\n def __init__(self, data, vocab, training=True):\n self.dir = get_download_dir()\n self.zip_file_path='{}/jtnn.zip'.format(self.dir)\n download(_url, path=self.zip_file_path)\n extract_archive(self.zip_file_path, '{}/jtnn'.format(self.dir))\n print('Loading data...')\n data_file = '{}/jtnn/{}.txt'.format(self.dir, data)\n with open(data_file) as f:\n self.data = [line.strip(\"\\r\\n \").split()[0] for line in f]\n self.vocab_file = '{}/jtnn/{}.txt'.format(self.dir, vocab)\n print('Loading finished.')\n print('\\tNum samples:', len(self.data))\n print('\\tVocab file:', self.vocab_file)\n self.training = training\n self.vocab = Vocab([x.strip(\"\\r\\n \") for x in open(self.vocab_file)])\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n smiles = self.data[idx]\n mol_tree = DGLMolTree(smiles)\n mol_tree.recover()\n mol_tree.assemble()\n\n wid = _set_node_id(mol_tree, self.vocab)\n\n # prebuild the molecule graph\n mol_graph, atom_x_enc, bond_x_enc = mol2dgl_enc(mol_tree.smiles)\n\n result = {\n 'mol_tree': mol_tree,\n 'mol_graph': mol_graph,\n 'atom_x_enc': atom_x_enc,\n 'bond_x_enc': bond_x_enc,\n 'wid': wid,\n }\n\n if not self.training:\n return result\n\n # prebuild the candidate graph list\n cands = []\n for node_id, node in mol_tree.nodes_dict.items():\n # fill in ground truth\n if node['label'] not in node['cands']:\n node['cands'].append(node['label'])\n node['cand_mols'].append(node['label_mol'])\n\n if node['is_leaf'] or len(node['cands']) == 1:\n continue\n cands.extend([(cand, mol_tree, node_id)\n for cand in node['cand_mols']])\n if len(cands) > 0:\n cand_graphs, atom_x_dec, bond_x_dec, tree_mess_src_e, \\\n tree_mess_tgt_e, tree_mess_tgt_n = mol2dgl_dec(cands)\n else:\n cand_graphs = []\n atom_x_dec = torch.zeros(0, ATOM_FDIM_DEC)\n bond_x_dec = torch.zeros(0, BOND_FDIM_DEC)\n tree_mess_src_e = torch.zeros(0, 2).long()\n tree_mess_tgt_e = torch.zeros(0, 2).long()\n 
tree_mess_tgt_n = torch.zeros(0).long()\n\n # prebuild the stereoisomers\n cands = mol_tree.stereo_cands\n if len(cands) > 1:\n if mol_tree.smiles3D not in cands:\n cands.append(mol_tree.smiles3D)\n\n stereo_graphs = [mol2dgl_enc(c) for c in cands]\n stereo_cand_graphs, stereo_atom_x_enc, stereo_bond_x_enc = \\\n zip(*stereo_graphs)\n stereo_atom_x_enc = torch.cat(stereo_atom_x_enc)\n stereo_bond_x_enc = torch.cat(stereo_bond_x_enc)\n stereo_cand_label = [(cands.index(mol_tree.smiles3D), len(cands))]\n else:\n stereo_cand_graphs = []\n stereo_atom_x_enc = torch.zeros(0, atom_x_enc.shape[1])\n stereo_bond_x_enc = torch.zeros(0, bond_x_enc.shape[1])\n stereo_cand_label = []\n\n result.update({\n 'cand_graphs': cand_graphs,\n 'atom_x_dec': atom_x_dec,\n 'bond_x_dec': bond_x_dec,\n 'tree_mess_src_e': tree_mess_src_e,\n 'tree_mess_tgt_e': tree_mess_tgt_e,\n 'tree_mess_tgt_n': tree_mess_tgt_n,\n 'stereo_cand_graphs': stereo_cand_graphs,\n 'stereo_atom_x_enc': stereo_atom_x_enc,\n 'stereo_bond_x_enc': stereo_bond_x_enc,\n 'stereo_cand_label': stereo_cand_label,\n })\n\n return result\n\nclass JTNNCollator(object):\n def __init__(self, vocab, training):\n self.vocab = vocab\n self.training = training\n\n @staticmethod\n def _batch_and_set(graphs, atom_x, bond_x, flatten):\n if flatten:\n graphs = [g for f in graphs for g in f]\n graph_batch = dgl.batch(graphs)\n graph_batch.ndata['x'] = atom_x\n graph_batch.edata.update({\n 'x': bond_x,\n 'src_x': atom_x.new(bond_x.shape[0], atom_x.shape[1]).zero_(),\n })\n return graph_batch\n\n def __call__(self, examples):\n # get list of trees\n mol_trees = _unpack_field(examples, 'mol_tree')\n wid = _unpack_field(examples, 'wid')\n for _wid, mol_tree in zip(wid, mol_trees):\n mol_tree.ndata['wid'] = torch.LongTensor(_wid)\n\n # TODO: either support pickling or get around ctypes pointers using scipy\n # batch molecule graphs\n mol_graphs = _unpack_field(examples, 'mol_graph')\n atom_x = torch.cat(_unpack_field(examples, 'atom_x_enc'))\n bond_x = torch.cat(_unpack_field(examples, 'bond_x_enc'))\n mol_graph_batch = self._batch_and_set(mol_graphs, atom_x, bond_x, False)\n\n result = {\n 'mol_trees': mol_trees,\n 'mol_graph_batch': mol_graph_batch,\n }\n\n if not self.training:\n return result\n\n # batch candidate graphs\n cand_graphs = _unpack_field(examples, 'cand_graphs')\n cand_batch_idx = []\n atom_x = torch.cat(_unpack_field(examples, 'atom_x_dec'))\n bond_x = torch.cat(_unpack_field(examples, 'bond_x_dec'))\n tree_mess_src_e = _unpack_field(examples, 'tree_mess_src_e')\n tree_mess_tgt_e = _unpack_field(examples, 'tree_mess_tgt_e')\n tree_mess_tgt_n = _unpack_field(examples, 'tree_mess_tgt_n')\n\n n_graph_nodes = 0\n n_tree_nodes = 0\n for i in range(len(cand_graphs)):\n tree_mess_tgt_e[i] += n_graph_nodes\n tree_mess_src_e[i] += n_tree_nodes\n tree_mess_tgt_n[i] += n_graph_nodes\n n_graph_nodes += sum(g.number_of_nodes() for g in cand_graphs[i])\n n_tree_nodes += mol_trees[i].number_of_nodes()\n cand_batch_idx.extend([i] * len(cand_graphs[i]))\n tree_mess_tgt_e = torch.cat(tree_mess_tgt_e)\n tree_mess_src_e = torch.cat(tree_mess_src_e)\n tree_mess_tgt_n = torch.cat(tree_mess_tgt_n)\n\n cand_graph_batch = self._batch_and_set(cand_graphs, atom_x, bond_x, True)\n\n # batch stereoisomers\n stereo_cand_graphs = _unpack_field(examples, 'stereo_cand_graphs')\n atom_x = torch.cat(_unpack_field(examples, 'stereo_atom_x_enc'))\n bond_x = torch.cat(_unpack_field(examples, 'stereo_bond_x_enc'))\n stereo_cand_batch_idx = []\n for i in range(len(stereo_cand_graphs)):\n 
stereo_cand_batch_idx.extend([i] * len(stereo_cand_graphs[i]))\n\n if len(stereo_cand_batch_idx) > 0:\n stereo_cand_labels = [\n (label, length)\n for ex in _unpack_field(examples, 'stereo_cand_label')\n for label, length in ex\n ]\n stereo_cand_labels, stereo_cand_lengths = zip(*stereo_cand_labels)\n stereo_cand_graph_batch = self._batch_and_set(\n stereo_cand_graphs, atom_x, bond_x, True)\n else:\n stereo_cand_labels = []\n stereo_cand_lengths = []\n stereo_cand_graph_batch = None\n stereo_cand_batch_idx = []\n\n result.update({\n 'cand_graph_batch': cand_graph_batch,\n 'cand_batch_idx': cand_batch_idx,\n 'tree_mess_tgt_e': tree_mess_tgt_e,\n 'tree_mess_src_e': tree_mess_src_e,\n 'tree_mess_tgt_n': tree_mess_tgt_n,\n 'stereo_cand_graph_batch': stereo_cand_graph_batch,\n 'stereo_cand_batch_idx': stereo_cand_batch_idx,\n 'stereo_cand_labels': stereo_cand_labels,\n 'stereo_cand_lengths': stereo_cand_lengths,\n })\n\n return result\n", "path": "examples/pytorch/jtnn/jtnn/datautils.py"}], "after_files": [{"content": "import torch\nfrom torch.utils.data import Dataset\nimport numpy as np\n\nimport dgl\nfrom dgl.data.utils import download, extract_archive, get_download_dir\nfrom .mol_tree_nx import DGLMolTree\nfrom .mol_tree import Vocab\n\nfrom .mpn import mol2dgl_single as mol2dgl_enc\nfrom .jtmpn import mol2dgl_single as mol2dgl_dec\nfrom .jtmpn import ATOM_FDIM as ATOM_FDIM_DEC\nfrom .jtmpn import BOND_FDIM as BOND_FDIM_DEC\n\n_url = 'https://s3-ap-southeast-1.amazonaws.com/dgl-data-cn/dataset/jtnn.zip'\n\ndef _unpack_field(examples, field):\n return [e[field] for e in examples]\n\ndef _set_node_id(mol_tree, vocab):\n wid = []\n for i, node in enumerate(mol_tree.nodes_dict):\n mol_tree.nodes_dict[node]['idx'] = i\n wid.append(vocab.get_index(mol_tree.nodes_dict[node]['smiles']))\n\n return wid\n\nclass JTNNDataset(Dataset):\n def __init__(self, data, vocab, training=True):\n self.dir = get_download_dir()\n self.zip_file_path='{}/jtnn.zip'.format(self.dir)\n download(_url, path=self.zip_file_path)\n extract_archive(self.zip_file_path, '{}/jtnn'.format(self.dir))\n print('Loading data...')\n data_file = '{}/jtnn/{}.txt'.format(self.dir, data)\n with open(data_file) as f:\n self.data = [line.strip(\"\\r\\n \").split()[0] for line in f]\n self.vocab_file = '{}/jtnn/{}.txt'.format(self.dir, vocab)\n print('Loading finished.')\n print('\\tNum samples:', len(self.data))\n print('\\tVocab file:', self.vocab_file)\n self.training = training\n self.vocab = Vocab([x.strip(\"\\r\\n \") for x in open(self.vocab_file)])\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n smiles = self.data[idx]\n mol_tree = DGLMolTree(smiles)\n mol_tree.recover()\n mol_tree.assemble()\n\n wid = _set_node_id(mol_tree, self.vocab)\n\n # prebuild the molecule graph\n mol_graph, atom_x_enc, bond_x_enc = mol2dgl_enc(mol_tree.smiles)\n\n result = {\n 'mol_tree': mol_tree,\n 'mol_graph': mol_graph,\n 'atom_x_enc': atom_x_enc,\n 'bond_x_enc': bond_x_enc,\n 'wid': wid,\n }\n\n if not self.training:\n return result\n\n # prebuild the candidate graph list\n cands = []\n for node_id, node in mol_tree.nodes_dict.items():\n # fill in ground truth\n if node['label'] not in node['cands']:\n node['cands'].append(node['label'])\n node['cand_mols'].append(node['label_mol'])\n\n if node['is_leaf'] or len(node['cands']) == 1:\n continue\n cands.extend([(cand, mol_tree, node_id)\n for cand in node['cand_mols']])\n if len(cands) > 0:\n cand_graphs, atom_x_dec, bond_x_dec, tree_mess_src_e, \\\n tree_mess_tgt_e, 
tree_mess_tgt_n = mol2dgl_dec(cands)\n else:\n cand_graphs = []\n atom_x_dec = torch.zeros(0, ATOM_FDIM_DEC)\n bond_x_dec = torch.zeros(0, BOND_FDIM_DEC)\n tree_mess_src_e = torch.zeros(0, 2).long()\n tree_mess_tgt_e = torch.zeros(0, 2).long()\n tree_mess_tgt_n = torch.zeros(0).long()\n\n # prebuild the stereoisomers\n cands = mol_tree.stereo_cands\n if len(cands) > 1:\n if mol_tree.smiles3D not in cands:\n cands.append(mol_tree.smiles3D)\n\n stereo_graphs = [mol2dgl_enc(c) for c in cands]\n stereo_cand_graphs, stereo_atom_x_enc, stereo_bond_x_enc = \\\n zip(*stereo_graphs)\n stereo_atom_x_enc = torch.cat(stereo_atom_x_enc)\n stereo_bond_x_enc = torch.cat(stereo_bond_x_enc)\n stereo_cand_label = [(cands.index(mol_tree.smiles3D), len(cands))]\n else:\n stereo_cand_graphs = []\n stereo_atom_x_enc = torch.zeros(0, atom_x_enc.shape[1])\n stereo_bond_x_enc = torch.zeros(0, bond_x_enc.shape[1])\n stereo_cand_label = []\n\n result.update({\n 'cand_graphs': cand_graphs,\n 'atom_x_dec': atom_x_dec,\n 'bond_x_dec': bond_x_dec,\n 'tree_mess_src_e': tree_mess_src_e,\n 'tree_mess_tgt_e': tree_mess_tgt_e,\n 'tree_mess_tgt_n': tree_mess_tgt_n,\n 'stereo_cand_graphs': stereo_cand_graphs,\n 'stereo_atom_x_enc': stereo_atom_x_enc,\n 'stereo_bond_x_enc': stereo_bond_x_enc,\n 'stereo_cand_label': stereo_cand_label,\n })\n\n return result\n\nclass JTNNCollator(object):\n def __init__(self, vocab, training):\n self.vocab = vocab\n self.training = training\n\n @staticmethod\n def _batch_and_set(graphs, atom_x, bond_x, flatten):\n if flatten:\n graphs = [g for f in graphs for g in f]\n graph_batch = dgl.batch(graphs)\n graph_batch.ndata['x'] = atom_x\n graph_batch.edata.update({\n 'x': bond_x,\n 'src_x': atom_x.new(bond_x.shape[0], atom_x.shape[1]).zero_(),\n })\n return graph_batch\n\n def __call__(self, examples):\n # get list of trees\n mol_trees = _unpack_field(examples, 'mol_tree')\n wid = _unpack_field(examples, 'wid')\n for _wid, mol_tree in zip(wid, mol_trees):\n mol_tree.ndata['wid'] = torch.LongTensor(_wid)\n\n # TODO: either support pickling or get around ctypes pointers using scipy\n # batch molecule graphs\n mol_graphs = _unpack_field(examples, 'mol_graph')\n atom_x = torch.cat(_unpack_field(examples, 'atom_x_enc'))\n bond_x = torch.cat(_unpack_field(examples, 'bond_x_enc'))\n mol_graph_batch = self._batch_and_set(mol_graphs, atom_x, bond_x, False)\n\n result = {\n 'mol_trees': mol_trees,\n 'mol_graph_batch': mol_graph_batch,\n }\n\n if not self.training:\n return result\n\n # batch candidate graphs\n cand_graphs = _unpack_field(examples, 'cand_graphs')\n cand_batch_idx = []\n atom_x = torch.cat(_unpack_field(examples, 'atom_x_dec'))\n bond_x = torch.cat(_unpack_field(examples, 'bond_x_dec'))\n tree_mess_src_e = _unpack_field(examples, 'tree_mess_src_e')\n tree_mess_tgt_e = _unpack_field(examples, 'tree_mess_tgt_e')\n tree_mess_tgt_n = _unpack_field(examples, 'tree_mess_tgt_n')\n\n n_graph_nodes = 0\n n_tree_nodes = 0\n for i in range(len(cand_graphs)):\n tree_mess_tgt_e[i] += n_graph_nodes\n tree_mess_src_e[i] += n_tree_nodes\n tree_mess_tgt_n[i] += n_graph_nodes\n n_graph_nodes += sum(g.number_of_nodes() for g in cand_graphs[i])\n n_tree_nodes += mol_trees[i].number_of_nodes()\n cand_batch_idx.extend([i] * len(cand_graphs[i]))\n tree_mess_tgt_e = torch.cat(tree_mess_tgt_e)\n tree_mess_src_e = torch.cat(tree_mess_src_e)\n tree_mess_tgt_n = torch.cat(tree_mess_tgt_n)\n\n cand_graph_batch = self._batch_and_set(cand_graphs, atom_x, bond_x, True)\n\n # batch stereoisomers\n stereo_cand_graphs = 
_unpack_field(examples, 'stereo_cand_graphs')\n atom_x = torch.cat(_unpack_field(examples, 'stereo_atom_x_enc'))\n bond_x = torch.cat(_unpack_field(examples, 'stereo_bond_x_enc'))\n stereo_cand_batch_idx = []\n for i in range(len(stereo_cand_graphs)):\n stereo_cand_batch_idx.extend([i] * len(stereo_cand_graphs[i]))\n\n if len(stereo_cand_batch_idx) > 0:\n stereo_cand_labels = [\n (label, length)\n for ex in _unpack_field(examples, 'stereo_cand_label')\n for label, length in ex\n ]\n stereo_cand_labels, stereo_cand_lengths = zip(*stereo_cand_labels)\n stereo_cand_graph_batch = self._batch_and_set(\n stereo_cand_graphs, atom_x, bond_x, True)\n else:\n stereo_cand_labels = []\n stereo_cand_lengths = []\n stereo_cand_graph_batch = None\n stereo_cand_batch_idx = []\n\n result.update({\n 'cand_graph_batch': cand_graph_batch,\n 'cand_batch_idx': cand_batch_idx,\n 'tree_mess_tgt_e': tree_mess_tgt_e,\n 'tree_mess_src_e': tree_mess_src_e,\n 'tree_mess_tgt_n': tree_mess_tgt_n,\n 'stereo_cand_graph_batch': stereo_cand_graph_batch,\n 'stereo_cand_batch_idx': stereo_cand_batch_idx,\n 'stereo_cand_labels': stereo_cand_labels,\n 'stereo_cand_lengths': stereo_cand_lengths,\n })\n\n return result\n", "path": "examples/pytorch/jtnn/jtnn/datautils.py"}]} | 3,127 | 185 |
gh_patches_debug_1472 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-1324 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Stephen Girard] resource list shows sources as None
To be fixed for the 0.13.0 (Stephen Girard) release. An invocation of `dbt run` shows:
```
Found 162 models, 320 tests, 0 archives, 0 analyses, 236 macros, 2 operations, 4 seed files, 34 None
^
|
```
We should also add an assert, as this should fail immediately in development (it's easy to miss!)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/dbt/compilation.py`
Content:
```
1 import itertools
2 import os
3 import json
4 from collections import OrderedDict, defaultdict
5 import sqlparse
6
7 import dbt.utils
8 import dbt.include
9 import dbt.tracking
10
11 from dbt import deprecations
12 from dbt.utils import get_materialization, NodeType, is_type
13 from dbt.linker import Linker
14
15 import dbt.compat
16 import dbt.context.runtime
17 import dbt.contracts.project
18 import dbt.exceptions
19 import dbt.flags
20 import dbt.loader
21 import dbt.config
22 from dbt.contracts.graph.compiled import CompiledNode, CompiledGraph
23
24 from dbt.clients.system import write_json
25 from dbt.logger import GLOBAL_LOGGER as logger
26
27 graph_file_name = 'graph.gpickle'
28
29
30 def print_compile_stats(stats):
31 names = {
32 NodeType.Model: 'models',
33 NodeType.Test: 'tests',
34 NodeType.Archive: 'archives',
35 NodeType.Analysis: 'analyses',
36 NodeType.Macro: 'macros',
37 NodeType.Operation: 'operations',
38 NodeType.Seed: 'seed files',
39 }
40
41 results = {k: 0 for k in names.keys()}
42 results.update(stats)
43
44 stat_line = ", ".join(
45 ["{} {}".format(ct, names.get(t)) for t, ct in results.items()])
46
47 logger.info("Found {}".format(stat_line))
48
49
50 def _add_prepended_cte(prepended_ctes, new_cte):
51 for dct in prepended_ctes:
52 if dct['id'] == new_cte['id']:
53 dct['sql'] = new_cte['sql']
54 return
55 prepended_ctes.append(new_cte)
56
57
58 def _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):
59 for new_cte in new_prepended_ctes:
60 _add_prepended_cte(prepended_ctes, new_cte)
61
62
63 def prepend_ctes(model, manifest):
64 model, _, manifest = recursively_prepend_ctes(model, manifest)
65
66 return (model, manifest)
67
68
69 def recursively_prepend_ctes(model, manifest):
70 if model.extra_ctes_injected:
71 return (model, model.extra_ctes, manifest)
72
73 if dbt.flags.STRICT_MODE:
74 # ensure that the cte we're adding to is compiled
75 CompiledNode(**model.serialize())
76
77 prepended_ctes = []
78
79 for cte in model.extra_ctes:
80 cte_id = cte['id']
81 cte_to_add = manifest.nodes.get(cte_id)
82 cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(
83 cte_to_add, manifest)
84 _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)
85 new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))
86 sql = ' {} as (\n{}\n)'.format(new_cte_name, cte_to_add.compiled_sql)
87 _add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})
88
89 model.prepend_ctes(prepended_ctes)
90
91 manifest.nodes[model.unique_id] = model
92
93 return (model, prepended_ctes, manifest)
94
95
96 class Compiler(object):
97 def __init__(self, config):
98 self.config = config
99
100 def initialize(self):
101 dbt.clients.system.make_directory(self.config.target_path)
102 dbt.clients.system.make_directory(self.config.modules_path)
103
104 def compile_node(self, node, manifest, extra_context=None):
105 if extra_context is None:
106 extra_context = {}
107
108 logger.debug("Compiling {}".format(node.get('unique_id')))
109
110 data = node.to_dict()
111 data.update({
112 'compiled': False,
113 'compiled_sql': None,
114 'extra_ctes_injected': False,
115 'extra_ctes': [],
116 'injected_sql': None,
117 })
118 compiled_node = CompiledNode(**data)
119
120 context = dbt.context.runtime.generate(
121 compiled_node, self.config, manifest)
122 context.update(extra_context)
123
124 compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(
125 node.get('raw_sql'),
126 context,
127 node)
128
129 compiled_node.compiled = True
130
131 injected_node, _ = prepend_ctes(compiled_node, manifest)
132
133 should_wrap = {NodeType.Test, NodeType.Operation}
134 if injected_node.resource_type in should_wrap:
135 # data tests get wrapped in count(*)
136 # TODO : move this somewhere more reasonable
137 if 'data' in injected_node.tags and \
138 is_type(injected_node, NodeType.Test):
139 injected_node.wrapped_sql = (
140 "select count(*) from (\n{test_sql}\n) sbq").format(
141 test_sql=injected_node.injected_sql)
142 else:
143 # don't wrap schema tests or analyses.
144 injected_node.wrapped_sql = injected_node.injected_sql
145
146 elif is_type(injected_node, NodeType.Archive):
147 # unfortunately we do everything automagically for
148 # archives. in the future it'd be nice to generate
149 # the SQL at the parser level.
150 pass
151
152 elif(is_type(injected_node, NodeType.Model) and
153 get_materialization(injected_node) == 'ephemeral'):
154 pass
155
156 else:
157 injected_node.wrapped_sql = None
158
159 return injected_node
160
161 def write_graph_file(self, linker, manifest):
162 filename = graph_file_name
163 graph_path = os.path.join(self.config.target_path, filename)
164 linker.write_graph(graph_path, manifest)
165
166 def link_node(self, linker, node, manifest):
167 linker.add_node(node.unique_id)
168
169 for dependency in node.depends_on_nodes:
170 if manifest.nodes.get(dependency):
171 linker.dependency(
172 node.unique_id,
173 (manifest.nodes.get(dependency).unique_id))
174 else:
175 dbt.exceptions.dependency_not_found(node, dependency)
176
177 def link_graph(self, linker, manifest):
178 for node in manifest.nodes.values():
179 self.link_node(linker, node, manifest)
180
181 cycle = linker.find_cycles()
182
183 if cycle:
184 raise RuntimeError("Found a cycle: {}".format(cycle))
185
186 def compile(self, manifest):
187 linker = Linker()
188
189 self.link_graph(linker, manifest)
190
191 stats = defaultdict(int)
192
193 for node_name, node in itertools.chain(
194 manifest.nodes.items(),
195 manifest.macros.items()):
196 stats[node.resource_type] += 1
197
198 self.write_graph_file(linker, manifest)
199 print_compile_stats(stats)
200
201 return linker
202
203
204 def compile_manifest(config, manifest):
205 compiler = Compiler(config)
206 compiler.initialize()
207 return compiler.compile(manifest)
208
209
210 def compile_node(adapter, config, node, manifest, extra_context):
211 compiler = Compiler(config)
212 node = compiler.compile_node(node, manifest, extra_context)
213 node = _inject_runtime_config(adapter, node, extra_context)
214
215 if(node.injected_sql is not None and
216 not (dbt.utils.is_type(node, NodeType.Archive))):
217 logger.debug('Writing injected SQL for node "{}"'.format(
218 node.unique_id))
219
220 written_path = dbt.writer.write_node(
221 node,
222 config.target_path,
223 'compiled',
224 node.injected_sql)
225
226 node.build_path = written_path
227
228 return node
229
230
231 def _inject_runtime_config(adapter, node, extra_context):
232 wrapped_sql = node.wrapped_sql
233 context = _node_context(adapter, node)
234 context.update(extra_context)
235 sql = dbt.clients.jinja.get_rendered(wrapped_sql, context)
236 node.wrapped_sql = sql
237 return node
238
239
240 def _node_context(adapter, node):
241 return {
242 "run_started_at": dbt.tracking.active_user.run_started_at,
243 "invocation_id": dbt.tracking.active_user.invocation_id,
244 }
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py
--- a/core/dbt/compilation.py
+++ b/core/dbt/compilation.py
@@ -36,6 +36,7 @@
NodeType.Macro: 'macros',
NodeType.Operation: 'operations',
NodeType.Seed: 'seed files',
+ NodeType.Source: 'sources',
}
results = {k: 0 for k in names.keys()}
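
The issue also asks for an assert so that an unmapped resource type fails fast in development instead of rendering as `None`. The sketch below illustrates that idea outside the dbt codebase; the function name is made up and plain strings stand in for `NodeType` members.

```
# Illustrative "fail fast" variant of the stats line: every resource type that
# appears in the compile stats must have a registered display name.
def format_compile_stats(stats, names):
    missing = set(stats) - set(names)
    assert not missing, \
        "no display name registered for resource types: {}".format(sorted(map(str, missing)))
    results = {resource_type: 0 for resource_type in names}
    results.update(stats)
    return ", ".join("{} {}".format(count, names[resource_type])
                     for resource_type, count in results.items())


# Plain strings stand in for NodeType members here.
names = {"model": "models", "seed": "seed files", "source": "sources"}
print(format_compile_stats({"model": 162, "seed": 4, "source": 34}, names))
# -> 162 models, 4 seed files, 34 sources
```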
| {"golden_diff": "diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py\n--- a/core/dbt/compilation.py\n+++ b/core/dbt/compilation.py\n@@ -36,6 +36,7 @@\n NodeType.Macro: 'macros',\n NodeType.Operation: 'operations',\n NodeType.Seed: 'seed files',\n+ NodeType.Source: 'sources',\n }\n \n results = {k: 0 for k in names.keys()}\n", "issue": "[Stephen Girard] resource list shows sources as None\nTo be fixed for the 0.13.0 (Stephen Girard) release. An invocation of `dbt run` shows:\r\n\r\n```\r\nFound 162 models, 320 tests, 0 archives, 0 analyses, 236 macros, 2 operations, 4 seed files, 34 None\r\n ^\r\n |\r\n```\r\n\r\nWe should also add an assert, as this should fail immediately in development (it's easy to miss!)\n", "before_files": [{"content": "import itertools\nimport os\nimport json\nfrom collections import OrderedDict, defaultdict\nimport sqlparse\n\nimport dbt.utils\nimport dbt.include\nimport dbt.tracking\n\nfrom dbt import deprecations\nfrom dbt.utils import get_materialization, NodeType, is_type\nfrom dbt.linker import Linker\n\nimport dbt.compat\nimport dbt.context.runtime\nimport dbt.contracts.project\nimport dbt.exceptions\nimport dbt.flags\nimport dbt.loader\nimport dbt.config\nfrom dbt.contracts.graph.compiled import CompiledNode, CompiledGraph\n\nfrom dbt.clients.system import write_json\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\ngraph_file_name = 'graph.gpickle'\n\n\ndef print_compile_stats(stats):\n names = {\n NodeType.Model: 'models',\n NodeType.Test: 'tests',\n NodeType.Archive: 'archives',\n NodeType.Analysis: 'analyses',\n NodeType.Macro: 'macros',\n NodeType.Operation: 'operations',\n NodeType.Seed: 'seed files',\n }\n\n results = {k: 0 for k in names.keys()}\n results.update(stats)\n\n stat_line = \", \".join(\n [\"{} {}\".format(ct, names.get(t)) for t, ct in results.items()])\n\n logger.info(\"Found {}\".format(stat_line))\n\n\ndef _add_prepended_cte(prepended_ctes, new_cte):\n for dct in prepended_ctes:\n if dct['id'] == new_cte['id']:\n dct['sql'] = new_cte['sql']\n return\n prepended_ctes.append(new_cte)\n\n\ndef _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):\n for new_cte in new_prepended_ctes:\n _add_prepended_cte(prepended_ctes, new_cte)\n\n\ndef prepend_ctes(model, manifest):\n model, _, manifest = recursively_prepend_ctes(model, manifest)\n\n return (model, manifest)\n\n\ndef recursively_prepend_ctes(model, manifest):\n if model.extra_ctes_injected:\n return (model, model.extra_ctes, manifest)\n\n if dbt.flags.STRICT_MODE:\n # ensure that the cte we're adding to is compiled\n CompiledNode(**model.serialize())\n\n prepended_ctes = []\n\n for cte in model.extra_ctes:\n cte_id = cte['id']\n cte_to_add = manifest.nodes.get(cte_id)\n cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(\n cte_to_add, manifest)\n _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)\n new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))\n sql = ' {} as (\\n{}\\n)'.format(new_cte_name, cte_to_add.compiled_sql)\n _add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})\n\n model.prepend_ctes(prepended_ctes)\n\n manifest.nodes[model.unique_id] = model\n\n return (model, prepended_ctes, manifest)\n\n\nclass Compiler(object):\n def __init__(self, config):\n self.config = config\n\n def initialize(self):\n dbt.clients.system.make_directory(self.config.target_path)\n dbt.clients.system.make_directory(self.config.modules_path)\n\n def compile_node(self, node, manifest, extra_context=None):\n if extra_context is None:\n 
extra_context = {}\n\n logger.debug(\"Compiling {}\".format(node.get('unique_id')))\n\n data = node.to_dict()\n data.update({\n 'compiled': False,\n 'compiled_sql': None,\n 'extra_ctes_injected': False,\n 'extra_ctes': [],\n 'injected_sql': None,\n })\n compiled_node = CompiledNode(**data)\n\n context = dbt.context.runtime.generate(\n compiled_node, self.config, manifest)\n context.update(extra_context)\n\n compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(\n node.get('raw_sql'),\n context,\n node)\n\n compiled_node.compiled = True\n\n injected_node, _ = prepend_ctes(compiled_node, manifest)\n\n should_wrap = {NodeType.Test, NodeType.Operation}\n if injected_node.resource_type in should_wrap:\n # data tests get wrapped in count(*)\n # TODO : move this somewhere more reasonable\n if 'data' in injected_node.tags and \\\n is_type(injected_node, NodeType.Test):\n injected_node.wrapped_sql = (\n \"select count(*) from (\\n{test_sql}\\n) sbq\").format(\n test_sql=injected_node.injected_sql)\n else:\n # don't wrap schema tests or analyses.\n injected_node.wrapped_sql = injected_node.injected_sql\n\n elif is_type(injected_node, NodeType.Archive):\n # unfortunately we do everything automagically for\n # archives. in the future it'd be nice to generate\n # the SQL at the parser level.\n pass\n\n elif(is_type(injected_node, NodeType.Model) and\n get_materialization(injected_node) == 'ephemeral'):\n pass\n\n else:\n injected_node.wrapped_sql = None\n\n return injected_node\n\n def write_graph_file(self, linker, manifest):\n filename = graph_file_name\n graph_path = os.path.join(self.config.target_path, filename)\n linker.write_graph(graph_path, manifest)\n\n def link_node(self, linker, node, manifest):\n linker.add_node(node.unique_id)\n\n for dependency in node.depends_on_nodes:\n if manifest.nodes.get(dependency):\n linker.dependency(\n node.unique_id,\n (manifest.nodes.get(dependency).unique_id))\n else:\n dbt.exceptions.dependency_not_found(node, dependency)\n\n def link_graph(self, linker, manifest):\n for node in manifest.nodes.values():\n self.link_node(linker, node, manifest)\n\n cycle = linker.find_cycles()\n\n if cycle:\n raise RuntimeError(\"Found a cycle: {}\".format(cycle))\n\n def compile(self, manifest):\n linker = Linker()\n\n self.link_graph(linker, manifest)\n\n stats = defaultdict(int)\n\n for node_name, node in itertools.chain(\n manifest.nodes.items(),\n manifest.macros.items()):\n stats[node.resource_type] += 1\n\n self.write_graph_file(linker, manifest)\n print_compile_stats(stats)\n\n return linker\n\n\ndef compile_manifest(config, manifest):\n compiler = Compiler(config)\n compiler.initialize()\n return compiler.compile(manifest)\n\n\ndef compile_node(adapter, config, node, manifest, extra_context):\n compiler = Compiler(config)\n node = compiler.compile_node(node, manifest, extra_context)\n node = _inject_runtime_config(adapter, node, extra_context)\n\n if(node.injected_sql is not None and\n not (dbt.utils.is_type(node, NodeType.Archive))):\n logger.debug('Writing injected SQL for node \"{}\"'.format(\n node.unique_id))\n\n written_path = dbt.writer.write_node(\n node,\n config.target_path,\n 'compiled',\n node.injected_sql)\n\n node.build_path = written_path\n\n return node\n\n\ndef _inject_runtime_config(adapter, node, extra_context):\n wrapped_sql = node.wrapped_sql\n context = _node_context(adapter, node)\n context.update(extra_context)\n sql = dbt.clients.jinja.get_rendered(wrapped_sql, context)\n node.wrapped_sql = sql\n return node\n\n\ndef 
_node_context(adapter, node):\n return {\n \"run_started_at\": dbt.tracking.active_user.run_started_at,\n \"invocation_id\": dbt.tracking.active_user.invocation_id,\n }\n", "path": "core/dbt/compilation.py"}], "after_files": [{"content": "import itertools\nimport os\nimport json\nfrom collections import OrderedDict, defaultdict\nimport sqlparse\n\nimport dbt.utils\nimport dbt.include\nimport dbt.tracking\n\nfrom dbt import deprecations\nfrom dbt.utils import get_materialization, NodeType, is_type\nfrom dbt.linker import Linker\n\nimport dbt.compat\nimport dbt.context.runtime\nimport dbt.contracts.project\nimport dbt.exceptions\nimport dbt.flags\nimport dbt.loader\nimport dbt.config\nfrom dbt.contracts.graph.compiled import CompiledNode, CompiledGraph\n\nfrom dbt.clients.system import write_json\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\ngraph_file_name = 'graph.gpickle'\n\n\ndef print_compile_stats(stats):\n names = {\n NodeType.Model: 'models',\n NodeType.Test: 'tests',\n NodeType.Archive: 'archives',\n NodeType.Analysis: 'analyses',\n NodeType.Macro: 'macros',\n NodeType.Operation: 'operations',\n NodeType.Seed: 'seed files',\n NodeType.Source: 'sources',\n }\n\n results = {k: 0 for k in names.keys()}\n results.update(stats)\n\n stat_line = \", \".join(\n [\"{} {}\".format(ct, names.get(t)) for t, ct in results.items()])\n\n logger.info(\"Found {}\".format(stat_line))\n\n\ndef _add_prepended_cte(prepended_ctes, new_cte):\n for dct in prepended_ctes:\n if dct['id'] == new_cte['id']:\n dct['sql'] = new_cte['sql']\n return\n prepended_ctes.append(new_cte)\n\n\ndef _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):\n for new_cte in new_prepended_ctes:\n _add_prepended_cte(prepended_ctes, new_cte)\n\n\ndef prepend_ctes(model, manifest):\n model, _, manifest = recursively_prepend_ctes(model, manifest)\n\n return (model, manifest)\n\n\ndef recursively_prepend_ctes(model, manifest):\n if model.extra_ctes_injected:\n return (model, model.extra_ctes, manifest)\n\n if dbt.flags.STRICT_MODE:\n # ensure that the cte we're adding to is compiled\n CompiledNode(**model.serialize())\n\n prepended_ctes = []\n\n for cte in model.extra_ctes:\n cte_id = cte['id']\n cte_to_add = manifest.nodes.get(cte_id)\n cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(\n cte_to_add, manifest)\n _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)\n new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))\n sql = ' {} as (\\n{}\\n)'.format(new_cte_name, cte_to_add.compiled_sql)\n _add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})\n\n model.prepend_ctes(prepended_ctes)\n\n manifest.nodes[model.unique_id] = model\n\n return (model, prepended_ctes, manifest)\n\n\nclass Compiler(object):\n def __init__(self, config):\n self.config = config\n\n def initialize(self):\n dbt.clients.system.make_directory(self.config.target_path)\n dbt.clients.system.make_directory(self.config.modules_path)\n\n def compile_node(self, node, manifest, extra_context=None):\n if extra_context is None:\n extra_context = {}\n\n logger.debug(\"Compiling {}\".format(node.get('unique_id')))\n\n data = node.to_dict()\n data.update({\n 'compiled': False,\n 'compiled_sql': None,\n 'extra_ctes_injected': False,\n 'extra_ctes': [],\n 'injected_sql': None,\n })\n compiled_node = CompiledNode(**data)\n\n context = dbt.context.runtime.generate(\n compiled_node, self.config, manifest)\n context.update(extra_context)\n\n compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(\n node.get('raw_sql'),\n 
context,\n node)\n\n compiled_node.compiled = True\n\n injected_node, _ = prepend_ctes(compiled_node, manifest)\n\n should_wrap = {NodeType.Test, NodeType.Operation}\n if injected_node.resource_type in should_wrap:\n # data tests get wrapped in count(*)\n # TODO : move this somewhere more reasonable\n if 'data' in injected_node.tags and \\\n is_type(injected_node, NodeType.Test):\n injected_node.wrapped_sql = (\n \"select count(*) from (\\n{test_sql}\\n) sbq\").format(\n test_sql=injected_node.injected_sql)\n else:\n # don't wrap schema tests or analyses.\n injected_node.wrapped_sql = injected_node.injected_sql\n\n elif is_type(injected_node, NodeType.Archive):\n # unfortunately we do everything automagically for\n # archives. in the future it'd be nice to generate\n # the SQL at the parser level.\n pass\n\n elif(is_type(injected_node, NodeType.Model) and\n get_materialization(injected_node) == 'ephemeral'):\n pass\n\n else:\n injected_node.wrapped_sql = None\n\n return injected_node\n\n def write_graph_file(self, linker, manifest):\n filename = graph_file_name\n graph_path = os.path.join(self.config.target_path, filename)\n linker.write_graph(graph_path, manifest)\n\n def link_node(self, linker, node, manifest):\n linker.add_node(node.unique_id)\n\n for dependency in node.depends_on_nodes:\n if manifest.nodes.get(dependency):\n linker.dependency(\n node.unique_id,\n (manifest.nodes.get(dependency).unique_id))\n else:\n dbt.exceptions.dependency_not_found(node, dependency)\n\n def link_graph(self, linker, manifest):\n for node in manifest.nodes.values():\n self.link_node(linker, node, manifest)\n\n cycle = linker.find_cycles()\n\n if cycle:\n raise RuntimeError(\"Found a cycle: {}\".format(cycle))\n\n def compile(self, manifest):\n linker = Linker()\n\n self.link_graph(linker, manifest)\n\n stats = defaultdict(int)\n\n for node_name, node in itertools.chain(\n manifest.nodes.items(),\n manifest.macros.items()):\n stats[node.resource_type] += 1\n\n self.write_graph_file(linker, manifest)\n print_compile_stats(stats)\n\n return linker\n\n\ndef compile_manifest(config, manifest):\n compiler = Compiler(config)\n compiler.initialize()\n return compiler.compile(manifest)\n\n\ndef compile_node(adapter, config, node, manifest, extra_context):\n compiler = Compiler(config)\n node = compiler.compile_node(node, manifest, extra_context)\n node = _inject_runtime_config(adapter, node, extra_context)\n\n if(node.injected_sql is not None and\n not (dbt.utils.is_type(node, NodeType.Archive))):\n logger.debug('Writing injected SQL for node \"{}\"'.format(\n node.unique_id))\n\n written_path = dbt.writer.write_node(\n node,\n config.target_path,\n 'compiled',\n node.injected_sql)\n\n node.build_path = written_path\n\n return node\n\n\ndef _inject_runtime_config(adapter, node, extra_context):\n wrapped_sql = node.wrapped_sql\n context = _node_context(adapter, node)\n context.update(extra_context)\n sql = dbt.clients.jinja.get_rendered(wrapped_sql, context)\n node.wrapped_sql = sql\n return node\n\n\ndef _node_context(adapter, node):\n return {\n \"run_started_at\": dbt.tracking.active_user.run_started_at,\n \"invocation_id\": dbt.tracking.active_user.invocation_id,\n }\n", "path": "core/dbt/compilation.py"}]} | 2,692 | 101 |
gh_patches_debug_21589 | rasdani/github-patches | git_diff | conan-io__conan-center-index-7891 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] sqlpp11/0.60: scripts are not in the package
sqlpp11 provides some scripts that can be used by the consumer: https://github.com/rbock/sqlpp11/tree/develop/scripts
But these scripts are not in the conan package.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/sqlpp11/all/conanfile.py`
Content:
```
1 from conans import ConanFile, tools
2 import os
3
4 required_conan_version = ">=1.33.0"
5
6
7 class Sqlpp11Conan(ConanFile):
8 name = "sqlpp11"
9 license = "BSD-2-Clause"
10 url = "https://github.com/conan-io/conan-center-index"
11 homepage = "https://github.com/rbock/sqlpp11"
12 description = "A type safe SQL template library for C++"
13 topics = ("SQL", "DSL", "embedded", "data-base")
14 no_copy_source = True
15
16 @property
17 def _source_subfolder(self):
18 return "source_subfolder"
19
20 def requirements(self):
21 self.requires("date/3.0.1")
22
23 def package_id(self):
24 self.info.header_only()
25
26 def source(self):
27 tools.get(**self.conan_data["sources"][self.version],
28 destination=self._source_subfolder, strip_root=True)
29
30 def package(self):
31 self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
32 self.copy("*.h", dst="include", src=os.path.join(self._source_subfolder, "include"))
33
34 def package_info(self):
35 self.cpp_info.filenames["cmake_find_package"] = "Sqlpp11"
36 self.cpp_info.filenames["cmake_find_package_multi"] = "Sqlpp11"
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/sqlpp11/all/conanfile.py b/recipes/sqlpp11/all/conanfile.py
--- a/recipes/sqlpp11/all/conanfile.py
+++ b/recipes/sqlpp11/all/conanfile.py
@@ -24,13 +24,21 @@
self.info.header_only()
def source(self):
- tools.get(**self.conan_data["sources"][self.version],
- destination=self._source_subfolder, strip_root=True)
+ tools.get(
+ **self.conan_data["sources"][self.version],
+ destination=self._source_subfolder,
+ strip_root=True
+ )
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("*.h", dst="include", src=os.path.join(self._source_subfolder, "include"))
+ self.copy("*", dst="bin", src=os.path.join(self._source_subfolder, "scripts"))
def package_info(self):
self.cpp_info.filenames["cmake_find_package"] = "Sqlpp11"
self.cpp_info.filenames["cmake_find_package_multi"] = "Sqlpp11"
+
+ bindir = os.path.join(self.package_folder, "bin")
+ self.output.info("Appending PATH environment variable: {}".format(bindir))
+ self.env_info.PATH.append(bindir)
| {"golden_diff": "diff --git a/recipes/sqlpp11/all/conanfile.py b/recipes/sqlpp11/all/conanfile.py\n--- a/recipes/sqlpp11/all/conanfile.py\n+++ b/recipes/sqlpp11/all/conanfile.py\n@@ -24,13 +24,21 @@\n self.info.header_only()\n \n def source(self):\n- tools.get(**self.conan_data[\"sources\"][self.version],\n- destination=self._source_subfolder, strip_root=True)\n+ tools.get(\n+ **self.conan_data[\"sources\"][self.version],\n+ destination=self._source_subfolder,\n+ strip_root=True\n+ )\n \n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"*.h\", dst=\"include\", src=os.path.join(self._source_subfolder, \"include\"))\n+ self.copy(\"*\", dst=\"bin\", src=os.path.join(self._source_subfolder, \"scripts\"))\n \n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"Sqlpp11\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"Sqlpp11\"\n+\n+ bindir = os.path.join(self.package_folder, \"bin\")\n+ self.output.info(\"Appending PATH environment variable: {}\".format(bindir))\n+ self.env_info.PATH.append(bindir)\n", "issue": "[package] sqlpp11/0.60: scripts are not in the package\nsqlpp11 provides some scripts that can be used by the consumer: https://github.com/rbock/sqlpp11/tree/develop/scripts \r\nBut these scripts are not in the conan package.\n", "before_files": [{"content": "from conans import ConanFile, tools\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass Sqlpp11Conan(ConanFile):\n name = \"sqlpp11\"\n license = \"BSD-2-Clause\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/rbock/sqlpp11\"\n description = \"A type safe SQL template library for C++\"\n topics = (\"SQL\", \"DSL\", \"embedded\", \"data-base\")\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def requirements(self):\n self.requires(\"date/3.0.1\")\n\n def package_id(self):\n self.info.header_only()\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"*.h\", dst=\"include\", src=os.path.join(self._source_subfolder, \"include\"))\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"Sqlpp11\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"Sqlpp11\"\n", "path": "recipes/sqlpp11/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, tools\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass Sqlpp11Conan(ConanFile):\n name = \"sqlpp11\"\n license = \"BSD-2-Clause\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/rbock/sqlpp11\"\n description = \"A type safe SQL template library for C++\"\n topics = (\"SQL\", \"DSL\", \"embedded\", \"data-base\")\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def requirements(self):\n self.requires(\"date/3.0.1\")\n\n def package_id(self):\n self.info.header_only()\n\n def source(self):\n tools.get(\n **self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder,\n strip_root=True\n )\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"*.h\", dst=\"include\", src=os.path.join(self._source_subfolder, \"include\"))\n self.copy(\"*\", dst=\"bin\", 
src=os.path.join(self._source_subfolder, \"scripts\"))\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"Sqlpp11\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"Sqlpp11\"\n\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bindir))\n self.env_info.PATH.append(bindir)\n", "path": "recipes/sqlpp11/all/conanfile.py"}]} | 695 | 305 |
gh_patches_debug_29861 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-1786 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ubuntu 22.10's upgrade to 1.1.5 broke stuff. ("'NoneType' object is not iterable") w/fix
Hey fwiw in Ubuntu 22.10 (beta), I just got an upgrade of solaar from 1.1.1 to 1.1.5 ([solaar_1.1.5+dfsg-1_all.deb](https://packages.ubuntu.com/kinetic/solaar)) and noticed that solaar was now broken. Running it manually resulted in an error that ended like this:
```
....
File "/usr/share/solaar/lib/logitech_receiver/device.py", line 352, in persister
self._persister = _configuration.persister(self)
File "/usr/share/solaar/lib/solaar/configuration.py", line 214, in persister
_load()
File "/usr/share/solaar/lib/solaar/configuration.py", line 71, in _load
_config = _cleanup_load(loaded_config)
File "/usr/share/solaar/lib/solaar/configuration.py", line 137, in _cleanup_load
for element in c:
TypeError: 'NoneType' object is not iterable
```
This was running as the user (not root) and seemed to be an issue parsing the `~/.config/solaar/config.yaml` file. When I looked at that file, it was completely blank, though there was a `config.json` file there, modified five months back, that looked like this:
```
{
"_version": "1.1.1"
}
```
On a hunch, I set the blank `config.yaml` to instead look like:
```
_version:1.1.5
```
and started solaar and it came back! It repopulated the `config.yaml`, so I'm guessing it just wanted any values in there so it wouldn't error out.
While this is probably a bug with Ubuntu's packaging and may even be due to me running as a normal user, the `configuration.py` module's `_cleanup_load(c)` function should probably behave gracefully if there are no YAML entries in the file rather than crashing.
That is all! This is probably going to have to also be addressed in the Ubuntu release, but I figured this was upstream so maybe it should be here as well. Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/solaar/configuration.py`
Content:
```
1 # -*- python-mode -*-
2
3 ## Copyright (C) 2012-2013 Daniel Pavel
4 ##
5 ## This program is free software; you can redistribute it and/or modify
6 ## it under the terms of the GNU General Public License as published by
7 ## the Free Software Foundation; either version 2 of the License, or
8 ## (at your option) any later version.
9 ##
10 ## This program is distributed in the hope that it will be useful,
11 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ## GNU General Public License for more details.
14 ##
15 ## You should have received a copy of the GNU General Public License along
16 ## with this program; if not, write to the Free Software Foundation, Inc.,
17 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
19 import json as _json
20 import os as _os
21 import os.path as _path
22
23 from logging import DEBUG as _DEBUG
24 from logging import INFO as _INFO
25 from logging import getLogger
26 from threading import Lock as _Lock
27 from threading import Timer as _Timer
28
29 import yaml as _yaml
30
31 from gi.repository import GLib
32 from logitech_receiver.common import NamedInt as _NamedInt
33 from solaar import __version__
34
35 _log = getLogger(__name__)
36 del getLogger
37
38 _XDG_CONFIG_HOME = _os.environ.get('XDG_CONFIG_HOME') or _path.expanduser(_path.join('~', '.config'))
39 _file_path = _path.join(_XDG_CONFIG_HOME, 'solaar', 'config.json')
40 _yaml_file_path = _path.join(_XDG_CONFIG_HOME, 'solaar', 'config.yaml')
41
42 _KEY_VERSION = '_version'
43 _KEY_NAME = '_NAME'
44 _KEY_WPID = '_wpid'
45 _KEY_SERIAL = '_serial'
46 _KEY_MODEL_ID = '_modelId'
47 _KEY_UNIT_ID = '_unitId'
48 _KEY_ABSENT = '_absent'
49 _KEY_SENSITIVE = '_sensitive'
50 _config = []
51
52
53 def _load():
54 global _config
55 loaded_config = []
56 if _path.isfile(_yaml_file_path):
57 try:
58 with open(_yaml_file_path) as config_file:
59 loaded_config = _yaml.safe_load(config_file)
60 except Exception as e:
61 _log.error('failed to load from %s: %s', _yaml_file_path, e)
62 elif _path.isfile(_file_path):
63 try:
64 with open(_file_path) as config_file:
65 loaded_config = _json.load(config_file)
66 except Exception as e:
67 _log.error('failed to load from %s: %s', _file_path, e)
68 loaded_config = _convert_json(loaded_config)
69 if _log.isEnabledFor(_DEBUG):
70 _log.debug('load => %s', loaded_config)
71 _config = _cleanup_load(loaded_config)
72
73
74 save_timer = None
75 save_lock = _Lock()
76
77
78 def save(defer=False):
79 global save_timer
80 if not _config:
81 return
82 dirname = _os.path.dirname(_yaml_file_path)
83 if not _path.isdir(dirname):
84 try:
85 _os.makedirs(dirname)
86 except Exception:
87 _log.error('failed to create %s', dirname)
88 return
89 if not defer:
90 do_save()
91 else:
92 with save_lock:
93 if not save_timer:
94 save_timer = _Timer(5.0, lambda: GLib.idle_add(do_save))
95 save_timer.start()
96
97
98 def do_save():
99 global save_timer
100 with save_lock:
101 if save_timer:
102 save_timer.cancel()
103 save_timer = None
104 try:
105 with open(_yaml_file_path, 'w') as config_file:
106 _yaml.dump(_config, config_file, default_flow_style=None, width=150)
107 if _log.isEnabledFor(_INFO):
108 _log.info('saved %s to %s', _config, _yaml_file_path)
109 except Exception as e:
110 _log.error('failed to save to %s: %s', _yaml_file_path, e)
111
112
113 def _convert_json(json_dict):
114 config = [json_dict.get(_KEY_VERSION)]
115 for key, dev in json_dict.items():
116 key = key.split(':')
117 if len(key) == 2:
118 dev[_KEY_WPID] = dev.get(_KEY_WPID) if dev.get(_KEY_WPID) else key[0]
119 dev[_KEY_SERIAL] = dev.get(_KEY_SERIAL) if dev.get(_KEY_SERIAL) else key[1]
120 for k, v in dev.items():
121 if type(k) == str and not k.startswith('_') and type(v) == dict: # convert string keys to ints
122 v = {int(dk) if type(dk) == str else dk: dv for dk, dv in v.items()}
123 dev[k] = v
124 for k in ['mouse-gestures', 'dpi-sliding']:
125 v = dev.get(k, None)
126 if v is True or v is False:
127 dev.pop(k)
128 if '_name' in dev:
129 dev[_KEY_NAME] = dev['_name']
130 dev.pop('_name')
131 config.append(dev)
132 return config
133
134
135 def _cleanup_load(c):
136 _config = [__version__]
137 for element in c:
138 if isinstance(element, dict):
139 divert = element.get('divert-keys')
140 if divert:
141 sliding = element.get('dpi-sliding')
142 if sliding: # convert old-style dpi-sliding setting to divert-keys entry
143 divert[int(sliding)] = 3
144 element.pop('dpi-sliding', None)
145 gestures = element.get('mouse-gestures')
146 if gestures: # convert old-style mouse-gestures setting to divert-keys entry
147 divert[int(gestures)] = 2
148 element.pop('mouse-gestures', None)
149 # remove any string entries (from bad conversions)
150 element['divert-keys'] = {k: v for k, v in divert.items() if isinstance(k, int)}
151 # convert to device entries
152 element = _DeviceEntry(**element)
153 _config.append(element)
154 return _config
155
156
157 class _DeviceEntry(dict):
158 def __init__(self, **kwargs):
159 super().__init__(**kwargs)
160
161 def __setitem__(self, key, value):
162 super().__setitem__(key, value)
163 save(defer=True)
164
165 def update(self, device, modelId):
166 if device.name and device.name != self.get(_KEY_NAME):
167 super().__setitem__(_KEY_NAME, device.name)
168 if device.wpid and device.wpid != self.get(_KEY_WPID):
169 super().__setitem__(_KEY_WPID, device.wpid)
170 if device.serial and device.serial != '?' and device.serial != self.get(_KEY_SERIAL):
171 super().__setitem__(_KEY_SERIAL, device.serial)
172 if modelId and modelId != self.get(_KEY_MODEL_ID):
173 super().__setitem__(_KEY_MODEL_ID, modelId)
174 if device.unitId and device.unitId != self.get(_KEY_UNIT_ID):
175 super().__setitem__(_KEY_UNIT_ID, device.unitId)
176
177 def get_sensitivity(self, name):
178 return self.get(_KEY_SENSITIVE, {}).get(name, False)
179
180 def set_sensitivity(self, name, value):
181 sensitives = self.get(_KEY_SENSITIVE, {})
182 if sensitives.get(name) != value:
183 sensitives[name] = value
184 self.__setitem__(_KEY_SENSITIVE, sensitives)
185
186
187 def device_representer(dumper, data):
188 return dumper.represent_mapping('tag:yaml.org,2002:map', data)
189
190
191 _yaml.add_representer(_DeviceEntry, device_representer)
192
193
194 def named_int_representer(dumper, data):
195 return dumper.represent_scalar('tag:yaml.org,2002:int', str(int(data)))
196
197
198 _yaml.add_representer(_NamedInt, named_int_representer)
199
200
201 # A device can be identified by a combination of WPID and serial number (for receiver-connected devices)
202 # or a combination of modelId and unitId (for direct-connected devices).
203 # But some devices have empty (all zero) modelIds and unitIds. Use the device name as a backup for the modelId.
204 # The worst situation is a receiver-connected device that Solaar has never seen on-line
205 # that is directly connected. Here there is no way to realize that the two devices are the same.
206 # So new entries are not created for unseen off-line receiver-connected devices except for those with protocol 1.0
207 def persister(device):
208 def match(wpid, serial, modelId, unitId, c):
209 return ((wpid and wpid == c.get(_KEY_WPID) and serial and serial == c.get(_KEY_SERIAL)) or (
210 modelId and modelId != '000000000000' and modelId == c.get(_KEY_MODEL_ID) and unitId
211 and unitId == c.get(_KEY_UNIT_ID)
212 ))
213
214 if not _config:
215 _load()
216 entry = None
217 modelId = device.modelId if device.modelId != '000000000000' else device.name if device.modelId else None
218 for c in _config:
219 if isinstance(c, _DeviceEntry) and match(device.wpid, device.serial, modelId, device.unitId, c):
220 entry = c
221 break
222 if not entry:
223 if not device.online and not device.serial: # don't create entry for offline devices without serial number
224 if _log.isEnabledFor(_INFO):
225 _log.info('not setting up persister for offline device %s with missing serial number', device.name)
226 return
227 if _log.isEnabledFor(_INFO):
228 _log.info('setting up persister for device %s', device.name)
229 entry = _DeviceEntry()
230 _config.append(entry)
231 entry.update(device, modelId)
232 return entry
233
234
235 def attach_to(device):
236 pass
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/solaar/configuration.py b/lib/solaar/configuration.py
--- a/lib/solaar/configuration.py
+++ b/lib/solaar/configuration.py
@@ -134,23 +134,26 @@
def _cleanup_load(c):
_config = [__version__]
- for element in c:
- if isinstance(element, dict):
- divert = element.get('divert-keys')
- if divert:
- sliding = element.get('dpi-sliding')
- if sliding: # convert old-style dpi-sliding setting to divert-keys entry
- divert[int(sliding)] = 3
- element.pop('dpi-sliding', None)
- gestures = element.get('mouse-gestures')
- if gestures: # convert old-style mouse-gestures setting to divert-keys entry
- divert[int(gestures)] = 2
- element.pop('mouse-gestures', None)
- # remove any string entries (from bad conversions)
- element['divert-keys'] = {k: v for k, v in divert.items() if isinstance(k, int)}
- # convert to device entries
- element = _DeviceEntry(**element)
- _config.append(element)
+ try:
+ for element in c:
+ if isinstance(element, dict):
+ divert = element.get('divert-keys')
+ if divert:
+ sliding = element.get('dpi-sliding')
+ if sliding: # convert old-style dpi-sliding setting to divert-keys entry
+ divert[int(sliding)] = 3
+ element.pop('dpi-sliding', None)
+ gestures = element.get('mouse-gestures')
+ if gestures: # convert old-style mouse-gestures setting to divert-keys entry
+ divert[int(gestures)] = 2
+ element.pop('mouse-gestures', None)
+ # remove any string entries (from bad conversions)
+ element['divert-keys'] = {k: v for k, v in divert.items() if isinstance(k, int)}
+ # convert to device entries
+ element = _DeviceEntry(**element)
+ _config.append(element)
+ except Exception as e:
+ _log.warn('Exception processing config.yaml file, ignoring contents: %s', e)
return _config
| {"golden_diff": "diff --git a/lib/solaar/configuration.py b/lib/solaar/configuration.py\n--- a/lib/solaar/configuration.py\n+++ b/lib/solaar/configuration.py\n@@ -134,23 +134,26 @@\n \n def _cleanup_load(c):\n _config = [__version__]\n- for element in c:\n- if isinstance(element, dict):\n- divert = element.get('divert-keys')\n- if divert:\n- sliding = element.get('dpi-sliding')\n- if sliding: # convert old-style dpi-sliding setting to divert-keys entry\n- divert[int(sliding)] = 3\n- element.pop('dpi-sliding', None)\n- gestures = element.get('mouse-gestures')\n- if gestures: # convert old-style mouse-gestures setting to divert-keys entry\n- divert[int(gestures)] = 2\n- element.pop('mouse-gestures', None)\n- # remove any string entries (from bad conversions)\n- element['divert-keys'] = {k: v for k, v in divert.items() if isinstance(k, int)}\n- # convert to device entries\n- element = _DeviceEntry(**element)\n- _config.append(element)\n+ try:\n+ for element in c:\n+ if isinstance(element, dict):\n+ divert = element.get('divert-keys')\n+ if divert:\n+ sliding = element.get('dpi-sliding')\n+ if sliding: # convert old-style dpi-sliding setting to divert-keys entry\n+ divert[int(sliding)] = 3\n+ element.pop('dpi-sliding', None)\n+ gestures = element.get('mouse-gestures')\n+ if gestures: # convert old-style mouse-gestures setting to divert-keys entry\n+ divert[int(gestures)] = 2\n+ element.pop('mouse-gestures', None)\n+ # remove any string entries (from bad conversions)\n+ element['divert-keys'] = {k: v for k, v in divert.items() if isinstance(k, int)}\n+ # convert to device entries\n+ element = _DeviceEntry(**element)\n+ _config.append(element)\n+ except Exception as e:\n+ _log.warn('Exception processing config.yaml file, ignoring contents: %s', e)\n return _config\n", "issue": "Ubuntu 22.10's upgrade to 1.1.5 broke stuff. (\"'NoneType' object is not iterable\") w/fix\nHey fwiw in Ubuntu 22.10 (beta), I just got an upgrade of solaar from 1.1.1 to 1.1.5 ([solaar_1.1.5+dfsg-1_all.deb](https://packages.ubuntu.com/kinetic/solaar)) and noticed that solaar was now broken. Running it manually resulted in an error that ended like this:\r\n\r\n```\r\n....\r\n File \"/usr/share/solaar/lib/logitech_receiver/device.py\", line 352, in persister\r\n self._persister = _configuration.persister(self)\r\n File \"/usr/share/solaar/lib/solaar/configuration.py\", line 214, in persister\r\n _load()\r\n File \"/usr/share/solaar/lib/solaar/configuration.py\", line 71, in _load\r\n _config = _cleanup_load(loaded_config)\r\n File \"/usr/share/solaar/lib/solaar/configuration.py\", line 137, in _cleanup_load\r\n for element in c:\r\nTypeError: 'NoneType' object is not iterable\r\n```\r\n\r\nThis was running as the user (not rute) and seemed to be an issue parsing the `~/.config/solaar/config.yaml` file. When I looked at that file, it was completely blank, though there was a file there `config.json` modified five months back that looked like this:\r\n\r\n```\r\n{\r\n \"_version\": \"1.1.1\"\r\n}\r\n```\r\n\r\nOn a hunch, I set the blank `config.yaml` to instead look like:\r\n\r\n```\r\n_version:1.1.5\r\n```\r\n\r\nand started solar and it came back! 
It repopulated the config.yaml, so I'm guessing it just wanted any values in there so it wouldn't error out.\r\n\r\nWhile this is probably a bug w/ubuntu's packaging and may even be due to me running as a normal user, the `configuration.yaml` file's `_cleanup_load(c)` function should probably gracefully behave if there are no yaml entries in there rather than silently crashing.\r\n\r\nThat is all! This is probably going to have to also be addressed in the Ubuntu release, but I figured this was upstream so maybe it should be here as well. Thanks!\r\n\n", "before_files": [{"content": "# -*- python-mode -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport json as _json\nimport os as _os\nimport os.path as _path\n\nfrom logging import DEBUG as _DEBUG\nfrom logging import INFO as _INFO\nfrom logging import getLogger\nfrom threading import Lock as _Lock\nfrom threading import Timer as _Timer\n\nimport yaml as _yaml\n\nfrom gi.repository import GLib\nfrom logitech_receiver.common import NamedInt as _NamedInt\nfrom solaar import __version__\n\n_log = getLogger(__name__)\ndel getLogger\n\n_XDG_CONFIG_HOME = _os.environ.get('XDG_CONFIG_HOME') or _path.expanduser(_path.join('~', '.config'))\n_file_path = _path.join(_XDG_CONFIG_HOME, 'solaar', 'config.json')\n_yaml_file_path = _path.join(_XDG_CONFIG_HOME, 'solaar', 'config.yaml')\n\n_KEY_VERSION = '_version'\n_KEY_NAME = '_NAME'\n_KEY_WPID = '_wpid'\n_KEY_SERIAL = '_serial'\n_KEY_MODEL_ID = '_modelId'\n_KEY_UNIT_ID = '_unitId'\n_KEY_ABSENT = '_absent'\n_KEY_SENSITIVE = '_sensitive'\n_config = []\n\n\ndef _load():\n global _config\n loaded_config = []\n if _path.isfile(_yaml_file_path):\n try:\n with open(_yaml_file_path) as config_file:\n loaded_config = _yaml.safe_load(config_file)\n except Exception as e:\n _log.error('failed to load from %s: %s', _yaml_file_path, e)\n elif _path.isfile(_file_path):\n try:\n with open(_file_path) as config_file:\n loaded_config = _json.load(config_file)\n except Exception as e:\n _log.error('failed to load from %s: %s', _file_path, e)\n loaded_config = _convert_json(loaded_config)\n if _log.isEnabledFor(_DEBUG):\n _log.debug('load => %s', loaded_config)\n _config = _cleanup_load(loaded_config)\n\n\nsave_timer = None\nsave_lock = _Lock()\n\n\ndef save(defer=False):\n global save_timer\n if not _config:\n return\n dirname = _os.path.dirname(_yaml_file_path)\n if not _path.isdir(dirname):\n try:\n _os.makedirs(dirname)\n except Exception:\n _log.error('failed to create %s', dirname)\n return\n if not defer:\n do_save()\n else:\n with save_lock:\n if not save_timer:\n save_timer = _Timer(5.0, lambda: GLib.idle_add(do_save))\n save_timer.start()\n\n\ndef do_save():\n global save_timer\n with save_lock:\n if save_timer:\n save_timer.cancel()\n save_timer = None\n try:\n with open(_yaml_file_path, 
'w') as config_file:\n _yaml.dump(_config, config_file, default_flow_style=None, width=150)\n if _log.isEnabledFor(_INFO):\n _log.info('saved %s to %s', _config, _yaml_file_path)\n except Exception as e:\n _log.error('failed to save to %s: %s', _yaml_file_path, e)\n\n\ndef _convert_json(json_dict):\n config = [json_dict.get(_KEY_VERSION)]\n for key, dev in json_dict.items():\n key = key.split(':')\n if len(key) == 2:\n dev[_KEY_WPID] = dev.get(_KEY_WPID) if dev.get(_KEY_WPID) else key[0]\n dev[_KEY_SERIAL] = dev.get(_KEY_SERIAL) if dev.get(_KEY_SERIAL) else key[1]\n for k, v in dev.items():\n if type(k) == str and not k.startswith('_') and type(v) == dict: # convert string keys to ints\n v = {int(dk) if type(dk) == str else dk: dv for dk, dv in v.items()}\n dev[k] = v\n for k in ['mouse-gestures', 'dpi-sliding']:\n v = dev.get(k, None)\n if v is True or v is False:\n dev.pop(k)\n if '_name' in dev:\n dev[_KEY_NAME] = dev['_name']\n dev.pop('_name')\n config.append(dev)\n return config\n\n\ndef _cleanup_load(c):\n _config = [__version__]\n for element in c:\n if isinstance(element, dict):\n divert = element.get('divert-keys')\n if divert:\n sliding = element.get('dpi-sliding')\n if sliding: # convert old-style dpi-sliding setting to divert-keys entry\n divert[int(sliding)] = 3\n element.pop('dpi-sliding', None)\n gestures = element.get('mouse-gestures')\n if gestures: # convert old-style mouse-gestures setting to divert-keys entry\n divert[int(gestures)] = 2\n element.pop('mouse-gestures', None)\n # remove any string entries (from bad conversions)\n element['divert-keys'] = {k: v for k, v in divert.items() if isinstance(k, int)}\n # convert to device entries\n element = _DeviceEntry(**element)\n _config.append(element)\n return _config\n\n\nclass _DeviceEntry(dict):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def __setitem__(self, key, value):\n super().__setitem__(key, value)\n save(defer=True)\n\n def update(self, device, modelId):\n if device.name and device.name != self.get(_KEY_NAME):\n super().__setitem__(_KEY_NAME, device.name)\n if device.wpid and device.wpid != self.get(_KEY_WPID):\n super().__setitem__(_KEY_WPID, device.wpid)\n if device.serial and device.serial != '?' and device.serial != self.get(_KEY_SERIAL):\n super().__setitem__(_KEY_SERIAL, device.serial)\n if modelId and modelId != self.get(_KEY_MODEL_ID):\n super().__setitem__(_KEY_MODEL_ID, modelId)\n if device.unitId and device.unitId != self.get(_KEY_UNIT_ID):\n super().__setitem__(_KEY_UNIT_ID, device.unitId)\n\n def get_sensitivity(self, name):\n return self.get(_KEY_SENSITIVE, {}).get(name, False)\n\n def set_sensitivity(self, name, value):\n sensitives = self.get(_KEY_SENSITIVE, {})\n if sensitives.get(name) != value:\n sensitives[name] = value\n self.__setitem__(_KEY_SENSITIVE, sensitives)\n\n\ndef device_representer(dumper, data):\n return dumper.represent_mapping('tag:yaml.org,2002:map', data)\n\n\n_yaml.add_representer(_DeviceEntry, device_representer)\n\n\ndef named_int_representer(dumper, data):\n return dumper.represent_scalar('tag:yaml.org,2002:int', str(int(data)))\n\n\n_yaml.add_representer(_NamedInt, named_int_representer)\n\n\n# A device can be identified by a combination of WPID and serial number (for receiver-connected devices)\n# or a combination of modelId and unitId (for direct-connected devices).\n# But some devices have empty (all zero) modelIds and unitIds. 
Use the device name as a backup for the modelId.\n# The worst situation is a receiver-connected device that Solaar has never seen on-line\n# that is directly connected. Here there is no way to realize that the two devices are the same.\n# So new entries are not created for unseen off-line receiver-connected devices except for those with protocol 1.0\ndef persister(device):\n def match(wpid, serial, modelId, unitId, c):\n return ((wpid and wpid == c.get(_KEY_WPID) and serial and serial == c.get(_KEY_SERIAL)) or (\n modelId and modelId != '000000000000' and modelId == c.get(_KEY_MODEL_ID) and unitId\n and unitId == c.get(_KEY_UNIT_ID)\n ))\n\n if not _config:\n _load()\n entry = None\n modelId = device.modelId if device.modelId != '000000000000' else device.name if device.modelId else None\n for c in _config:\n if isinstance(c, _DeviceEntry) and match(device.wpid, device.serial, modelId, device.unitId, c):\n entry = c\n break\n if not entry:\n if not device.online and not device.serial: # don't create entry for offline devices without serial number\n if _log.isEnabledFor(_INFO):\n _log.info('not setting up persister for offline device %s with missing serial number', device.name)\n return\n if _log.isEnabledFor(_INFO):\n _log.info('setting up persister for device %s', device.name)\n entry = _DeviceEntry()\n _config.append(entry)\n entry.update(device, modelId)\n return entry\n\n\ndef attach_to(device):\n pass\n", "path": "lib/solaar/configuration.py"}], "after_files": [{"content": "# -*- python-mode -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport json as _json\nimport os as _os\nimport os.path as _path\n\nfrom logging import DEBUG as _DEBUG\nfrom logging import INFO as _INFO\nfrom logging import getLogger\nfrom threading import Lock as _Lock\nfrom threading import Timer as _Timer\n\nimport yaml as _yaml\n\nfrom gi.repository import GLib\nfrom logitech_receiver.common import NamedInt as _NamedInt\nfrom solaar import __version__\n\n_log = getLogger(__name__)\ndel getLogger\n\n_XDG_CONFIG_HOME = _os.environ.get('XDG_CONFIG_HOME') or _path.expanduser(_path.join('~', '.config'))\n_file_path = _path.join(_XDG_CONFIG_HOME, 'solaar', 'config.json')\n_yaml_file_path = _path.join(_XDG_CONFIG_HOME, 'solaar', 'config.yaml')\n\n_KEY_VERSION = '_version'\n_KEY_NAME = '_NAME'\n_KEY_WPID = '_wpid'\n_KEY_SERIAL = '_serial'\n_KEY_MODEL_ID = '_modelId'\n_KEY_UNIT_ID = '_unitId'\n_KEY_ABSENT = '_absent'\n_KEY_SENSITIVE = '_sensitive'\n_config = []\n\n\ndef _load():\n global _config\n loaded_config = []\n if _path.isfile(_yaml_file_path):\n try:\n with open(_yaml_file_path) as config_file:\n loaded_config = _yaml.safe_load(config_file)\n except Exception as e:\n _log.error('failed to load from %s: %s', _yaml_file_path, e)\n elif _path.isfile(_file_path):\n try:\n with open(_file_path) as config_file:\n loaded_config = _json.load(config_file)\n except Exception as e:\n _log.error('failed to load from %s: %s', _file_path, e)\n loaded_config = _convert_json(loaded_config)\n if _log.isEnabledFor(_DEBUG):\n _log.debug('load => %s', loaded_config)\n _config = _cleanup_load(loaded_config)\n\n\nsave_timer = None\nsave_lock = _Lock()\n\n\ndef save(defer=False):\n global save_timer\n if not _config:\n return\n dirname = _os.path.dirname(_yaml_file_path)\n if not _path.isdir(dirname):\n try:\n _os.makedirs(dirname)\n except Exception:\n _log.error('failed to create %s', dirname)\n return\n if not defer:\n do_save()\n else:\n with save_lock:\n if not save_timer:\n save_timer = _Timer(5.0, lambda: GLib.idle_add(do_save))\n save_timer.start()\n\n\ndef do_save():\n global save_timer\n with save_lock:\n if save_timer:\n save_timer.cancel()\n save_timer = None\n try:\n with open(_yaml_file_path, 'w') as config_file:\n _yaml.dump(_config, config_file, default_flow_style=None, width=150)\n if _log.isEnabledFor(_INFO):\n _log.info('saved %s to %s', _config, _yaml_file_path)\n except Exception as e:\n _log.error('failed to save to %s: %s', _yaml_file_path, e)\n\n\ndef _convert_json(json_dict):\n config = [json_dict.get(_KEY_VERSION)]\n for key, dev in json_dict.items():\n key = key.split(':')\n if len(key) == 2:\n dev[_KEY_WPID] = dev.get(_KEY_WPID) if dev.get(_KEY_WPID) else key[0]\n dev[_KEY_SERIAL] = dev.get(_KEY_SERIAL) if dev.get(_KEY_SERIAL) else key[1]\n for k, v in dev.items():\n if type(k) == str and not k.startswith('_') and type(v) == dict: # convert string keys to ints\n v = {int(dk) if type(dk) == str else dk: dv for dk, dv in v.items()}\n dev[k] = v\n for k in ['mouse-gestures', 'dpi-sliding']:\n v = dev.get(k, None)\n if v is True or v is False:\n dev.pop(k)\n if '_name' in dev:\n dev[_KEY_NAME] = dev['_name']\n dev.pop('_name')\n config.append(dev)\n return config\n\n\ndef _cleanup_load(c):\n _config = [__version__]\n try:\n for element in c:\n if isinstance(element, dict):\n 
divert = element.get('divert-keys')\n if divert:\n sliding = element.get('dpi-sliding')\n if sliding: # convert old-style dpi-sliding setting to divert-keys entry\n divert[int(sliding)] = 3\n element.pop('dpi-sliding', None)\n gestures = element.get('mouse-gestures')\n if gestures: # convert old-style mouse-gestures setting to divert-keys entry\n divert[int(gestures)] = 2\n element.pop('mouse-gestures', None)\n # remove any string entries (from bad conversions)\n element['divert-keys'] = {k: v for k, v in divert.items() if isinstance(k, int)}\n # convert to device entries\n element = _DeviceEntry(**element)\n _config.append(element)\n except Exception as e:\n _log.warn('Exception processing config.yaml file, ignoring contents: %s', e)\n return _config\n\n\nclass _DeviceEntry(dict):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def __setitem__(self, key, value):\n super().__setitem__(key, value)\n save(defer=True)\n\n def update(self, device, modelId):\n if device.name and device.name != self.get(_KEY_NAME):\n super().__setitem__(_KEY_NAME, device.name)\n if device.wpid and device.wpid != self.get(_KEY_WPID):\n super().__setitem__(_KEY_WPID, device.wpid)\n if device.serial and device.serial != '?' and device.serial != self.get(_KEY_SERIAL):\n super().__setitem__(_KEY_SERIAL, device.serial)\n if modelId and modelId != self.get(_KEY_MODEL_ID):\n super().__setitem__(_KEY_MODEL_ID, modelId)\n if device.unitId and device.unitId != self.get(_KEY_UNIT_ID):\n super().__setitem__(_KEY_UNIT_ID, device.unitId)\n\n def get_sensitivity(self, name):\n return self.get(_KEY_SENSITIVE, {}).get(name, False)\n\n def set_sensitivity(self, name, value):\n sensitives = self.get(_KEY_SENSITIVE, {})\n if sensitives.get(name) != value:\n sensitives[name] = value\n self.__setitem__(_KEY_SENSITIVE, sensitives)\n\n\ndef device_representer(dumper, data):\n return dumper.represent_mapping('tag:yaml.org,2002:map', data)\n\n\n_yaml.add_representer(_DeviceEntry, device_representer)\n\n\ndef named_int_representer(dumper, data):\n return dumper.represent_scalar('tag:yaml.org,2002:int', str(int(data)))\n\n\n_yaml.add_representer(_NamedInt, named_int_representer)\n\n\n# A device can be identified by a combination of WPID and serial number (for receiver-connected devices)\n# or a combination of modelId and unitId (for direct-connected devices).\n# But some devices have empty (all zero) modelIds and unitIds. Use the device name as a backup for the modelId.\n# The worst situation is a receiver-connected device that Solaar has never seen on-line\n# that is directly connected. 
Here there is no way to realize that the two devices are the same.\n# So new entries are not created for unseen off-line receiver-connected devices except for those with protocol 1.0\ndef persister(device):\n def match(wpid, serial, modelId, unitId, c):\n return ((wpid and wpid == c.get(_KEY_WPID) and serial and serial == c.get(_KEY_SERIAL)) or (\n modelId and modelId != '000000000000' and modelId == c.get(_KEY_MODEL_ID) and unitId\n and unitId == c.get(_KEY_UNIT_ID)\n ))\n\n if not _config:\n _load()\n entry = None\n modelId = device.modelId if device.modelId != '000000000000' else device.name if device.modelId else None\n for c in _config:\n if isinstance(c, _DeviceEntry) and match(device.wpid, device.serial, modelId, device.unitId, c):\n entry = c\n break\n if not entry:\n if not device.online and not device.serial: # don't create entry for offline devices without serial number\n if _log.isEnabledFor(_INFO):\n _log.info('not setting up persister for offline device %s with missing serial number', device.name)\n return\n if _log.isEnabledFor(_INFO):\n _log.info('setting up persister for device %s', device.name)\n entry = _DeviceEntry()\n _config.append(entry)\n entry.update(device, modelId)\n return entry\n\n\ndef attach_to(device):\n pass\n", "path": "lib/solaar/configuration.py"}]} | 3,567 | 510 |
gh_patches_debug_35494 | rasdani/github-patches | git_diff | ycm-core__ycmd-645 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
exit code not correct if importing ycm_core in global config fails
Hi,
I am not sure this is a real bug, but I encountered it while implementing exit-code handling in `emacs-ycmd`.
I had an `import ycm_core` in my global config. If that import fails, the line with `code = CompatibleWithCurrentCore()` in `__main__.py` will never be reached, so the correct exit code is never returned and I just get exit code 1.
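A minimal sketch of a global config that reproduces this (file name and flags are illustrative only):

```python
# Illustrative ~/.ycm_global_extra_conf.py, sketch only.
import ycm_core  # raises ImportError when the ycm_core library is missing or incompatible

def FlagsForFile(filename, **kwargs):
    return {'flags': ['-x', 'c++', '-Wall']}
```

Because `extra_conf_store` loads this file without catching exceptions, the ImportError escapes before `__main__.py` reaches `code = CompatibleWithCurrentCore()`, so the process just exits with a generic code 1.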
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ycmd/extra_conf_store.py`
Content:
```
1 # Copyright (C) 2011, 2012 Google Inc.
2 #
3 # This file is part of ycmd.
4 #
5 # ycmd is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # ycmd is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with ycmd. If not, see <http://www.gnu.org/licenses/>.
17
18 # NOTE: This module is used as a Singleton
19
20 from __future__ import unicode_literals
21 from __future__ import print_function
22 from __future__ import division
23 from __future__ import absolute_import
24 from future import standard_library
25 standard_library.install_aliases()
26 from builtins import * # noqa
27
28 import os
29 import random
30 import string
31 import sys
32 import logging
33 from threading import Lock
34 from ycmd import user_options_store
35 from ycmd.responses import UnknownExtraConf, YCM_EXTRA_CONF_FILENAME
36 from ycmd.utils import LoadPythonSource, PathsToAllParentFolders
37 from fnmatch import fnmatch
38
39
40 # Singleton variables
41 _module_for_module_file = {}
42 _module_for_module_file_lock = Lock()
43 _module_file_for_source_file = {}
44 _module_file_for_source_file_lock = Lock()
45
46
47 def Reset():
48 global _module_for_module_file, _module_file_for_source_file
49 _module_for_module_file = {}
50 _module_file_for_source_file = {}
51
52
53 def ModuleForSourceFile( filename ):
54 return Load( ModuleFileForSourceFile( filename ) )
55
56
57 def ModuleFileForSourceFile( filename ):
58 """This will try all files returned by _ExtraConfModuleSourceFilesForFile in
59 order and return the filename of the first module that was allowed to load.
60 If no module was found or allowed to load, None is returned."""
61
62 with _module_file_for_source_file_lock:
63 if filename not in _module_file_for_source_file:
64 for module_file in _ExtraConfModuleSourceFilesForFile( filename ):
65 if Load( module_file ):
66 _module_file_for_source_file[ filename ] = module_file
67 break
68
69 return _module_file_for_source_file.setdefault( filename )
70
71
72 def CallGlobalExtraConfYcmCorePreloadIfExists():
73 _CallGlobalExtraConfMethod( 'YcmCorePreload' )
74
75
76 def Shutdown():
77 # VimClose is for the sake of backwards compatibility; it's a no-op when it
78 # doesn't exist.
79 _CallGlobalExtraConfMethod( 'VimClose' )
80 _CallGlobalExtraConfMethod( 'Shutdown' )
81
82
83 def _CallGlobalExtraConfMethod( function_name ):
84 logger = _Logger()
85 global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()
86 if not ( global_ycm_extra_conf and
87 os.path.exists( global_ycm_extra_conf ) ):
88 logger.debug( 'No global extra conf, not calling method ' + function_name )
89 return
90
91 module = Load( global_ycm_extra_conf, force = True )
92 if not module or not hasattr( module, function_name ):
93 logger.debug( 'Global extra conf not loaded or no function ' +
94 function_name )
95 return
96
97 logger.info( 'Calling global extra conf method {0} on conf file {1}'.format(
98 function_name, global_ycm_extra_conf ) )
99 getattr( module, function_name )()
100
101
102 def Disable( module_file ):
103 """Disables the loading of a module for the current session."""
104 with _module_for_module_file_lock:
105 _module_for_module_file[ module_file ] = None
106
107
108 def _ShouldLoad( module_file ):
109 """Checks if a module is safe to be loaded. By default this will try to
110 decide using a white-/blacklist and ask the user for confirmation as a
111 fallback."""
112
113 if ( module_file == _GlobalYcmExtraConfFileLocation() or
114 not user_options_store.Value( 'confirm_extra_conf' ) ):
115 return True
116
117 globlist = user_options_store.Value( 'extra_conf_globlist' )
118 for glob in globlist:
119 is_blacklisted = glob[0] == '!'
120 if _MatchesGlobPattern( module_file, glob.lstrip('!') ):
121 return not is_blacklisted
122
123 raise UnknownExtraConf( module_file )
124
125
126 def Load( module_file, force = False ):
127 """Load and return the module contained in a file.
128 Using force = True the module will be loaded regardless
129 of the criteria in _ShouldLoad.
130 This will return None if the module was not allowed to be loaded."""
131
132 if not module_file:
133 return None
134
135 if not force:
136 with _module_for_module_file_lock:
137 if module_file in _module_for_module_file:
138 return _module_for_module_file[ module_file ]
139
140 if not _ShouldLoad( module_file ):
141 Disable( module_file )
142 return None
143
144 # This has to be here because a long time ago, the ycm_extra_conf.py files
145 # used to import clang_helpers.py from the cpp folder. This is not needed
146 # anymore, but there are a lot of old ycm_extra_conf.py files that we don't
147 # want to break.
148 sys.path.insert( 0, _PathToCppCompleterFolder() )
149
150 # By default, the Python interpreter compiles source files into bytecode to
151 # load them faster next time they are run. These *.pyc files are generated
152 # along the source files prior to Python 3.2 or in a __pycache__ folder for
153 # newer versions. We disable the generation of these files when loading
154 # ycm_extra_conf.py files as users do not want them inside their projects.
155 # The drawback is negligible since ycm_extra_conf.py files are generally small
156 # files thus really fast to compile and only loaded once by editing session.
157 old_dont_write_bytecode = sys.dont_write_bytecode
158 sys.dont_write_bytecode = True
159 try:
160 module = LoadPythonSource( _RandomName(), module_file )
161 finally:
162 sys.dont_write_bytecode = old_dont_write_bytecode
163
164 del sys.path[ 0 ]
165
166 with _module_for_module_file_lock:
167 _module_for_module_file[ module_file ] = module
168 return module
169
170
171 def _MatchesGlobPattern( filename, glob ):
172 """Returns true if a filename matches a given pattern. A '~' in glob will be
173 expanded to the home directory and checking will be performed using absolute
174 paths. See the documentation of fnmatch for the supported patterns."""
175
176 abspath = os.path.abspath( filename )
177 return fnmatch( abspath, os.path.abspath( os.path.expanduser( glob ) ) )
178
179
180 def _ExtraConfModuleSourceFilesForFile( filename ):
181 """For a given filename, search all parent folders for YCM_EXTRA_CONF_FILENAME
182 files that will compute the flags necessary to compile the file.
183 If _GlobalYcmExtraConfFileLocation() exists it is returned as a fallback."""
184
185 for folder in PathsToAllParentFolders( filename ):
186 candidate = os.path.join( folder, YCM_EXTRA_CONF_FILENAME )
187 if os.path.exists( candidate ):
188 yield candidate
189 global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()
190 if ( global_ycm_extra_conf
191 and os.path.exists( global_ycm_extra_conf ) ):
192 yield global_ycm_extra_conf
193
194
195 def _PathToCppCompleterFolder():
196 """Returns the path to the 'cpp' completer folder. This is necessary
197 because ycm_extra_conf files need it on the path."""
198 return os.path.join( _DirectoryOfThisScript(), 'completers', 'cpp' )
199
200
201 def _DirectoryOfThisScript():
202 return os.path.dirname( os.path.abspath( __file__ ) )
203
204
205 def _RandomName():
206 """Generates a random module name."""
207 return ''.join( random.choice( string.ascii_lowercase ) for x in range( 15 ) )
208
209
210 def _GlobalYcmExtraConfFileLocation():
211 return os.path.expanduser(
212 user_options_store.Value( 'global_ycm_extra_conf' ) )
213
214
215 def _Logger():
216 return logging.getLogger( __name__ )
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ycmd/extra_conf_store.py b/ycmd/extra_conf_store.py
--- a/ycmd/extra_conf_store.py
+++ b/ycmd/extra_conf_store.py
@@ -1,4 +1,5 @@
-# Copyright (C) 2011, 2012 Google Inc.
+# Copyright (C) 2011-2012 Google Inc.
+# 2016 ycmd contributors
#
# This file is part of ycmd.
#
@@ -37,6 +38,8 @@
from fnmatch import fnmatch
+_logger = logging.getLogger( __name__ )
+
# Singleton variables
_module_for_module_file = {}
_module_for_module_file_lock = Lock()
@@ -81,22 +84,34 @@
def _CallGlobalExtraConfMethod( function_name ):
- logger = _Logger()
global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()
if not ( global_ycm_extra_conf and
os.path.exists( global_ycm_extra_conf ) ):
- logger.debug( 'No global extra conf, not calling method ' + function_name )
+ _logger.debug( 'No global extra conf, '
+ 'not calling method {0}'.format( function_name ) )
+ return
+
+ try:
+ module = Load( global_ycm_extra_conf, force = True )
+ except Exception:
+ _logger.exception( 'Error occurred while loading '
+ 'global extra conf {0}'.format( global_ycm_extra_conf ) )
return
- module = Load( global_ycm_extra_conf, force = True )
if not module or not hasattr( module, function_name ):
- logger.debug( 'Global extra conf not loaded or no function ' +
- function_name )
+ _logger.debug( 'Global extra conf not loaded or no function ' +
+ function_name )
return
- logger.info( 'Calling global extra conf method {0} on conf file {1}'.format(
- function_name, global_ycm_extra_conf ) )
- getattr( module, function_name )()
+ try:
+ _logger.info(
+ 'Calling global extra conf method {0} '
+ 'on conf file {1}'.format( function_name, global_ycm_extra_conf ) )
+ getattr( module, function_name )()
+ except Exception:
+ _logger.exception(
+ 'Error occurred while calling global extra conf method {0} '
+ 'on conf file {1}'.format( function_name, global_ycm_extra_conf ) )
def Disable( module_file ):
@@ -210,7 +225,3 @@
def _GlobalYcmExtraConfFileLocation():
return os.path.expanduser(
user_options_store.Value( 'global_ycm_extra_conf' ) )
-
-
-def _Logger():
- return logging.getLogger( __name__ )
| {"golden_diff": "diff --git a/ycmd/extra_conf_store.py b/ycmd/extra_conf_store.py\n--- a/ycmd/extra_conf_store.py\n+++ b/ycmd/extra_conf_store.py\n@@ -1,4 +1,5 @@\n-# Copyright (C) 2011, 2012 Google Inc.\n+# Copyright (C) 2011-2012 Google Inc.\n+# 2016 ycmd contributors\n #\n # This file is part of ycmd.\n #\n@@ -37,6 +38,8 @@\n from fnmatch import fnmatch\n \n \n+_logger = logging.getLogger( __name__ )\n+\n # Singleton variables\n _module_for_module_file = {}\n _module_for_module_file_lock = Lock()\n@@ -81,22 +84,34 @@\n \n \n def _CallGlobalExtraConfMethod( function_name ):\n- logger = _Logger()\n global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()\n if not ( global_ycm_extra_conf and\n os.path.exists( global_ycm_extra_conf ) ):\n- logger.debug( 'No global extra conf, not calling method ' + function_name )\n+ _logger.debug( 'No global extra conf, '\n+ 'not calling method {0}'.format( function_name ) )\n+ return\n+\n+ try:\n+ module = Load( global_ycm_extra_conf, force = True )\n+ except Exception:\n+ _logger.exception( 'Error occurred while loading '\n+ 'global extra conf {0}'.format( global_ycm_extra_conf ) )\n return\n \n- module = Load( global_ycm_extra_conf, force = True )\n if not module or not hasattr( module, function_name ):\n- logger.debug( 'Global extra conf not loaded or no function ' +\n- function_name )\n+ _logger.debug( 'Global extra conf not loaded or no function ' +\n+ function_name )\n return\n \n- logger.info( 'Calling global extra conf method {0} on conf file {1}'.format(\n- function_name, global_ycm_extra_conf ) )\n- getattr( module, function_name )()\n+ try:\n+ _logger.info(\n+ 'Calling global extra conf method {0} '\n+ 'on conf file {1}'.format( function_name, global_ycm_extra_conf ) )\n+ getattr( module, function_name )()\n+ except Exception:\n+ _logger.exception(\n+ 'Error occurred while calling global extra conf method {0} '\n+ 'on conf file {1}'.format( function_name, global_ycm_extra_conf ) )\n \n \n def Disable( module_file ):\n@@ -210,7 +225,3 @@\n def _GlobalYcmExtraConfFileLocation():\n return os.path.expanduser(\n user_options_store.Value( 'global_ycm_extra_conf' ) )\n-\n-\n-def _Logger():\n- return logging.getLogger( __name__ )\n", "issue": "exit code not correct if importing ycm_core in global config fails\nHi,\n\nI am not sure this is a real bug, but I encountered this while implementing handling of exit code in `emacs-ycmd`.\n\nI had a `import ycm_core` in my global config. If importing fails there the line with `code = CompatibleWithCurrentCore()` in `__main__.py` will never be reached to return the correct exit code and then I just get an exit code 1.\n\n", "before_files": [{"content": "# Copyright (C) 2011, 2012 Google Inc.\n#\n# This file is part of ycmd.\n#\n# ycmd is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ycmd is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ycmd. 
If not, see <http://www.gnu.org/licenses/>.\n\n# NOTE: This module is used as a Singleton\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import * # noqa\n\nimport os\nimport random\nimport string\nimport sys\nimport logging\nfrom threading import Lock\nfrom ycmd import user_options_store\nfrom ycmd.responses import UnknownExtraConf, YCM_EXTRA_CONF_FILENAME\nfrom ycmd.utils import LoadPythonSource, PathsToAllParentFolders\nfrom fnmatch import fnmatch\n\n\n# Singleton variables\n_module_for_module_file = {}\n_module_for_module_file_lock = Lock()\n_module_file_for_source_file = {}\n_module_file_for_source_file_lock = Lock()\n\n\ndef Reset():\n global _module_for_module_file, _module_file_for_source_file\n _module_for_module_file = {}\n _module_file_for_source_file = {}\n\n\ndef ModuleForSourceFile( filename ):\n return Load( ModuleFileForSourceFile( filename ) )\n\n\ndef ModuleFileForSourceFile( filename ):\n \"\"\"This will try all files returned by _ExtraConfModuleSourceFilesForFile in\n order and return the filename of the first module that was allowed to load.\n If no module was found or allowed to load, None is returned.\"\"\"\n\n with _module_file_for_source_file_lock:\n if filename not in _module_file_for_source_file:\n for module_file in _ExtraConfModuleSourceFilesForFile( filename ):\n if Load( module_file ):\n _module_file_for_source_file[ filename ] = module_file\n break\n\n return _module_file_for_source_file.setdefault( filename )\n\n\ndef CallGlobalExtraConfYcmCorePreloadIfExists():\n _CallGlobalExtraConfMethod( 'YcmCorePreload' )\n\n\ndef Shutdown():\n # VimClose is for the sake of backwards compatibility; it's a no-op when it\n # doesn't exist.\n _CallGlobalExtraConfMethod( 'VimClose' )\n _CallGlobalExtraConfMethod( 'Shutdown' )\n\n\ndef _CallGlobalExtraConfMethod( function_name ):\n logger = _Logger()\n global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()\n if not ( global_ycm_extra_conf and\n os.path.exists( global_ycm_extra_conf ) ):\n logger.debug( 'No global extra conf, not calling method ' + function_name )\n return\n\n module = Load( global_ycm_extra_conf, force = True )\n if not module or not hasattr( module, function_name ):\n logger.debug( 'Global extra conf not loaded or no function ' +\n function_name )\n return\n\n logger.info( 'Calling global extra conf method {0} on conf file {1}'.format(\n function_name, global_ycm_extra_conf ) )\n getattr( module, function_name )()\n\n\ndef Disable( module_file ):\n \"\"\"Disables the loading of a module for the current session.\"\"\"\n with _module_for_module_file_lock:\n _module_for_module_file[ module_file ] = None\n\n\ndef _ShouldLoad( module_file ):\n \"\"\"Checks if a module is safe to be loaded. 
By default this will try to\n decide using a white-/blacklist and ask the user for confirmation as a\n fallback.\"\"\"\n\n if ( module_file == _GlobalYcmExtraConfFileLocation() or\n not user_options_store.Value( 'confirm_extra_conf' ) ):\n return True\n\n globlist = user_options_store.Value( 'extra_conf_globlist' )\n for glob in globlist:\n is_blacklisted = glob[0] == '!'\n if _MatchesGlobPattern( module_file, glob.lstrip('!') ):\n return not is_blacklisted\n\n raise UnknownExtraConf( module_file )\n\n\ndef Load( module_file, force = False ):\n \"\"\"Load and return the module contained in a file.\n Using force = True the module will be loaded regardless\n of the criteria in _ShouldLoad.\n This will return None if the module was not allowed to be loaded.\"\"\"\n\n if not module_file:\n return None\n\n if not force:\n with _module_for_module_file_lock:\n if module_file in _module_for_module_file:\n return _module_for_module_file[ module_file ]\n\n if not _ShouldLoad( module_file ):\n Disable( module_file )\n return None\n\n # This has to be here because a long time ago, the ycm_extra_conf.py files\n # used to import clang_helpers.py from the cpp folder. This is not needed\n # anymore, but there are a lot of old ycm_extra_conf.py files that we don't\n # want to break.\n sys.path.insert( 0, _PathToCppCompleterFolder() )\n\n # By default, the Python interpreter compiles source files into bytecode to\n # load them faster next time they are run. These *.pyc files are generated\n # along the source files prior to Python 3.2 or in a __pycache__ folder for\n # newer versions. We disable the generation of these files when loading\n # ycm_extra_conf.py files as users do not want them inside their projects.\n # The drawback is negligible since ycm_extra_conf.py files are generally small\n # files thus really fast to compile and only loaded once by editing session.\n old_dont_write_bytecode = sys.dont_write_bytecode\n sys.dont_write_bytecode = True\n try:\n module = LoadPythonSource( _RandomName(), module_file )\n finally:\n sys.dont_write_bytecode = old_dont_write_bytecode\n\n del sys.path[ 0 ]\n\n with _module_for_module_file_lock:\n _module_for_module_file[ module_file ] = module\n return module\n\n\ndef _MatchesGlobPattern( filename, glob ):\n \"\"\"Returns true if a filename matches a given pattern. A '~' in glob will be\n expanded to the home directory and checking will be performed using absolute\n paths. See the documentation of fnmatch for the supported patterns.\"\"\"\n\n abspath = os.path.abspath( filename )\n return fnmatch( abspath, os.path.abspath( os.path.expanduser( glob ) ) )\n\n\ndef _ExtraConfModuleSourceFilesForFile( filename ):\n \"\"\"For a given filename, search all parent folders for YCM_EXTRA_CONF_FILENAME\n files that will compute the flags necessary to compile the file.\n If _GlobalYcmExtraConfFileLocation() exists it is returned as a fallback.\"\"\"\n\n for folder in PathsToAllParentFolders( filename ):\n candidate = os.path.join( folder, YCM_EXTRA_CONF_FILENAME )\n if os.path.exists( candidate ):\n yield candidate\n global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()\n if ( global_ycm_extra_conf\n and os.path.exists( global_ycm_extra_conf ) ):\n yield global_ycm_extra_conf\n\n\ndef _PathToCppCompleterFolder():\n \"\"\"Returns the path to the 'cpp' completer folder. 
This is necessary\n because ycm_extra_conf files need it on the path.\"\"\"\n return os.path.join( _DirectoryOfThisScript(), 'completers', 'cpp' )\n\n\ndef _DirectoryOfThisScript():\n return os.path.dirname( os.path.abspath( __file__ ) )\n\n\ndef _RandomName():\n \"\"\"Generates a random module name.\"\"\"\n return ''.join( random.choice( string.ascii_lowercase ) for x in range( 15 ) )\n\n\ndef _GlobalYcmExtraConfFileLocation():\n return os.path.expanduser(\n user_options_store.Value( 'global_ycm_extra_conf' ) )\n\n\ndef _Logger():\n return logging.getLogger( __name__ )\n", "path": "ycmd/extra_conf_store.py"}], "after_files": [{"content": "# Copyright (C) 2011-2012 Google Inc.\n# 2016 ycmd contributors\n#\n# This file is part of ycmd.\n#\n# ycmd is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ycmd is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ycmd. If not, see <http://www.gnu.org/licenses/>.\n\n# NOTE: This module is used as a Singleton\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import * # noqa\n\nimport os\nimport random\nimport string\nimport sys\nimport logging\nfrom threading import Lock\nfrom ycmd import user_options_store\nfrom ycmd.responses import UnknownExtraConf, YCM_EXTRA_CONF_FILENAME\nfrom ycmd.utils import LoadPythonSource, PathsToAllParentFolders\nfrom fnmatch import fnmatch\n\n\n_logger = logging.getLogger( __name__ )\n\n# Singleton variables\n_module_for_module_file = {}\n_module_for_module_file_lock = Lock()\n_module_file_for_source_file = {}\n_module_file_for_source_file_lock = Lock()\n\n\ndef Reset():\n global _module_for_module_file, _module_file_for_source_file\n _module_for_module_file = {}\n _module_file_for_source_file = {}\n\n\ndef ModuleForSourceFile( filename ):\n return Load( ModuleFileForSourceFile( filename ) )\n\n\ndef ModuleFileForSourceFile( filename ):\n \"\"\"This will try all files returned by _ExtraConfModuleSourceFilesForFile in\n order and return the filename of the first module that was allowed to load.\n If no module was found or allowed to load, None is returned.\"\"\"\n\n with _module_file_for_source_file_lock:\n if filename not in _module_file_for_source_file:\n for module_file in _ExtraConfModuleSourceFilesForFile( filename ):\n if Load( module_file ):\n _module_file_for_source_file[ filename ] = module_file\n break\n\n return _module_file_for_source_file.setdefault( filename )\n\n\ndef CallGlobalExtraConfYcmCorePreloadIfExists():\n _CallGlobalExtraConfMethod( 'YcmCorePreload' )\n\n\ndef Shutdown():\n # VimClose is for the sake of backwards compatibility; it's a no-op when it\n # doesn't exist.\n _CallGlobalExtraConfMethod( 'VimClose' )\n _CallGlobalExtraConfMethod( 'Shutdown' )\n\n\ndef _CallGlobalExtraConfMethod( function_name ):\n global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()\n if not ( global_ycm_extra_conf and\n os.path.exists( global_ycm_extra_conf ) ):\n _logger.debug( 'No 
global extra conf, '\n 'not calling method {0}'.format( function_name ) )\n return\n\n try:\n module = Load( global_ycm_extra_conf, force = True )\n except Exception:\n _logger.exception( 'Error occurred while loading '\n 'global extra conf {0}'.format( global_ycm_extra_conf ) )\n return\n\n if not module or not hasattr( module, function_name ):\n _logger.debug( 'Global extra conf not loaded or no function ' +\n function_name )\n return\n\n try:\n _logger.info(\n 'Calling global extra conf method {0} '\n 'on conf file {1}'.format( function_name, global_ycm_extra_conf ) )\n getattr( module, function_name )()\n except Exception:\n _logger.exception(\n 'Error occurred while calling global extra conf method {0} '\n 'on conf file {1}'.format( function_name, global_ycm_extra_conf ) )\n\n\ndef Disable( module_file ):\n \"\"\"Disables the loading of a module for the current session.\"\"\"\n with _module_for_module_file_lock:\n _module_for_module_file[ module_file ] = None\n\n\ndef _ShouldLoad( module_file ):\n \"\"\"Checks if a module is safe to be loaded. By default this will try to\n decide using a white-/blacklist and ask the user for confirmation as a\n fallback.\"\"\"\n\n if ( module_file == _GlobalYcmExtraConfFileLocation() or\n not user_options_store.Value( 'confirm_extra_conf' ) ):\n return True\n\n globlist = user_options_store.Value( 'extra_conf_globlist' )\n for glob in globlist:\n is_blacklisted = glob[0] == '!'\n if _MatchesGlobPattern( module_file, glob.lstrip('!') ):\n return not is_blacklisted\n\n raise UnknownExtraConf( module_file )\n\n\ndef Load( module_file, force = False ):\n \"\"\"Load and return the module contained in a file.\n Using force = True the module will be loaded regardless\n of the criteria in _ShouldLoad.\n This will return None if the module was not allowed to be loaded.\"\"\"\n\n if not module_file:\n return None\n\n if not force:\n with _module_for_module_file_lock:\n if module_file in _module_for_module_file:\n return _module_for_module_file[ module_file ]\n\n if not _ShouldLoad( module_file ):\n Disable( module_file )\n return None\n\n # This has to be here because a long time ago, the ycm_extra_conf.py files\n # used to import clang_helpers.py from the cpp folder. This is not needed\n # anymore, but there are a lot of old ycm_extra_conf.py files that we don't\n # want to break.\n sys.path.insert( 0, _PathToCppCompleterFolder() )\n\n # By default, the Python interpreter compiles source files into bytecode to\n # load them faster next time they are run. These *.pyc files are generated\n # along the source files prior to Python 3.2 or in a __pycache__ folder for\n # newer versions. We disable the generation of these files when loading\n # ycm_extra_conf.py files as users do not want them inside their projects.\n # The drawback is negligible since ycm_extra_conf.py files are generally small\n # files thus really fast to compile and only loaded once by editing session.\n old_dont_write_bytecode = sys.dont_write_bytecode\n sys.dont_write_bytecode = True\n try:\n module = LoadPythonSource( _RandomName(), module_file )\n finally:\n sys.dont_write_bytecode = old_dont_write_bytecode\n\n del sys.path[ 0 ]\n\n with _module_for_module_file_lock:\n _module_for_module_file[ module_file ] = module\n return module\n\n\ndef _MatchesGlobPattern( filename, glob ):\n \"\"\"Returns true if a filename matches a given pattern. A '~' in glob will be\n expanded to the home directory and checking will be performed using absolute\n paths. 
See the documentation of fnmatch for the supported patterns.\"\"\"\n\n abspath = os.path.abspath( filename )\n return fnmatch( abspath, os.path.abspath( os.path.expanduser( glob ) ) )\n\n\ndef _ExtraConfModuleSourceFilesForFile( filename ):\n \"\"\"For a given filename, search all parent folders for YCM_EXTRA_CONF_FILENAME\n files that will compute the flags necessary to compile the file.\n If _GlobalYcmExtraConfFileLocation() exists it is returned as a fallback.\"\"\"\n\n for folder in PathsToAllParentFolders( filename ):\n candidate = os.path.join( folder, YCM_EXTRA_CONF_FILENAME )\n if os.path.exists( candidate ):\n yield candidate\n global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()\n if ( global_ycm_extra_conf\n and os.path.exists( global_ycm_extra_conf ) ):\n yield global_ycm_extra_conf\n\n\ndef _PathToCppCompleterFolder():\n \"\"\"Returns the path to the 'cpp' completer folder. This is necessary\n because ycm_extra_conf files need it on the path.\"\"\"\n return os.path.join( _DirectoryOfThisScript(), 'completers', 'cpp' )\n\n\ndef _DirectoryOfThisScript():\n return os.path.dirname( os.path.abspath( __file__ ) )\n\n\ndef _RandomName():\n \"\"\"Generates a random module name.\"\"\"\n return ''.join( random.choice( string.ascii_lowercase ) for x in range( 15 ) )\n\n\ndef _GlobalYcmExtraConfFileLocation():\n return os.path.expanduser(\n user_options_store.Value( 'global_ycm_extra_conf' ) )\n", "path": "ycmd/extra_conf_store.py"}]} | 2,762 | 646 |
gh_patches_debug_761 | rasdani/github-patches | git_diff | encode__uvicorn-324 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: __init__() when running "uvicorn app:App"
I'm working on macOS Sierra 10.12.6, with Python 3.7.2 and uvicorn 0.5.1 installed via pip3.
When I run the example `uvicorn app:App` I get the following error:
Traceback (most recent call last):
File "/usr/local/bin/uvicorn", line 11, in <module>
load_entry_point('uvicorn==0.5.1', 'console_scripts', 'uvicorn')()
File "/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py", line 489, in load_entry_point
return get_distribution(dist).load_entry_point(group, name)
File "/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2793, in load_entry_point
return ep.load()
File "/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2411, in load
return self.resolve()
File "/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2417, in resolve
module = __import__(self.module_name, fromlist=['__name__'], level=0)
File "/usr/local/lib/python3.7/site-packages/uvicorn/__init__.py", line 2, in <module>
from uvicorn.main import Server, main, run
File "/usr/local/lib/python3.7/site-packages/uvicorn/main.py", line 212, in <module>
ssl_ciphers: str,
File "/usr/local/lib/python3.7/site-packages/click/decorators.py", line 170, in decorator
_param_memo(f, OptionClass(param_decls, **attrs))
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1460, in __init__
Parameter.__init__(self, param_decls, type=type, **attrs)
TypeError: __init__() got an unexpected keyword argument 'hidden'
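For context, the `hidden` keyword that the traceback complains about was only added to click's options in click 7.0, so a quick version check makes the mismatch visible (illustrative snippet, not part of the traceback above):

```python
# Sketch only: check which click version pip3 actually installed.
import click
print(click.__version__)  # anything below 7.0 cannot accept Option(..., hidden=True)
```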
Thank you
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import os
5 import re
6 import sys
7 import platform
8
9 from setuptools import setup
10
11
12 def get_version(package):
13 """
14 Return package version as listed in `__version__` in `init.py`.
15 """
16 path = os.path.join(package, '__init__.py')
17 init_py = open(path, 'r', encoding='utf8').read()
18 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
19
20
21 def get_long_description():
22 """
23 Return the README.
24 """
25 return open('README.md', 'r', encoding='utf8').read()
26
27
28 def get_packages(package):
29 """
30 Return root package and all sub-packages.
31 """
32 return [dirpath
33 for dirpath, dirnames, filenames in os.walk(package)
34 if os.path.exists(os.path.join(dirpath, '__init__.py'))]
35
36
37 env_marker = (
38 "sys_platform != 'win32'"
39 " and sys_platform != 'cygwin'"
40 " and platform_python_implementation != 'pypy'"
41 )
42
43 requirements = [
44 "click",
45 "h11",
46 "websockets>=6.0",
47 "httptools;" + env_marker,
48 "uvloop;" + env_marker,
49 ]
50
51
52 setup(
53 name='uvicorn',
54 version=get_version('uvicorn'),
55 url='https://github.com/encode/uvicorn',
56 license='BSD',
57 description='The lightning-fast ASGI server.',
58 long_description=get_long_description(),
59 long_description_content_type='text/markdown',
60 author='Tom Christie',
61 author_email='[email protected]',
62 packages=get_packages('uvicorn'),
63 install_requires=requirements,
64 data_files = [("", ["LICENSE.md"])],
65 classifiers=[
66 'Development Status :: 3 - Alpha',
67 'Environment :: Web Environment',
68 'Intended Audience :: Developers',
69 'License :: OSI Approved :: BSD License',
70 'Operating System :: OS Independent',
71 'Topic :: Internet :: WWW/HTTP',
72 'Programming Language :: Python :: 3',
73 'Programming Language :: Python :: 3.5',
74 'Programming Language :: Python :: 3.6',
75 'Programming Language :: Python :: 3.7',
76 'Programming Language :: Python :: Implementation :: CPython',
77 'Programming Language :: Python :: Implementation :: PyPy',
78 ],
79 entry_points="""
80 [console_scripts]
81 uvicorn=uvicorn.main:main
82 """
83 )
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,11 +41,11 @@
)
requirements = [
- "click",
- "h11",
- "websockets>=6.0",
- "httptools;" + env_marker,
- "uvloop;" + env_marker,
+ "click==7.*",
+ "h11==0.8.*",
+ "websockets==7.*",
+ "httptools==0.0.13 ;" + env_marker,
+ "uvloop==0.12.* ;" + env_marker,
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,11 +41,11 @@\n )\n \n requirements = [\n- \"click\",\n- \"h11\",\n- \"websockets>=6.0\",\n- \"httptools;\" + env_marker,\n- \"uvloop;\" + env_marker,\n+ \"click==7.*\",\n+ \"h11==0.8.*\",\n+ \"websockets==7.*\",\n+ \"httptools==0.0.13 ;\" + env_marker,\n+ \"uvloop==0.12.* ;\" + env_marker,\n ]\n", "issue": "TypeError: __init__() when run \"uvicorn app:App\"\nI'm working on Mac Os Sierra 10.12.6, python 3.7.2 and uvicorn via pip3 0.5.1.\r\nWhen I run the example uvicorn app:App get the following error:\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/uvicorn\", line 11, in <module>\r\n load_entry_point('uvicorn==0.5.1', 'console_scripts', 'uvicorn')()\r\n File \"/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 489, in load_entry_point\r\n return get_distribution(dist).load_entry_point(group, name)\r\n File \"/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 2793, in load_entry_point\r\n return ep.load()\r\n File \"/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 2411, in load\r\n return self.resolve()\r\n File \"/usr/local/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 2417, in resolve\r\n module = __import__(self.module_name, fromlist=['__name__'], level=0)\r\n File \"/usr/local/lib/python3.7/site-packages/uvicorn/__init__.py\", line 2, in <module>\r\n from uvicorn.main import Server, main, run\r\n File \"/usr/local/lib/python3.7/site-packages/uvicorn/main.py\", line 212, in <module>\r\n ssl_ciphers: str,\r\n File \"/usr/local/lib/python3.7/site-packages/click/decorators.py\", line 170, in decorator\r\n _param_memo(f, OptionClass(param_decls, **attrs))\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 1460, in __init__\r\n Parameter.__init__(self, param_decls, type=type, **attrs)\r\nTypeError: __init__() got an unexpected keyword argument 'hidden'\r\n\r\nThank you\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport platform\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, '__init__.py')\n init_py = open(path, 'r', encoding='utf8').read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open('README.md', 'r', encoding='utf8').read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nenv_marker = (\n \"sys_platform != 'win32'\"\n \" and sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'pypy'\"\n)\n\nrequirements = [\n \"click\",\n \"h11\",\n \"websockets>=6.0\",\n \"httptools;\" + env_marker,\n \"uvloop;\" + env_marker,\n]\n\n\nsetup(\n name='uvicorn',\n version=get_version('uvicorn'),\n url='https://github.com/encode/uvicorn',\n license='BSD',\n description='The lightning-fast ASGI server.',\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n author='Tom Christie',\n author_email='[email protected]',\n packages=get_packages('uvicorn'),\n install_requires=requirements,\n data_files = [(\"\", [\"LICENSE.md\"])],\n classifiers=[\n 
'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\"\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport platform\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, '__init__.py')\n init_py = open(path, 'r', encoding='utf8').read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open('README.md', 'r', encoding='utf8').read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nenv_marker = (\n \"sys_platform != 'win32'\"\n \" and sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'pypy'\"\n)\n\nrequirements = [\n \"click==7.*\",\n \"h11==0.8.*\",\n \"websockets==7.*\",\n \"httptools==0.0.13 ;\" + env_marker,\n \"uvloop==0.12.* ;\" + env_marker,\n]\n\n\nsetup(\n name='uvicorn',\n version=get_version('uvicorn'),\n url='https://github.com/encode/uvicorn',\n license='BSD',\n description='The lightning-fast ASGI server.',\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n author='Tom Christie',\n author_email='[email protected]',\n packages=get_packages('uvicorn'),\n install_requires=requirements,\n data_files = [(\"\", [\"LICENSE.md\"])],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\"\n)\n", "path": "setup.py"}]} | 1,419 | 148 |
gh_patches_debug_26776 | rasdani/github-patches | git_diff | quantumlib__Cirq-1865 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add QASM importer
As the other leg of #44, and maybe a partial solution to #862 depending on the gate sets, a QASM importer would be useful, as we discussed in today's sync meeting.
I'm happy to design and implement it.
--- END ISSUE ---
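To make the gate-set question above concrete, here is the kind of input such an importer has to handle: a small OpenQASM 2.0 program whose `qelib1.inc` gates need a mapping onto Cirq operations. The snippet and the mapping table are only a sketch of the decision space, not a claim about which gates the eventual importer will support.

```python
# Illustration only: a qelib1-style OpenQASM 2.0 program and a hypothetical,
# deliberately incomplete mapping of its gates onto existing Cirq gates.
import cirq

example_qasm = """
OPENQASM 2.0;
include "qelib1.inc";
qreg q[2];
creg c[2];
h q[0];
cx q[0], q[1];
measure q[0] -> c[0];
"""

# The right-hand side names are real Cirq gates; which qelib1 gates get a
# direct equivalent versus a decomposition is exactly the open question.
qelib1_to_cirq = {
    "h": cirq.H,
    "x": cirq.X,
    "cx": cirq.CNOT,
    # u1/u2/u3, rz, ccx, ... would need parameterized equivalents.
}
```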
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/contrib/qasm_import/__init__.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from cirq.contrib.qasm_import.exception import (QasmException)
16 from cirq.contrib.qasm_import.qasm import (QasmCircuitParser)
17
```
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # coverage: ignore
3
4 # Configuration file for the Sphinx documentation builder.
5 # See http://www.sphinx-doc.org/en/master/config for help
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 from typing import List, Any
14
15 import os
16 import sys
17
18 import pypandoc
19
20 cirq_root_path = os.path.dirname(os.path.dirname(__file__))
21 sys.path.insert(0, cirq_root_path)
22
23
24 def setup(app):
25 app.add_config_value('pandoc_use_parser', 'markdown', True)
26 app.connect('autodoc-process-docstring', pandoc_process)
27
28
29 def convert_markdown_mathjax_for_rst(lines: List[str]) -> List[str]:
30 if all('$$' not in line for line in lines):
31 return lines
32
33 data = '\n'.join(lines)
34 sections = data.split('$$')
35 if len(sections) % 2 != 1:
36 raise ValueError('Mismatched number of "$$" latex tokens.')
37
38 result = []
39 for i, s in enumerate(sections):
40 if i % 2:
41 # Avoid getting split across divs.
42 s = ' '.join(s.split('\n'))
43 # Avoid intermediate layers turning our newlines into slashes.
44 s = s.replace('\\\\', '\\newline')
45 # Keep the $$ so MathJax can find it.
46 result.append('$${}$$'.format(s))
47 else:
48 # Work around bad table detection in pandoc by concatenating
49 # lines from the same paragraph.
50 s = '\n\n'.join(e.replace('\n', ' ') for e in s.split('\n\n'))
51
52 # Convert markdown to rst.
53 out = pypandoc.convert(s, to='rst', format='markdown_github')
54
55 # Not sure why pandoc is escaping these...
56 out = out.replace(r'\|', '|')
57
58 result.extend(out.split('\n'))
59
60 return result
61
62
63 def pandoc_process(app,
64 what: str,
65 name: str,
66 obj: Any,
67 options,
68 lines: List[str]
69 ) -> None:
70 if not getattr(obj, '__module__', 'cirq').startswith('cirq'):
71 # Don't convert objects from other modules.
72 return
73
74 # Don't convert output from Napoleon extension, which is already rst.
75 i = 0
76 while i < len(lines) and not lines[i].startswith(':'):
77 i += 1
78 if not i:
79 return
80
81 converted_lines = convert_markdown_mathjax_for_rst(lines[:i])
82 kept_lines = lines[i:]
83
84 data = pypandoc.convert(
85 '\n'.join(converted_lines),
86 to='rst',
87 format='markdown_github',
88 )
89
90 lines[:] = data.split('\n') + kept_lines
91
92
93 # -- Project information -----------------------------------------------------
94
95 project = 'Cirq'
96 copyright = '2018, The Cirq Developers' # pylint: disable=redefined-builtin
97 author = 'The Cirq Developers'
98
99 # The full version, including alpha/beta/rc tags
100 __version__ = ''
101 exec(open(os.path.join(cirq_root_path, 'cirq', '_version.py')).read())
102 release = __version__
103
104 # The short X.Y version
105 version = release # '.'.join(release.split('.')[:2])
106
107 # -- General configuration ---------------------------------------------------
108
109 # If your documentation needs a minimal Sphinx version, state it here.
110 # needs_sphinx = '1.0'
111
112 # Add any Sphinx extension module names here, as strings. They can be
113 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
114 # ones.
115 extensions = [
116 'sphinx.ext.autodoc',
117 'sphinx.ext.autosummary',
118 'sphinx.ext.doctest',
119 'sphinx.ext.mathjax',
120 'sphinx.ext.napoleon',
121 'sphinx.ext.viewcode'
122 ]
123
124 # Add any paths that contain templates here, relative to this directory.
125 templates_path = ['_templates']
126
127 # Allow markdown includes.
128 # http://www.sphinx-doc.org/en/master/markdown.html
129 source_parsers = {
130 '.md': 'recommonmark.parser.CommonMarkParser',
131 }
132
133 # The suffix(es) of source filenames.
134 # You can specify multiple suffix as a list of string:
135 #
136 source_suffix = ['.rst', '.md']
137
138 # The master toctree document.
139 master_doc = 'index'
140
141 # The language for content autogenerated by Sphinx. Refer to documentation
142 # for a list of supported languages.
143 #
144 # This is also used if you do content translation via gettext catalogs.
145 # Usually you set "language" from the command line for these cases.
146 language = None
147
148 # List of patterns, relative to source directory, that match files and
149 # directories to ignore when looking for source files.
150 # This pattern also affects html_static_path and html_extra_path .
151 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
152
153 # The name of the Pygments (syntax highlighting) style to use.
154 pygments_style = 'sphinx'
155
156
157 # -- Options for HTML output ---------------------------------------------
158
159 html_theme = 'sphinx_rtd_theme'
160 html_favicon = 'favicon.ico'
161 # html_theme_options = {}
162
163 # Add any paths that contain custom static files (such as style sheets) here,
164 # relative to this directory. They are copied after the builtin static files,
165 # so a file named "default.css" will overwrite the builtin "default.css".
166 # html_static_path = ['_static']
167
168 # Custom sidebar templates, must be a dictionary that maps document names
169 # to template names.
170 #
171 # The default sidebars (for documents that don't match any pattern) are
172 # defined by theme itself. Builtin themes are using these templates by
173 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
174 # 'searchbox.html']``.
175 #
176 # html_sidebars = {}
177
178
179 # -- Options for HTMLHelp output -----------------------------------------
180
181 # Output file base name for HTML help builder.
182 htmlhelp_basename = 'Cirqdoc'
183
184
185 # -- Options for LaTeX output --------------------------------------------
186
187 latex_elements = {
188 # The paper size ('letterpaper' or 'a4paper').
189 'papersize': 'letterpaper',
190
191 # The font size ('10pt', '11pt' or '12pt').
192 # 'pointsize': '10pt',
193
194 # Additional stuff for the LaTeX preamble.
195 # 'preamble': '',
196
197 # Latex figure (float) alignment
198 # 'figure_align': 'htbp',
199 }
200
201 # Grouping the document tree into LaTeX files. List of tuples
202 # (source start file, target name, title,
203 # author, documentclass [howto, manual, or own class]).
204 latex_documents = [
205 (master_doc, 'Cirq.tex', 'Cirq Documentation',
206 'The Cirq Developers', 'manual'),
207 ]
208
209
210 # -- Options for manual page output --------------------------------------
211
212 # One entry per manual page. List of tuples
213 # (source start file, name, description, authors, manual section).
214 man_pages = [
215 (master_doc, 'cirq', 'Cirq Documentation',
216 [author], 1)
217 ]
218
219
220 # -- Options for Texinfo output ------------------------------------------
221
222 # Grouping the document tree into Texinfo files. List of tuples
223 # (source start file, target name, title, author,
224 # dir menu entry, description, category)
225 texinfo_documents = [
226 (master_doc, 'Cirq', 'Cirq Documentation',
227 author, 'Cirq', 'A python library for NISQ circuits.',
228 'Miscellaneous'),
229 ]
230
231
232 # -- Extension configuration -------------------------------------------------
233
234 # Generate subpages for reference docs automatically.
235 # http://www.sphinx-doc.org/en/master/ext/autosummary.html#generating-stub-pages-automatically
236 autosummary_generate = True
237
```
Path: `cirq/contrib/qasm_import/qasm.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from cirq import circuits
16 from cirq.contrib.qasm_import._parser import QasmParser
17
18
19 class QasmCircuitParser:
20 """QasmCircuitParser is currently partially developed, not functional,
21 DO NOT USE.
22 TODO(https://github.com/quantumlib/Cirq/issues/1548)
23 It will serve as the entrypoint for parsing QASM files."""
24
25 def __init__(self):
26 pass
27
28 def parse(self, qasm: str) -> circuits.Circuit:
29 return QasmParser().parse(qasm).circuit
30
```
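The stub above already pins down the intended call shape, even though its docstring warns that it is not functional yet. Purely as a sketch of that intended entry point (not working code while the underlying `QasmParser` is unfinished), usage would presumably look like this:

```python
# Sketch of the intended entry point, based on the stub listed above. The class
# and its `parse` signature come from that file; whether a call succeeds depends
# on the unfinished QasmParser it delegates to, so treat this as illustrative.
from cirq import circuits
from cirq.contrib.qasm_import import QasmCircuitParser


def load_qasm(qasm_source: str) -> circuits.Circuit:
    """Parse an OpenQASM 2.0 string into a cirq.Circuit via the stub parser."""
    return QasmCircuitParser().parse(qasm_source)
```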
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq/contrib/qasm_import/__init__.py b/cirq/contrib/qasm_import/__init__.py
--- a/cirq/contrib/qasm_import/__init__.py
+++ b/cirq/contrib/qasm_import/__init__.py
@@ -12,5 +12,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from cirq.contrib.qasm_import.exception import (QasmException)
-from cirq.contrib.qasm_import.qasm import (QasmCircuitParser)
+from cirq.contrib.qasm_import.exception import QasmException
+from cirq.contrib.qasm_import.qasm import circuit_from_qasm
diff --git a/cirq/contrib/qasm_import/qasm.py b/cirq/contrib/qasm_import/qasm.py
--- a/cirq/contrib/qasm_import/qasm.py
+++ b/cirq/contrib/qasm_import/qasm.py
@@ -16,14 +16,14 @@
from cirq.contrib.qasm_import._parser import QasmParser
-class QasmCircuitParser:
- """QasmCircuitParser is currently partially developed, not functional,
- DO NOT USE.
- TODO(https://github.com/quantumlib/Cirq/issues/1548)
- It will serve as the entrypoint for parsing QASM files."""
+def circuit_from_qasm(qasm: str) -> circuits.Circuit:
+ """Parses an OpenQASM string to `cirq.Circuit`.
- def __init__(self):
- pass
+ Args:
+ qasm: The OpenQASM string
- def parse(self, qasm: str) -> circuits.Circuit:
- return QasmParser().parse(qasm).circuit
+ Returns:
+ The parsed circuit
+ """
+
+ return QasmParser().parse(qasm).circuit
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -118,7 +118,8 @@
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
- 'sphinx.ext.viewcode'
+ 'sphinx.ext.viewcode',
+ 'sphinx_markdown_tables',
]
# Add any paths that contain templates here, relative to this directory.
| {"golden_diff": "diff --git a/cirq/contrib/qasm_import/__init__.py b/cirq/contrib/qasm_import/__init__.py\n--- a/cirq/contrib/qasm_import/__init__.py\n+++ b/cirq/contrib/qasm_import/__init__.py\n@@ -12,5 +12,5 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from cirq.contrib.qasm_import.exception import (QasmException)\n-from cirq.contrib.qasm_import.qasm import (QasmCircuitParser)\n+from cirq.contrib.qasm_import.exception import QasmException\n+from cirq.contrib.qasm_import.qasm import circuit_from_qasm\ndiff --git a/cirq/contrib/qasm_import/qasm.py b/cirq/contrib/qasm_import/qasm.py\n--- a/cirq/contrib/qasm_import/qasm.py\n+++ b/cirq/contrib/qasm_import/qasm.py\n@@ -16,14 +16,14 @@\n from cirq.contrib.qasm_import._parser import QasmParser\n \n \n-class QasmCircuitParser:\n- \"\"\"QasmCircuitParser is currently partially developed, not functional,\n- DO NOT USE.\n- TODO(https://github.com/quantumlib/Cirq/issues/1548)\n- It will serve as the entrypoint for parsing QASM files.\"\"\"\n+def circuit_from_qasm(qasm: str) -> circuits.Circuit:\n+ \"\"\"Parses an OpenQASM string to `cirq.Circuit`.\n \n- def __init__(self):\n- pass\n+ Args:\n+ qasm: The OpenQASM string\n \n- def parse(self, qasm: str) -> circuits.Circuit:\n- return QasmParser().parse(qasm).circuit\n+ Returns:\n+ The parsed circuit\n+ \"\"\"\n+\n+ return QasmParser().parse(qasm).circuit\ndiff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -118,7 +118,8 @@\n 'sphinx.ext.doctest',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n- 'sphinx.ext.viewcode'\n+ 'sphinx.ext.viewcode',\n+ 'sphinx_markdown_tables',\n ]\n \n # Add any paths that contain templates here, relative to this directory.\n", "issue": "Add QASM importer\nAs the other leg of #44 and maybe a partial solution to #862 depending on the gate sets - as we discussed on today's sync meeting, a QASM importer would be useful. \r\n\r\nI'm happy to design and implement it. \n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom cirq.contrib.qasm_import.exception import (QasmException)\nfrom cirq.contrib.qasm_import.qasm import (QasmCircuitParser)\n", "path": "cirq/contrib/qasm_import/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# coverage: ignore\n\n# Configuration file for the Sphinx documentation builder.\n# See http://www.sphinx-doc.org/en/master/config for help\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nfrom typing import List, Any\n\nimport os\nimport sys\n\nimport pypandoc\n\ncirq_root_path = os.path.dirname(os.path.dirname(__file__))\nsys.path.insert(0, cirq_root_path)\n\n\ndef setup(app):\n app.add_config_value('pandoc_use_parser', 'markdown', True)\n app.connect('autodoc-process-docstring', pandoc_process)\n\n\ndef convert_markdown_mathjax_for_rst(lines: List[str]) -> List[str]:\n if all('$$' not in line for line in lines):\n return lines\n\n data = '\\n'.join(lines)\n sections = data.split('$$')\n if len(sections) % 2 != 1:\n raise ValueError('Mismatched number of \"$$\" latex tokens.')\n\n result = []\n for i, s in enumerate(sections):\n if i % 2:\n # Avoid getting split across divs.\n s = ' '.join(s.split('\\n'))\n # Avoid intermediate layers turning our newlines into slashes.\n s = s.replace('\\\\\\\\', '\\\\newline')\n # Keep the $$ so MathJax can find it.\n result.append('$${}$$'.format(s))\n else:\n # Work around bad table detection in pandoc by concatenating\n # lines from the same paragraph.\n s = '\\n\\n'.join(e.replace('\\n', ' ') for e in s.split('\\n\\n'))\n\n # Convert markdown to rst.\n out = pypandoc.convert(s, to='rst', format='markdown_github')\n\n # Not sure why pandoc is escaping these...\n out = out.replace(r'\\|', '|')\n\n result.extend(out.split('\\n'))\n\n return result\n\n\ndef pandoc_process(app,\n what: str,\n name: str,\n obj: Any,\n options,\n lines: List[str]\n ) -> None:\n if not getattr(obj, '__module__', 'cirq').startswith('cirq'):\n # Don't convert objects from other modules.\n return\n\n # Don't convert output from Napoleon extension, which is already rst.\n i = 0\n while i < len(lines) and not lines[i].startswith(':'):\n i += 1\n if not i:\n return\n\n converted_lines = convert_markdown_mathjax_for_rst(lines[:i])\n kept_lines = lines[i:]\n\n data = pypandoc.convert(\n '\\n'.join(converted_lines),\n to='rst',\n format='markdown_github',\n )\n\n lines[:] = data.split('\\n') + kept_lines\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Cirq'\ncopyright = '2018, The Cirq Developers' # pylint: disable=redefined-builtin\nauthor = 'The Cirq Developers'\n\n# The full version, including alpha/beta/rc tags\n__version__ = ''\nexec(open(os.path.join(cirq_root_path, 'cirq', '_version.py')).read())\nrelease = __version__\n\n# The short X.Y version\nversion = release # '.'.join(release.split('.')[:2])\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# Allow markdown includes.\n# http://www.sphinx-doc.org/en/master/markdown.html\nsource_parsers = {\n '.md': 'recommonmark.parser.CommonMarkParser',\n}\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# -- Options for HTML output ---------------------------------------------\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_favicon = 'favicon.ico'\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output -----------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Cirqdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Cirq.tex', 'Cirq Documentation',\n 'The Cirq Developers', 'manual'),\n]\n\n\n# -- Options for manual page output --------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'cirq', 'Cirq Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output ------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Cirq', 'Cirq Documentation',\n author, 'Cirq', 'A python library for NISQ circuits.',\n 'Miscellaneous'),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# Generate subpages for reference docs automatically.\n# http://www.sphinx-doc.org/en/master/ext/autosummary.html#generating-stub-pages-automatically\nautosummary_generate = True\n", "path": "docs/conf.py"}, {"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom cirq import circuits\nfrom cirq.contrib.qasm_import._parser import QasmParser\n\n\nclass QasmCircuitParser:\n \"\"\"QasmCircuitParser is currently partially developed, not functional,\n DO NOT USE.\n TODO(https://github.com/quantumlib/Cirq/issues/1548)\n It will serve as the entrypoint for parsing QASM files.\"\"\"\n\n def __init__(self):\n pass\n\n def parse(self, qasm: str) -> circuits.Circuit:\n return QasmParser().parse(qasm).circuit\n", "path": "cirq/contrib/qasm_import/qasm.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom cirq.contrib.qasm_import.exception import QasmException\nfrom cirq.contrib.qasm_import.qasm import circuit_from_qasm\n", "path": "cirq/contrib/qasm_import/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# coverage: ignore\n\n# Configuration file for the Sphinx documentation builder.\n# See http://www.sphinx-doc.org/en/master/config for help\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nfrom typing import List, Any\n\nimport os\nimport sys\n\nimport pypandoc\n\ncirq_root_path = os.path.dirname(os.path.dirname(__file__))\nsys.path.insert(0, cirq_root_path)\n\n\ndef setup(app):\n app.add_config_value('pandoc_use_parser', 'markdown', True)\n app.connect('autodoc-process-docstring', pandoc_process)\n\n\ndef convert_markdown_mathjax_for_rst(lines: List[str]) -> List[str]:\n if all('$$' not in line for line in lines):\n return lines\n\n data = '\\n'.join(lines)\n sections = data.split('$$')\n if len(sections) % 2 != 1:\n raise ValueError('Mismatched number of \"$$\" latex tokens.')\n\n result = []\n for i, s in enumerate(sections):\n if i % 2:\n # Avoid getting split across divs.\n s = ' '.join(s.split('\\n'))\n # Avoid intermediate layers turning our newlines into slashes.\n s = s.replace('\\\\\\\\', '\\\\newline')\n # Keep the $$ so MathJax can find it.\n result.append('$${}$$'.format(s))\n else:\n # Work around bad table detection in pandoc by concatenating\n # lines from the same paragraph.\n s = '\\n\\n'.join(e.replace('\\n', ' ') for e in s.split('\\n\\n'))\n\n # Convert markdown to rst.\n out = pypandoc.convert(s, to='rst', format='markdown_github')\n\n # Not sure why pandoc is escaping these...\n out = out.replace(r'\\|', '|')\n\n result.extend(out.split('\\n'))\n\n return result\n\n\ndef pandoc_process(app,\n what: str,\n name: str,\n obj: Any,\n options,\n lines: List[str]\n ) -> None:\n if not getattr(obj, '__module__', 'cirq').startswith('cirq'):\n # Don't convert objects from other modules.\n return\n\n # Don't convert output from Napoleon extension, which is already rst.\n i = 0\n while i < len(lines) and not lines[i].startswith(':'):\n i += 1\n if not i:\n return\n\n converted_lines = convert_markdown_mathjax_for_rst(lines[:i])\n kept_lines = lines[i:]\n\n data = pypandoc.convert(\n '\\n'.join(converted_lines),\n to='rst',\n format='markdown_github',\n )\n\n lines[:] = data.split('\\n') + kept_lines\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Cirq'\ncopyright = '2018, The Cirq Developers' # pylint: disable=redefined-builtin\nauthor = 'The Cirq Developers'\n\n# The full version, including alpha/beta/rc tags\n__version__ = ''\nexec(open(os.path.join(cirq_root_path, 'cirq', '_version.py')).read())\nrelease = __version__\n\n# The short X.Y version\nversion = release # '.'.join(release.split('.')[:2])\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx_markdown_tables',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# Allow markdown includes.\n# http://www.sphinx-doc.org/en/master/markdown.html\nsource_parsers = {\n '.md': 'recommonmark.parser.CommonMarkParser',\n}\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# -- Options for HTML output ---------------------------------------------\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_favicon = 'favicon.ico'\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output -----------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Cirqdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Cirq.tex', 'Cirq Documentation',\n 'The Cirq Developers', 'manual'),\n]\n\n\n# -- Options for manual page output --------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'cirq', 'Cirq Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output ------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Cirq', 'Cirq Documentation',\n author, 'Cirq', 'A python library for NISQ circuits.',\n 'Miscellaneous'),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# Generate subpages for reference docs automatically.\n# http://www.sphinx-doc.org/en/master/ext/autosummary.html#generating-stub-pages-automatically\nautosummary_generate = True\n", "path": "docs/conf.py"}, {"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom cirq import circuits\nfrom cirq.contrib.qasm_import._parser import QasmParser\n\n\ndef circuit_from_qasm(qasm: str) -> circuits.Circuit:\n \"\"\"Parses an OpenQASM string to `cirq.Circuit`.\n\n Args:\n qasm: The OpenQASM string\n\n Returns:\n The parsed circuit\n \"\"\"\n\n return QasmParser().parse(qasm).circuit\n", "path": "cirq/contrib/qasm_import/qasm.py"}]} | 3,199 | 520 |
gh_patches_debug_18498 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-1156 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[V3] [p]mock doesn't work with aliases
Please be sure to read through other issues as well to make sure what you are suggesting/reporting has not already been suggested/reported.
### Type:
- [ ] Suggestion
- [x] Bug
### Brief description of the problem
Attempting to run an alias as another user with `[p]mock` does nothing
### Expected behavior
It should run the command the alias is for
### Actual behavior
It does nothing
### Steps to reproduce
1. Create an alias (for example, `[p]alias add c contact`)
2. Try to use mock with it (`[p]mock <user> c`)
3. Nothing happens
--- END ISSUE ---
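A note on the likely cause, inferred from the code below rather than stated in the issue: `[p]mock` re-invokes the message through `bot.process_commands`, which only resolves registered commands. Alias handling in Red-style bots typically lives in a separate cog's `on_message` listener, so anything that bypasses the normal message dispatch never reaches it. A minimal sketch of that listener pattern follows (the cog and names are illustrative, not Red's actual implementation):

```python
# Illustrative alias cog: it watches raw messages, rewrites a known alias to
# the real command, and hands the message back to normal command processing.
import discord


class AliasCogSketch:
    def __init__(self, bot, prefix: str = "!"):
        self.bot = bot
        self.prefix = prefix
        self.aliases = {"c": "contact"}  # alias name -> real command name

    async def on_message(self, message: discord.Message):
        if not message.content.startswith(self.prefix):
            return
        name, _, rest = message.content[len(self.prefix):].partition(" ")
        if name in self.aliases:
            # Rewrite the invocation to the real command, then let the bot's
            # ordinary command handling pick it up.
            message.content = self.prefix + self.aliases[name] + (" " + rest if rest else "")
            await self.bot.process_commands(message)
```

If that picture is right, only a full `message` event dispatch lets both the command handler and listeners such as this one see the mocked invocation.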
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redbot/core/dev_commands.py`
Content:
```
1 import asyncio
2 import inspect
3 import io
4 import textwrap
5 import traceback
6 from contextlib import redirect_stdout
7
8 import discord
9 from discord.ext import commands
10 from . import checks
11 from .i18n import CogI18n
12 from .utils.chat_formatting import box, pagify
13 """
14 Notice:
15
16 95% of the below code came from R.Danny which can be found here:
17
18 https://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py
19 """
20
21 _ = CogI18n("Dev", __file__)
22
23
24 class Dev:
25 """Various development focused utilities."""
26
27 def __init__(self):
28 self._last_result = None
29 self.sessions = set()
30
31 @staticmethod
32 def cleanup_code(content):
33 """Automatically removes code blocks from the code."""
34 # remove ```py\n```
35 if content.startswith('```') and content.endswith('```'):
36 return '\n'.join(content.split('\n')[1:-1])
37
38 # remove `foo`
39 return content.strip('` \n')
40
41 @staticmethod
42 def get_syntax_error(e):
43 """Format a syntax error to send to the user.
44
45 Returns a string representation of the error formatted as a codeblock.
46 """
47 if e.text is None:
48 return box('{0.__class__.__name__}: {0}'.format(e), lang="py")
49 return box(
50 '{0.text}{1:>{0.offset}}\n{2}: {0}'
51 ''.format(e, '^', type(e).__name__),
52 lang="py")
53
54 @staticmethod
55 def get_pages(msg: str):
56 """Pagify the given message for output to the user."""
57 return pagify(msg, delims=["\n", " "], priority=True, shorten_by=10)
58
59 @staticmethod
60 def sanitize_output(ctx: commands.Context, input_: str) -> str:
61 """Hides the bot's token from a string."""
62 token = ctx.bot.http.token
63 r = "[EXPUNGED]"
64 result = input_.replace(token, r)
65 result = result.replace(token.lower(), r)
66 result = result.replace(token.upper(), r)
67 return result
68
69 @commands.command()
70 @checks.is_owner()
71 async def debug(self, ctx, *, code):
72 """Evaluate a statement of python code.
73
74 The bot will always respond with the return value of the code.
75 If the return value of the code is a coroutine, it will be awaited,
76 and the result of that will be the bot's response.
77
78 Note: Only one statement may be evaluated. Using await, yield or
79 similar restricted keywords will result in a syntax error. For multiple
80 lines or asynchronous code, see [p]repl or [p]eval.
81
82 Environment Variables:
83 ctx - command invokation context
84 bot - bot object
85 channel - the current channel object
86 author - command author's member object
87 message - the command's message object
88 discord - discord.py library
89 commands - discord.py commands extension
90 _ - The result of the last dev command.
91 """
92 env = {
93 'bot': ctx.bot,
94 'ctx': ctx,
95 'channel': ctx.channel,
96 'author': ctx.author,
97 'guild': ctx.guild,
98 'message': ctx.message,
99 'discord': discord,
100 'commands': commands,
101 '_': self._last_result
102 }
103
104 code = self.cleanup_code(code)
105
106 try:
107 result = eval(code, env)
108 except SyntaxError as e:
109 await ctx.send(self.get_syntax_error(e))
110 return
111 except Exception as e:
112 await ctx.send(
113 box('{}: {!s}'.format(type(e).__name__, e), lang='py'))
114 return
115
116 if asyncio.iscoroutine(result):
117 result = await result
118
119 self._last_result = result
120
121 result = self.sanitize_output(ctx, str(result))
122
123 await ctx.send_interactive(self.get_pages(result), box_lang="py")
124
125 @commands.command(name='eval')
126 @checks.is_owner()
127 async def _eval(self, ctx, *, body: str):
128 """Execute asynchronous code.
129
130 This command wraps code into the body of an async function and then
131 calls and awaits it. The bot will respond with anything printed to
132 stdout, as well as the return value of the function.
133
134 The code can be within a codeblock, inline code or neither, as long
135 as they are not mixed and they are formatted correctly.
136
137 Environment Variables:
138 ctx - command invokation context
139 bot - bot object
140 channel - the current channel object
141 author - command author's member object
142 message - the command's message object
143 discord - discord.py library
144 commands - discord.py commands extension
145 _ - The result of the last dev command.
146 """
147 env = {
148 'bot': ctx.bot,
149 'ctx': ctx,
150 'channel': ctx.channel,
151 'author': ctx.author,
152 'guild': ctx.guild,
153 'message': ctx.message,
154 'discord': discord,
155 'commands': commands,
156 '_': self._last_result
157 }
158
159 body = self.cleanup_code(body)
160 stdout = io.StringIO()
161
162 to_compile = 'async def func():\n%s' % textwrap.indent(body, ' ')
163
164 try:
165 exec(to_compile, env)
166 except SyntaxError as e:
167 return await ctx.send(self.get_syntax_error(e))
168
169 func = env['func']
170 result = None
171 try:
172 with redirect_stdout(stdout):
173 result = await func()
174 except:
175 printed = "{}{}".format(stdout.getvalue(), traceback.format_exc())
176 else:
177 printed = stdout.getvalue()
178 await ctx.tick()
179
180 if result is not None:
181 self._last_result = result
182 msg = "{}{}".format(printed, result)
183 else:
184 msg = printed
185 msg = self.sanitize_output(ctx, msg)
186
187 await ctx.send_interactive(self.get_pages(msg), box_lang="py")
188
189 @commands.command()
190 @checks.is_owner()
191 async def repl(self, ctx):
192 """Open an interactive REPL.
193
194 The REPL will only recognise code as messages which start with a
195 backtick. This includes codeblocks, and as such multiple lines can be
196 evaluated.
197
198 You may not await any code in this REPL unless you define it inside an
199 async function.
200 """
201 variables = {
202 'ctx': ctx,
203 'bot': ctx.bot,
204 'message': ctx.message,
205 'guild': ctx.guild,
206 'channel': ctx.channel,
207 'author': ctx.author,
208 '_': None,
209 }
210
211 if ctx.channel.id in self.sessions:
212 await ctx.send(_('Already running a REPL session in this channel. '
213 'Exit it with `quit`.'))
214 return
215
216 self.sessions.add(ctx.channel.id)
217 await ctx.send(_('Enter code to execute or evaluate.'
218 ' `exit()` or `quit` to exit.'))
219
220 msg_check = lambda m: (m.author == ctx.author and
221 m.channel == ctx.channel and
222 m.content.startswith('`'))
223
224 while True:
225 response = await ctx.bot.wait_for("message", check=msg_check)
226
227 cleaned = self.cleanup_code(response.content)
228
229 if cleaned in ('quit', 'exit', 'exit()'):
230 await ctx.send('Exiting.')
231 self.sessions.remove(ctx.channel.id)
232 return
233
234 executor = exec
235 if cleaned.count('\n') == 0:
236 # single statement, potentially 'eval'
237 try:
238 code = compile(cleaned, '<repl session>', 'eval')
239 except SyntaxError:
240 pass
241 else:
242 executor = eval
243
244 if executor is exec:
245 try:
246 code = compile(cleaned, '<repl session>', 'exec')
247 except SyntaxError as e:
248 await ctx.send(self.get_syntax_error(e))
249 continue
250
251 variables['message'] = response
252
253 stdout = io.StringIO()
254
255 msg = None
256
257 try:
258 with redirect_stdout(stdout):
259 result = executor(code, variables)
260 if inspect.isawaitable(result):
261 result = await result
262 except:
263 value = stdout.getvalue()
264 msg = "{}{}".format(value, traceback.format_exc())
265 else:
266 value = stdout.getvalue()
267 if result is not None:
268 msg = "{}{}".format(value, result)
269 variables['_'] = result
270 elif value:
271 msg = "{}".format(value)
272
273 msg = self.sanitize_output(ctx, msg)
274
275 try:
276 await ctx.send_interactive(self.get_pages(msg), box_lang="py")
277 except discord.Forbidden:
278 pass
279 except discord.HTTPException as e:
280 await ctx.send(_('Unexpected error: `{}`').format(e))
281
282 @commands.command()
283 @checks.is_owner()
284 async def mock(self, ctx, user: discord.Member, *, command):
285 """Mock another user invoking a command.
286
287 The prefix must not be entered.
288 """
289 # Since we have stateful objects now this might be pretty bad
290 # Sorry Danny
291 old_author = ctx.author
292 old_content = ctx.message.content
293 ctx.message.author = user
294 ctx.message.content = ctx.prefix + command
295
296 await ctx.bot.process_commands(ctx.message)
297
298 ctx.message.author = old_author
299 ctx.message.content = old_content
300
301 @commands.command(name="mockmsg")
302 @checks.is_owner()
303 async def mock_msg(self, ctx, user: discord.Member, *, content: str):
304 """Dispatch a message event as if it were sent by a different user.
305
306 Only reads the raw content of the message. Attachments, embeds etc. are
307 ignored.
308 """
309 old_author = ctx.author
310 old_content = ctx.message.content
311 ctx.message.author = user
312 ctx.message.content = content
313
314 ctx.bot.dispatch("message", ctx.message)
315
316 # If we change the author and content back too quickly,
317 # the bot won't process the mocked message in time.
318 await asyncio.sleep(2)
319 ctx.message.author = old_author
320 ctx.message.content = old_content
321
```
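The `mockmsg` command at the end of the listing already demonstrates a dispatch-based pattern: it fires a real `message` event instead of calling `process_commands` on a temporarily mutated context. Below is a hedged sketch of applying the same idea to `mock`; it illustrates the approach and is not necessarily the exact patch the maintainers shipped.

```python
# Sketch: rework `mock` around the dispatch pattern that `mockmsg` already uses,
# so on_message listeners (alias handling included) also see the mocked message.
from copy import copy

import discord
from discord.ext import commands

from redbot.core import checks


class DevMockSketch:
    """Minimal container for the reworked command; illustrative only."""

    @commands.command()
    @checks.is_owner()
    async def mock(self, ctx, user: discord.Member, *, command: str):
        """Mock another user invoking a command. The prefix must not be entered."""
        msg = copy(ctx.message)             # copy instead of mutating ctx.message in place
        msg.author = user                   # attribute the message to the target user
        msg.content = ctx.prefix + command

        # A message event dispatch goes through the full on_message path,
        # reaching listeners as well as the normal command processor.
        ctx.bot.dispatch("message", msg)
```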
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redbot/core/dev_commands.py b/redbot/core/dev_commands.py
--- a/redbot/core/dev_commands.py
+++ b/redbot/core/dev_commands.py
@@ -4,6 +4,7 @@
import textwrap
import traceback
from contextlib import redirect_stdout
+from copy import copy
import discord
from discord.ext import commands
@@ -286,17 +287,11 @@
The prefix must not be entered.
"""
- # Since we have stateful objects now this might be pretty bad
- # Sorry Danny
- old_author = ctx.author
- old_content = ctx.message.content
- ctx.message.author = user
- ctx.message.content = ctx.prefix + command
+ msg = copy(ctx.message)
+ msg.author = user
+ msg.content = ctx.prefix + command
- await ctx.bot.process_commands(ctx.message)
-
- ctx.message.author = old_author
- ctx.message.content = old_content
+ ctx.bot.dispatch('message', msg)
@commands.command(name="mockmsg")
@checks.is_owner()
| {"golden_diff": "diff --git a/redbot/core/dev_commands.py b/redbot/core/dev_commands.py\n--- a/redbot/core/dev_commands.py\n+++ b/redbot/core/dev_commands.py\n@@ -4,6 +4,7 @@\n import textwrap\n import traceback\n from contextlib import redirect_stdout\n+from copy import copy\n \n import discord\n from discord.ext import commands\n@@ -286,17 +287,11 @@\n \n The prefix must not be entered.\n \"\"\"\n- # Since we have stateful objects now this might be pretty bad\n- # Sorry Danny\n- old_author = ctx.author\n- old_content = ctx.message.content\n- ctx.message.author = user\n- ctx.message.content = ctx.prefix + command\n+ msg = copy(ctx.message)\n+ msg.author = user\n+ msg.content = ctx.prefix + command\n \n- await ctx.bot.process_commands(ctx.message)\n-\n- ctx.message.author = old_author\n- ctx.message.content = old_content\n+ ctx.bot.dispatch('message', msg)\n \n @commands.command(name=\"mockmsg\")\n @checks.is_owner()\n", "issue": "[V3] [p]mock doesn't work with aliases\nPlease be sure to read through other issues as well to make sure what you are suggesting/reporting has not already\r\nbeen suggested/reported\r\n\r\n### Type:\r\n\r\n- [ ] Suggestion\r\n- [x] Bug\r\n\r\n### Brief description of the problem\r\nAttempting to run an alias as another user with `[p]mock` does nothing\r\n\r\n### Expected behavior\r\nIt should run the command the alias is for\r\n\r\n### Actual behavior\r\nIt does nothing\r\n### Steps to reproduce\r\n\r\n1. Create an alias (for example, `[p]alias add c contact`\r\n2. Try to use mock with it (`[p]mock <user> c`)\r\n3. Nothing happens\r\n\n", "before_files": [{"content": "import asyncio\nimport inspect\nimport io\nimport textwrap\nimport traceback\nfrom contextlib import redirect_stdout\n\nimport discord\nfrom discord.ext import commands\nfrom . 
import checks\nfrom .i18n import CogI18n\nfrom .utils.chat_formatting import box, pagify\n\"\"\"\nNotice:\n\n95% of the below code came from R.Danny which can be found here:\n\nhttps://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py\n\"\"\"\n\n_ = CogI18n(\"Dev\", __file__)\n\n\nclass Dev:\n \"\"\"Various development focused utilities.\"\"\"\n\n def __init__(self):\n self._last_result = None\n self.sessions = set()\n\n @staticmethod\n def cleanup_code(content):\n \"\"\"Automatically removes code blocks from the code.\"\"\"\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')\n\n @staticmethod\n def get_syntax_error(e):\n \"\"\"Format a syntax error to send to the user.\n\n Returns a string representation of the error formatted as a codeblock.\n \"\"\"\n if e.text is None:\n return box('{0.__class__.__name__}: {0}'.format(e), lang=\"py\")\n return box(\n '{0.text}{1:>{0.offset}}\\n{2}: {0}'\n ''.format(e, '^', type(e).__name__),\n lang=\"py\")\n\n @staticmethod\n def get_pages(msg: str):\n \"\"\"Pagify the given message for output to the user.\"\"\"\n return pagify(msg, delims=[\"\\n\", \" \"], priority=True, shorten_by=10)\n\n @staticmethod\n def sanitize_output(ctx: commands.Context, input_: str) -> str:\n \"\"\"Hides the bot's token from a string.\"\"\"\n token = ctx.bot.http.token\n r = \"[EXPUNGED]\"\n result = input_.replace(token, r)\n result = result.replace(token.lower(), r)\n result = result.replace(token.upper(), r)\n return result\n\n @commands.command()\n @checks.is_owner()\n async def debug(self, ctx, *, code):\n \"\"\"Evaluate a statement of python code.\n\n The bot will always respond with the return value of the code.\n If the return value of the code is a coroutine, it will be awaited,\n and the result of that will be the bot's response.\n\n Note: Only one statement may be evaluated. Using await, yield or\n similar restricted keywords will result in a syntax error. For multiple\n lines or asynchronous code, see [p]repl or [p]eval.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - discord.py commands extension\n _ - The result of the last dev command.\n \"\"\"\n env = {\n 'bot': ctx.bot,\n 'ctx': ctx,\n 'channel': ctx.channel,\n 'author': ctx.author,\n 'guild': ctx.guild,\n 'message': ctx.message,\n 'discord': discord,\n 'commands': commands,\n '_': self._last_result\n }\n\n code = self.cleanup_code(code)\n\n try:\n result = eval(code, env)\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n return\n except Exception as e:\n await ctx.send(\n box('{}: {!s}'.format(type(e).__name__, e), lang='py'))\n return\n\n if asyncio.iscoroutine(result):\n result = await result\n\n self._last_result = result\n\n result = self.sanitize_output(ctx, str(result))\n\n await ctx.send_interactive(self.get_pages(result), box_lang=\"py\")\n\n @commands.command(name='eval')\n @checks.is_owner()\n async def _eval(self, ctx, *, body: str):\n \"\"\"Execute asynchronous code.\n\n This command wraps code into the body of an async function and then\n calls and awaits it. 
The bot will respond with anything printed to\n stdout, as well as the return value of the function.\n\n The code can be within a codeblock, inline code or neither, as long\n as they are not mixed and they are formatted correctly.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - discord.py commands extension\n _ - The result of the last dev command.\n \"\"\"\n env = {\n 'bot': ctx.bot,\n 'ctx': ctx,\n 'channel': ctx.channel,\n 'author': ctx.author,\n 'guild': ctx.guild,\n 'message': ctx.message,\n 'discord': discord,\n 'commands': commands,\n '_': self._last_result\n }\n\n body = self.cleanup_code(body)\n stdout = io.StringIO()\n\n to_compile = 'async def func():\\n%s' % textwrap.indent(body, ' ')\n\n try:\n exec(to_compile, env)\n except SyntaxError as e:\n return await ctx.send(self.get_syntax_error(e))\n\n func = env['func']\n result = None\n try:\n with redirect_stdout(stdout):\n result = await func()\n except:\n printed = \"{}{}\".format(stdout.getvalue(), traceback.format_exc())\n else:\n printed = stdout.getvalue()\n await ctx.tick()\n\n if result is not None:\n self._last_result = result\n msg = \"{}{}\".format(printed, result)\n else:\n msg = printed\n msg = self.sanitize_output(ctx, msg)\n\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n\n @commands.command()\n @checks.is_owner()\n async def repl(self, ctx):\n \"\"\"Open an interactive REPL.\n\n The REPL will only recognise code as messages which start with a\n backtick. This includes codeblocks, and as such multiple lines can be\n evaluated.\n\n You may not await any code in this REPL unless you define it inside an\n async function.\n \"\"\"\n variables = {\n 'ctx': ctx,\n 'bot': ctx.bot,\n 'message': ctx.message,\n 'guild': ctx.guild,\n 'channel': ctx.channel,\n 'author': ctx.author,\n '_': None,\n }\n\n if ctx.channel.id in self.sessions:\n await ctx.send(_('Already running a REPL session in this channel. 
'\n 'Exit it with `quit`.'))\n return\n\n self.sessions.add(ctx.channel.id)\n await ctx.send(_('Enter code to execute or evaluate.'\n ' `exit()` or `quit` to exit.'))\n\n msg_check = lambda m: (m.author == ctx.author and\n m.channel == ctx.channel and\n m.content.startswith('`'))\n\n while True:\n response = await ctx.bot.wait_for(\"message\", check=msg_check)\n\n cleaned = self.cleanup_code(response.content)\n\n if cleaned in ('quit', 'exit', 'exit()'):\n await ctx.send('Exiting.')\n self.sessions.remove(ctx.channel.id)\n return\n\n executor = exec\n if cleaned.count('\\n') == 0:\n # single statement, potentially 'eval'\n try:\n code = compile(cleaned, '<repl session>', 'eval')\n except SyntaxError:\n pass\n else:\n executor = eval\n\n if executor is exec:\n try:\n code = compile(cleaned, '<repl session>', 'exec')\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n continue\n\n variables['message'] = response\n\n stdout = io.StringIO()\n\n msg = None\n\n try:\n with redirect_stdout(stdout):\n result = executor(code, variables)\n if inspect.isawaitable(result):\n result = await result\n except:\n value = stdout.getvalue()\n msg = \"{}{}\".format(value, traceback.format_exc())\n else:\n value = stdout.getvalue()\n if result is not None:\n msg = \"{}{}\".format(value, result)\n variables['_'] = result\n elif value:\n msg = \"{}\".format(value)\n\n msg = self.sanitize_output(ctx, msg)\n\n try:\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n except discord.Forbidden:\n pass\n except discord.HTTPException as e:\n await ctx.send(_('Unexpected error: `{}`').format(e))\n\n @commands.command()\n @checks.is_owner()\n async def mock(self, ctx, user: discord.Member, *, command):\n \"\"\"Mock another user invoking a command.\n\n The prefix must not be entered.\n \"\"\"\n # Since we have stateful objects now this might be pretty bad\n # Sorry Danny\n old_author = ctx.author\n old_content = ctx.message.content\n ctx.message.author = user\n ctx.message.content = ctx.prefix + command\n\n await ctx.bot.process_commands(ctx.message)\n\n ctx.message.author = old_author\n ctx.message.content = old_content\n\n @commands.command(name=\"mockmsg\")\n @checks.is_owner()\n async def mock_msg(self, ctx, user: discord.Member, *, content: str):\n \"\"\"Dispatch a message event as if it were sent by a different user.\n\n Only reads the raw content of the message. Attachments, embeds etc. are\n ignored.\n \"\"\"\n old_author = ctx.author\n old_content = ctx.message.content\n ctx.message.author = user\n ctx.message.content = content\n\n ctx.bot.dispatch(\"message\", ctx.message)\n\n # If we change the author and content back too quickly,\n # the bot won't process the mocked message in time.\n await asyncio.sleep(2)\n ctx.message.author = old_author\n ctx.message.content = old_content\n", "path": "redbot/core/dev_commands.py"}], "after_files": [{"content": "import asyncio\nimport inspect\nimport io\nimport textwrap\nimport traceback\nfrom contextlib import redirect_stdout\nfrom copy import copy\n\nimport discord\nfrom discord.ext import commands\nfrom . 
import checks\nfrom .i18n import CogI18n\nfrom .utils.chat_formatting import box, pagify\n\"\"\"\nNotice:\n\n95% of the below code came from R.Danny which can be found here:\n\nhttps://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py\n\"\"\"\n\n_ = CogI18n(\"Dev\", __file__)\n\n\nclass Dev:\n \"\"\"Various development focused utilities.\"\"\"\n\n def __init__(self):\n self._last_result = None\n self.sessions = set()\n\n @staticmethod\n def cleanup_code(content):\n \"\"\"Automatically removes code blocks from the code.\"\"\"\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')\n\n @staticmethod\n def get_syntax_error(e):\n \"\"\"Format a syntax error to send to the user.\n\n Returns a string representation of the error formatted as a codeblock.\n \"\"\"\n if e.text is None:\n return box('{0.__class__.__name__}: {0}'.format(e), lang=\"py\")\n return box(\n '{0.text}{1:>{0.offset}}\\n{2}: {0}'\n ''.format(e, '^', type(e).__name__),\n lang=\"py\")\n\n @staticmethod\n def get_pages(msg: str):\n \"\"\"Pagify the given message for output to the user.\"\"\"\n return pagify(msg, delims=[\"\\n\", \" \"], priority=True, shorten_by=10)\n\n @staticmethod\n def sanitize_output(ctx: commands.Context, input_: str) -> str:\n \"\"\"Hides the bot's token from a string.\"\"\"\n token = ctx.bot.http.token\n r = \"[EXPUNGED]\"\n result = input_.replace(token, r)\n result = result.replace(token.lower(), r)\n result = result.replace(token.upper(), r)\n return result\n\n @commands.command()\n @checks.is_owner()\n async def debug(self, ctx, *, code):\n \"\"\"Evaluate a statement of python code.\n\n The bot will always respond with the return value of the code.\n If the return value of the code is a coroutine, it will be awaited,\n and the result of that will be the bot's response.\n\n Note: Only one statement may be evaluated. Using await, yield or\n similar restricted keywords will result in a syntax error. For multiple\n lines or asynchronous code, see [p]repl or [p]eval.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - discord.py commands extension\n _ - The result of the last dev command.\n \"\"\"\n env = {\n 'bot': ctx.bot,\n 'ctx': ctx,\n 'channel': ctx.channel,\n 'author': ctx.author,\n 'guild': ctx.guild,\n 'message': ctx.message,\n 'discord': discord,\n 'commands': commands,\n '_': self._last_result\n }\n\n code = self.cleanup_code(code)\n\n try:\n result = eval(code, env)\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n return\n except Exception as e:\n await ctx.send(\n box('{}: {!s}'.format(type(e).__name__, e), lang='py'))\n return\n\n if asyncio.iscoroutine(result):\n result = await result\n\n self._last_result = result\n\n result = self.sanitize_output(ctx, str(result))\n\n await ctx.send_interactive(self.get_pages(result), box_lang=\"py\")\n\n @commands.command(name='eval')\n @checks.is_owner()\n async def _eval(self, ctx, *, body: str):\n \"\"\"Execute asynchronous code.\n\n This command wraps code into the body of an async function and then\n calls and awaits it. 
The bot will respond with anything printed to\n stdout, as well as the return value of the function.\n\n The code can be within a codeblock, inline code or neither, as long\n as they are not mixed and they are formatted correctly.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - discord.py commands extension\n _ - The result of the last dev command.\n \"\"\"\n env = {\n 'bot': ctx.bot,\n 'ctx': ctx,\n 'channel': ctx.channel,\n 'author': ctx.author,\n 'guild': ctx.guild,\n 'message': ctx.message,\n 'discord': discord,\n 'commands': commands,\n '_': self._last_result\n }\n\n body = self.cleanup_code(body)\n stdout = io.StringIO()\n\n to_compile = 'async def func():\\n%s' % textwrap.indent(body, ' ')\n\n try:\n exec(to_compile, env)\n except SyntaxError as e:\n return await ctx.send(self.get_syntax_error(e))\n\n func = env['func']\n result = None\n try:\n with redirect_stdout(stdout):\n result = await func()\n except:\n printed = \"{}{}\".format(stdout.getvalue(), traceback.format_exc())\n else:\n printed = stdout.getvalue()\n await ctx.tick()\n\n if result is not None:\n self._last_result = result\n msg = \"{}{}\".format(printed, result)\n else:\n msg = printed\n msg = self.sanitize_output(ctx, msg)\n\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n\n @commands.command()\n @checks.is_owner()\n async def repl(self, ctx):\n \"\"\"Open an interactive REPL.\n\n The REPL will only recognise code as messages which start with a\n backtick. This includes codeblocks, and as such multiple lines can be\n evaluated.\n\n You may not await any code in this REPL unless you define it inside an\n async function.\n \"\"\"\n variables = {\n 'ctx': ctx,\n 'bot': ctx.bot,\n 'message': ctx.message,\n 'guild': ctx.guild,\n 'channel': ctx.channel,\n 'author': ctx.author,\n '_': None,\n }\n\n if ctx.channel.id in self.sessions:\n await ctx.send(_('Already running a REPL session in this channel. 
'\n 'Exit it with `quit`.'))\n return\n\n self.sessions.add(ctx.channel.id)\n await ctx.send(_('Enter code to execute or evaluate.'\n ' `exit()` or `quit` to exit.'))\n\n msg_check = lambda m: (m.author == ctx.author and\n m.channel == ctx.channel and\n m.content.startswith('`'))\n\n while True:\n response = await ctx.bot.wait_for(\"message\", check=msg_check)\n\n cleaned = self.cleanup_code(response.content)\n\n if cleaned in ('quit', 'exit', 'exit()'):\n await ctx.send('Exiting.')\n self.sessions.remove(ctx.channel.id)\n return\n\n executor = exec\n if cleaned.count('\\n') == 0:\n # single statement, potentially 'eval'\n try:\n code = compile(cleaned, '<repl session>', 'eval')\n except SyntaxError:\n pass\n else:\n executor = eval\n\n if executor is exec:\n try:\n code = compile(cleaned, '<repl session>', 'exec')\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n continue\n\n variables['message'] = response\n\n stdout = io.StringIO()\n\n msg = None\n\n try:\n with redirect_stdout(stdout):\n result = executor(code, variables)\n if inspect.isawaitable(result):\n result = await result\n except:\n value = stdout.getvalue()\n msg = \"{}{}\".format(value, traceback.format_exc())\n else:\n value = stdout.getvalue()\n if result is not None:\n msg = \"{}{}\".format(value, result)\n variables['_'] = result\n elif value:\n msg = \"{}\".format(value)\n\n msg = self.sanitize_output(ctx, msg)\n\n try:\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n except discord.Forbidden:\n pass\n except discord.HTTPException as e:\n await ctx.send(_('Unexpected error: `{}`').format(e))\n\n @commands.command()\n @checks.is_owner()\n async def mock(self, ctx, user: discord.Member, *, command):\n \"\"\"Mock another user invoking a command.\n\n The prefix must not be entered.\n \"\"\"\n msg = copy(ctx.message)\n msg.author = user\n msg.content = ctx.prefix + command\n\n ctx.bot.dispatch('message', msg)\n\n @commands.command(name=\"mockmsg\")\n @checks.is_owner()\n async def mock_msg(self, ctx, user: discord.Member, *, content: str):\n \"\"\"Dispatch a message event as if it were sent by a different user.\n\n Only reads the raw content of the message. Attachments, embeds etc. are\n ignored.\n \"\"\"\n old_author = ctx.author\n old_content = ctx.message.content\n ctx.message.author = user\n ctx.message.content = content\n\n ctx.bot.dispatch(\"message\", ctx.message)\n\n # If we change the author and content back too quickly,\n # the bot won't process the mocked message in time.\n await asyncio.sleep(2)\n ctx.message.author = old_author\n ctx.message.content = old_content\n", "path": "redbot/core/dev_commands.py"}]} | 3,473 | 240 |
gh_patches_debug_39329 | rasdani/github-patches | git_diff | optuna__optuna-4097 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MLflowCallback error when using n_jobs > 1
### Expected behavior
Be able to use more than one job (`n_jobs > 1`) and log the trials to MLflow at the same time.
### Environment
- Optuna version: 3.0.3
- Python version: 3.9.7
- OS: Linux-4.14.35-1902.8.4.el7uek.x86_64-x86_64-with-glibc2.31
- MLFlow version: 1.29.0
### Error messages, stack traces, or logs
```shell
Exception: Run with UUID 13a5f691281742d1a9d6b6f98c43203a is already active. To start a new run, first end the current run with mlflow.end_run(). To start a nested run, call start_run with nested=True
```
### Steps to reproduce
Trying to use multiprocessing (`n_jobs > 1`), I get the error shown above.
I know it is an MLflow issue, but there are some fixes that can be made when interacting with the MLflow API to support multiprocessing,
so some changes to `MLflowCallback` should be made.
Potential fixes:
https://github.com/mlflow/mlflow/issues/3592
https://github.com/mlflow/mlflow/issues/1420
I even tried using:
`
mlflow_kwargs={
'nested': False
}
`
but this causes duplicated data and errors in MLflow.
I also tried with
`
from joblib import parallel_backend
with parallel_backend('multiprocessing'):`
during `study.optimize()`.
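
A minimal, hypothetical workaround sketch (not from the original report and not the library's official fix): serializing the callback with a `threading.Lock` so that concurrent trials in one process never overlap their `mlflow.start_run()` calls. This assumes `n_jobs > 1` runs trials in threads of a single process; it would not help across separate processes.

```python
import threading

from optuna.integration.mlflow import MLflowCallback

_mlflow_lock = threading.Lock()  # shared by all trials in this process


class LockedMLflowCallback(MLflowCallback):
    # Serialize the whole start_run()/log/end_run() sequence per trial so two
    # threads can never both hold an active MLflow run at the same time.
    def __call__(self, study, trial):
        with _mlflow_lock:
            super().__call__(study, trial)


# Usage sketch (placeholder names as in the docstring above):
# mlflc = LockedMLflowCallback(tracking_uri=YOUR_TRACKING_URI, metric_name="my metric score")
# study.optimize(objective, n_trials=10, n_jobs=4, callbacks=[mlflc])
```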
### Additional context (optional)
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/integration/mlflow.py`
Content:
```
1 import functools
2 from typing import Any
3 from typing import Callable
4 from typing import Dict
5 from typing import List
6 from typing import Optional
7 from typing import Sequence
8 from typing import Union
9
10 import optuna
11 from optuna._experimental import experimental_class
12 from optuna._experimental import experimental_func
13 from optuna._imports import try_import
14 from optuna.study.study import ObjectiveFuncType
15
16
17 with try_import() as _imports:
18 import mlflow
19
20 RUN_ID_ATTRIBUTE_KEY = "mlflow_run_id"
21
22
23 @experimental_class("1.4.0")
24 class MLflowCallback:
25 """Callback to track Optuna trials with MLflow.
26
27 This callback adds relevant information that is
28 tracked by Optuna to MLflow.
29
30 Example:
31
32 Add MLflow callback to Optuna optimization.
33
34 .. testsetup::
35
36 import pathlib
37 import tempfile
38
39 tempdir = tempfile.mkdtemp()
40 YOUR_TRACKING_URI = pathlib.Path(tempdir).as_uri()
41
42 .. testcode::
43
44 import optuna
45 from optuna.integration.mlflow import MLflowCallback
46
47
48 def objective(trial):
49 x = trial.suggest_float("x", -10, 10)
50 return (x - 2) ** 2
51
52
53 mlflc = MLflowCallback(
54 tracking_uri=YOUR_TRACKING_URI,
55 metric_name="my metric score",
56 )
57
58 study = optuna.create_study(study_name="my_study")
59 study.optimize(objective, n_trials=10, callbacks=[mlflc])
60
61 .. testcleanup::
62
63 import shutil
64
65 shutil.rmtree(tempdir)
66
67 Args:
68 tracking_uri:
69 The URI of the MLflow tracking server.
70
71 Please refer to `mlflow.set_tracking_uri
72 <https://www.mlflow.org/docs/latest/python_api/mlflow.html#mlflow.set_tracking_uri>`_
73 for more details.
74 metric_name:
75 Name assigned to optimized metric. In case of multi-objective optimization,
76 list of names can be passed. Those names will be assigned
77 to metrics in the order returned by objective function.
78 If single name is provided, or this argument is left to default value,
79 it will be broadcasted to each objective with a number suffix in order
80 returned by objective function e.g. two objectives and default metric name
81 will be logged as ``value_0`` and ``value_1``. The number of metrics must be
82 the same as the number of values an objective function returns.
83 create_experiment:
84 When :obj:`True`, new MLflow experiment will be created for each optimization run,
85 named after the Optuna study. Setting this argument to :obj:`False` lets user run
86 optimization under existing experiment, set via `mlflow.set_experiment
87 <https://www.mlflow.org/docs/latest/python_api/mlflow.html#mlflow.get_tracking_uri>`_,
88 by passing ``experiment_id`` as one of ``mlflow_kwargs`` or under default MLflow
89 experiment, when no additional arguments are passed. Note that this argument
90 must be set to :obj:`False` when using Optuna with this callback within
91 Databricks Notebook.
92 mlflow_kwargs:
93 Set of arguments passed when initializing MLflow run.
94 Please refer to `MLflow API documentation
95 <https://www.mlflow.org/docs/latest/python_api/mlflow.html#mlflow.start_run>`_
96 for more details.
97
98 .. note::
99 ``nest_trials`` argument added in v2.3.0 is a part of ``mlflow_kwargs``
100 since v3.0.0. Anyone using ``nest_trials=True`` should migrate to
101 ``mlflow_kwargs={"nested": True}`` to avoid raising :exc:`TypeError`.
102
103 tag_study_user_attrs:
104 Flag indicating whether or not to add the study's user attrs
105 to the mlflow trial as tags. Please note that when this flag is
106 set, key value pairs in :attr:`~optuna.study.Study.user_attrs`
107 will supersede existing tags.
108 tag_trial_user_attrs:
109 Flag indicating whether or not to add the trial's user attrs
110 to the mlflow trial as tags. Please note that when both trial and
111 study user attributes are logged, the latter will supersede the former
112 in case of a collision.
113
114
115 """
116
117 def __init__(
118 self,
119 tracking_uri: Optional[str] = None,
120 metric_name: Union[str, Sequence[str]] = "value",
121 create_experiment: bool = True,
122 mlflow_kwargs: Optional[Dict[str, Any]] = None,
123 tag_study_user_attrs: bool = False,
124 tag_trial_user_attrs: bool = True,
125 ) -> None:
126
127 _imports.check()
128
129 if not isinstance(metric_name, Sequence):
130 raise TypeError(
131 "Expected metric_name to be string or sequence of strings, got {}.".format(
132 type(metric_name)
133 )
134 )
135
136 self._tracking_uri = tracking_uri
137 self._metric_name = metric_name
138 self._create_experiment = create_experiment
139 self._mlflow_kwargs = mlflow_kwargs or {}
140 self._tag_study_user_attrs = tag_study_user_attrs
141 self._tag_trial_user_attrs = tag_trial_user_attrs
142
143 def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:
144
145 self._initialize_experiment(study)
146
147 with mlflow.start_run(
148 run_id=trial.system_attrs.get(RUN_ID_ATTRIBUTE_KEY),
149 experiment_id=self._mlflow_kwargs.get("experiment_id"),
150 run_name=self._mlflow_kwargs.get("run_name") or str(trial.number),
151 nested=self._mlflow_kwargs.get("nested") or False,
152 tags=self._mlflow_kwargs.get("tags"),
153 ):
154
155 # This sets the metrics for MLflow.
156 self._log_metrics(trial.values)
157
158 # This sets the params for MLflow.
159 self._log_params(trial.params)
160
161 # This sets the tags for MLflow.
162 self._set_tags(trial, study)
163
164 @experimental_func("2.9.0")
165 def track_in_mlflow(self) -> Callable:
166 """Decorator for using MLflow logging in the objective function.
167
168 This decorator enables the extension of MLflow logging provided by the callback.
169
170 All information logged in the decorated objective function will be added to the MLflow
171 run for the trial created by the callback.
172
173 Example:
174
175 Add additional logging to MLflow.
176
177 .. testcode::
178
179 import optuna
180 import mlflow
181 from optuna.integration.mlflow import MLflowCallback
182
183 mlflc = MLflowCallback(
184 tracking_uri=YOUR_TRACKING_URI,
185 metric_name="my metric score",
186 )
187
188
189 @mlflc.track_in_mlflow()
190 def objective(trial):
191 x = trial.suggest_float("x", -10, 10)
192 mlflow.log_param("power", 2)
193 mlflow.log_metric("base of metric", x - 2)
194
195 return (x - 2) ** 2
196
197
198 study = optuna.create_study(study_name="my_other_study")
199 study.optimize(objective, n_trials=10, callbacks=[mlflc])
200
201 Returns:
202 ObjectiveFuncType: Objective function with tracking to MLflow enabled.
203 """
204
205 def decorator(func: ObjectiveFuncType) -> ObjectiveFuncType:
206 @functools.wraps(func)
207 def wrapper(trial: optuna.trial.Trial) -> Union[float, Sequence[float]]:
208 study = trial.study
209 self._initialize_experiment(study)
210 nested = self._mlflow_kwargs.get("nested")
211
212 with mlflow.start_run(run_name=str(trial.number), nested=nested) as run:
213 trial.set_system_attr(RUN_ID_ATTRIBUTE_KEY, run.info.run_id)
214
215 return func(trial)
216
217 return wrapper
218
219 return decorator
220
221 def _initialize_experiment(self, study: optuna.study.Study) -> None:
222 """Initialize an MLflow experiment with the study name.
223
224 If a tracking uri has been provided, MLflow will be initialized to use it.
225
226 Args:
227 study: Study to be tracked in MLflow.
228 """
229
230 # This sets the `tracking_uri` for MLflow.
231 if self._tracking_uri is not None:
232 mlflow.set_tracking_uri(self._tracking_uri)
233
234 if self._create_experiment:
235 mlflow.set_experiment(study.study_name)
236
237 def _set_tags(self, trial: optuna.trial.FrozenTrial, study: optuna.study.Study) -> None:
238 """Sets the Optuna tags for the current MLflow run.
239
240 Args:
241 trial: Trial to be tracked.
242 study: Study to be tracked.
243 """
244
245 tags: Dict[str, Union[str, List[str]]] = {}
246 tags["number"] = str(trial.number)
247 tags["datetime_start"] = str(trial.datetime_start)
248
249 tags["datetime_complete"] = str(trial.datetime_complete)
250
251 # Set trial state.
252 if trial.state.is_finished():
253 tags["state"] = trial.state.name
254
255 # Set study directions.
256 directions = [d.name for d in study.directions]
257 tags["direction"] = directions if len(directions) != 1 else directions[0]
258
259 distributions = {(k + "_distribution"): str(v) for (k, v) in trial.distributions.items()}
260 tags.update(distributions)
261
262 if self._tag_trial_user_attrs:
263 tags.update(trial.user_attrs)
264
265 if self._tag_study_user_attrs:
266 tags.update(study.user_attrs)
267
268 # This is a temporary fix on Optuna side. It avoids an error with user
269 # attributes that are too long. It should be fixed on MLflow side later.
270 # When it is fixed on MLflow side this codeblock can be removed.
271 # see https://github.com/optuna/optuna/issues/1340
272 # see https://github.com/mlflow/mlflow/issues/2931
273 for key, value in tags.items():
274 value = str(value) # make sure it is a string
275 max_val_length = mlflow.utils.validation.MAX_TAG_VAL_LENGTH
276 if len(value) > max_val_length:
277 tags[key] = "{}...".format(value[: max_val_length - 3])
278
279 mlflow.set_tags(tags)
280
281 def _log_metrics(self, values: Optional[List[float]]) -> None:
282 """Log the trial results as metrics to MLflow.
283
284 Args:
285 values: Results of a trial.
286 """
287 if values is None:
288 return
289
290 if isinstance(self._metric_name, str):
291 if len(values) > 1:
292 # Broadcast default name for multi-objective optimization.
293 names = ["{}_{}".format(self._metric_name, i) for i in range(len(values))]
294
295 else:
296 names = [self._metric_name]
297
298 else:
299 if len(self._metric_name) != len(values):
300 raise ValueError(
301 "Running multi-objective optimization "
302 "with {} objective values, but {} names specified. "
303 "Match objective values and names, or use default broadcasting.".format(
304 len(values), len(self._metric_name)
305 )
306 )
307
308 else:
309 names = [*self._metric_name]
310
311 metrics = {name: val for name, val in zip(names, values)}
312 mlflow.log_metrics(metrics)
313
314 @staticmethod
315 def _log_params(params: Dict[str, Any]) -> None:
316 """Log the parameters of the trial to MLflow.
317
318 Args:
319 params: Trial params.
320 """
321 mlflow.log_params(params)
322
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optuna/integration/mlflow.py b/optuna/integration/mlflow.py
--- a/optuna/integration/mlflow.py
+++ b/optuna/integration/mlflow.py
@@ -1,4 +1,5 @@
import functools
+import threading
from typing import Any
from typing import Callable
from typing import Dict
@@ -139,27 +140,29 @@
self._mlflow_kwargs = mlflow_kwargs or {}
self._tag_study_user_attrs = tag_study_user_attrs
self._tag_trial_user_attrs = tag_trial_user_attrs
+ self._lock = threading.Lock()
def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:
- self._initialize_experiment(study)
+ with self._lock:
+ self._initialize_experiment(study)
- with mlflow.start_run(
- run_id=trial.system_attrs.get(RUN_ID_ATTRIBUTE_KEY),
- experiment_id=self._mlflow_kwargs.get("experiment_id"),
- run_name=self._mlflow_kwargs.get("run_name") or str(trial.number),
- nested=self._mlflow_kwargs.get("nested") or False,
- tags=self._mlflow_kwargs.get("tags"),
- ):
+ with mlflow.start_run(
+ run_id=trial.system_attrs.get(RUN_ID_ATTRIBUTE_KEY),
+ experiment_id=self._mlflow_kwargs.get("experiment_id"),
+ run_name=self._mlflow_kwargs.get("run_name") or str(trial.number),
+ nested=self._mlflow_kwargs.get("nested") or False,
+ tags=self._mlflow_kwargs.get("tags"),
+ ):
- # This sets the metrics for MLflow.
- self._log_metrics(trial.values)
+ # This sets the metrics for MLflow.
+ self._log_metrics(trial.values)
- # This sets the params for MLflow.
- self._log_params(trial.params)
+ # This sets the params for MLflow.
+ self._log_params(trial.params)
- # This sets the tags for MLflow.
- self._set_tags(trial, study)
+ # This sets the tags for MLflow.
+ self._set_tags(trial, study)
@experimental_func("2.9.0")
def track_in_mlflow(self) -> Callable:
@@ -205,14 +208,15 @@
def decorator(func: ObjectiveFuncType) -> ObjectiveFuncType:
@functools.wraps(func)
def wrapper(trial: optuna.trial.Trial) -> Union[float, Sequence[float]]:
- study = trial.study
- self._initialize_experiment(study)
- nested = self._mlflow_kwargs.get("nested")
+ with self._lock:
+ study = trial.study
+ self._initialize_experiment(study)
+ nested = self._mlflow_kwargs.get("nested")
- with mlflow.start_run(run_name=str(trial.number), nested=nested) as run:
- trial.set_system_attr(RUN_ID_ATTRIBUTE_KEY, run.info.run_id)
+ with mlflow.start_run(run_name=str(trial.number), nested=nested) as run:
+ trial.set_system_attr(RUN_ID_ATTRIBUTE_KEY, run.info.run_id)
- return func(trial)
+ return func(trial)
return wrapper
| {"golden_diff": "diff --git a/optuna/integration/mlflow.py b/optuna/integration/mlflow.py\n--- a/optuna/integration/mlflow.py\n+++ b/optuna/integration/mlflow.py\n@@ -1,4 +1,5 @@\n import functools\n+import threading\n from typing import Any\n from typing import Callable\n from typing import Dict\n@@ -139,27 +140,29 @@\n self._mlflow_kwargs = mlflow_kwargs or {}\n self._tag_study_user_attrs = tag_study_user_attrs\n self._tag_trial_user_attrs = tag_trial_user_attrs\n+ self._lock = threading.Lock()\n \n def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:\n \n- self._initialize_experiment(study)\n+ with self._lock:\n+ self._initialize_experiment(study)\n \n- with mlflow.start_run(\n- run_id=trial.system_attrs.get(RUN_ID_ATTRIBUTE_KEY),\n- experiment_id=self._mlflow_kwargs.get(\"experiment_id\"),\n- run_name=self._mlflow_kwargs.get(\"run_name\") or str(trial.number),\n- nested=self._mlflow_kwargs.get(\"nested\") or False,\n- tags=self._mlflow_kwargs.get(\"tags\"),\n- ):\n+ with mlflow.start_run(\n+ run_id=trial.system_attrs.get(RUN_ID_ATTRIBUTE_KEY),\n+ experiment_id=self._mlflow_kwargs.get(\"experiment_id\"),\n+ run_name=self._mlflow_kwargs.get(\"run_name\") or str(trial.number),\n+ nested=self._mlflow_kwargs.get(\"nested\") or False,\n+ tags=self._mlflow_kwargs.get(\"tags\"),\n+ ):\n \n- # This sets the metrics for MLflow.\n- self._log_metrics(trial.values)\n+ # This sets the metrics for MLflow.\n+ self._log_metrics(trial.values)\n \n- # This sets the params for MLflow.\n- self._log_params(trial.params)\n+ # This sets the params for MLflow.\n+ self._log_params(trial.params)\n \n- # This sets the tags for MLflow.\n- self._set_tags(trial, study)\n+ # This sets the tags for MLflow.\n+ self._set_tags(trial, study)\n \n @experimental_func(\"2.9.0\")\n def track_in_mlflow(self) -> Callable:\n@@ -205,14 +208,15 @@\n def decorator(func: ObjectiveFuncType) -> ObjectiveFuncType:\n @functools.wraps(func)\n def wrapper(trial: optuna.trial.Trial) -> Union[float, Sequence[float]]:\n- study = trial.study\n- self._initialize_experiment(study)\n- nested = self._mlflow_kwargs.get(\"nested\")\n+ with self._lock:\n+ study = trial.study\n+ self._initialize_experiment(study)\n+ nested = self._mlflow_kwargs.get(\"nested\")\n \n- with mlflow.start_run(run_name=str(trial.number), nested=nested) as run:\n- trial.set_system_attr(RUN_ID_ATTRIBUTE_KEY, run.info.run_id)\n+ with mlflow.start_run(run_name=str(trial.number), nested=nested) as run:\n+ trial.set_system_attr(RUN_ID_ATTRIBUTE_KEY, run.info.run_id)\n \n- return func(trial)\n+ return func(trial)\n \n return wrapper\n", "issue": "MLflowCallback error when using n_jobs > 1\n### Expected behavior\r\n\r\nbe able to use more than one job and at the same time log it in mlflow\r\n\r\n### Environment\r\n\r\n- Optuna version: 3.0.3\r\n- Python version: 3.9.7\r\n- OS: Linux-4.14.35-1902.8.4.el7uek.x86_64-x86_64-with-glibc2.31\r\n- MLFlow version: 1.29.0\r\n\r\n\r\n### Error messages, stack traces, or logs\r\n\r\n```shell\r\nException: Run with UUID 13a5f691281742d1a9d6b6f98c43203a is already active. To start a new run, first end the current run with mlflow.end_run(). 
To start a nested run, call start_run with nested=True\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\ntrying to use multiprocessing i get \r\n\r\ni know it is an mlflow issue, but there are some fixes to do when interacting with the mlflow api to support multiprocessing\r\nso, some changes to MLflowCallback should be done\r\n\r\npotential fixes\r\nhttps://github.com/mlflow/mlflow/issues/3592\r\nhttps://github.com/mlflow/mlflow/issues/1420\r\n\r\ni tried even using:\r\n`\r\n mlflow_kwargs={\r\n 'nested': False\r\n }\r\n`\r\nbut this raises duplication data and errors in mlflow\r\n\r\ni also tried with\r\n`\r\nfrom joblib import parallel_backend\r\nwith parallel_backend('multiprocessing'):`\r\nduring study.optimize()\r\n\r\n### Additional context (optional)\r\n\r\n_No response_\n", "before_files": [{"content": "import functools\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\n\nimport optuna\nfrom optuna._experimental import experimental_class\nfrom optuna._experimental import experimental_func\nfrom optuna._imports import try_import\nfrom optuna.study.study import ObjectiveFuncType\n\n\nwith try_import() as _imports:\n import mlflow\n\nRUN_ID_ATTRIBUTE_KEY = \"mlflow_run_id\"\n\n\n@experimental_class(\"1.4.0\")\nclass MLflowCallback:\n \"\"\"Callback to track Optuna trials with MLflow.\n\n This callback adds relevant information that is\n tracked by Optuna to MLflow.\n\n Example:\n\n Add MLflow callback to Optuna optimization.\n\n .. testsetup::\n\n import pathlib\n import tempfile\n\n tempdir = tempfile.mkdtemp()\n YOUR_TRACKING_URI = pathlib.Path(tempdir).as_uri()\n\n .. testcode::\n\n import optuna\n from optuna.integration.mlflow import MLflowCallback\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -10, 10)\n return (x - 2) ** 2\n\n\n mlflc = MLflowCallback(\n tracking_uri=YOUR_TRACKING_URI,\n metric_name=\"my metric score\",\n )\n\n study = optuna.create_study(study_name=\"my_study\")\n study.optimize(objective, n_trials=10, callbacks=[mlflc])\n\n .. testcleanup::\n\n import shutil\n\n shutil.rmtree(tempdir)\n\n Args:\n tracking_uri:\n The URI of the MLflow tracking server.\n\n Please refer to `mlflow.set_tracking_uri\n <https://www.mlflow.org/docs/latest/python_api/mlflow.html#mlflow.set_tracking_uri>`_\n for more details.\n metric_name:\n Name assigned to optimized metric. In case of multi-objective optimization,\n list of names can be passed. Those names will be assigned\n to metrics in the order returned by objective function.\n If single name is provided, or this argument is left to default value,\n it will be broadcasted to each objective with a number suffix in order\n returned by objective function e.g. two objectives and default metric name\n will be logged as ``value_0`` and ``value_1``. The number of metrics must be\n the same as the number of values an objective function returns.\n create_experiment:\n When :obj:`True`, new MLflow experiment will be created for each optimization run,\n named after the Optuna study. Setting this argument to :obj:`False` lets user run\n optimization under existing experiment, set via `mlflow.set_experiment\n <https://www.mlflow.org/docs/latest/python_api/mlflow.html#mlflow.get_tracking_uri>`_,\n by passing ``experiment_id`` as one of ``mlflow_kwargs`` or under default MLflow\n experiment, when no additional arguments are passed. 
Note that this argument\n must be set to :obj:`False` when using Optuna with this callback within\n Databricks Notebook.\n mlflow_kwargs:\n Set of arguments passed when initializing MLflow run.\n Please refer to `MLflow API documentation\n <https://www.mlflow.org/docs/latest/python_api/mlflow.html#mlflow.start_run>`_\n for more details.\n\n .. note::\n ``nest_trials`` argument added in v2.3.0 is a part of ``mlflow_kwargs``\n since v3.0.0. Anyone using ``nest_trials=True`` should migrate to\n ``mlflow_kwargs={\"nested\": True}`` to avoid raising :exc:`TypeError`.\n\n tag_study_user_attrs:\n Flag indicating whether or not to add the study's user attrs\n to the mlflow trial as tags. Please note that when this flag is\n set, key value pairs in :attr:`~optuna.study.Study.user_attrs`\n will supersede existing tags.\n tag_trial_user_attrs:\n Flag indicating whether or not to add the trial's user attrs\n to the mlflow trial as tags. Please note that when both trial and\n study user attributes are logged, the latter will supersede the former\n in case of a collision.\n\n\n \"\"\"\n\n def __init__(\n self,\n tracking_uri: Optional[str] = None,\n metric_name: Union[str, Sequence[str]] = \"value\",\n create_experiment: bool = True,\n mlflow_kwargs: Optional[Dict[str, Any]] = None,\n tag_study_user_attrs: bool = False,\n tag_trial_user_attrs: bool = True,\n ) -> None:\n\n _imports.check()\n\n if not isinstance(metric_name, Sequence):\n raise TypeError(\n \"Expected metric_name to be string or sequence of strings, got {}.\".format(\n type(metric_name)\n )\n )\n\n self._tracking_uri = tracking_uri\n self._metric_name = metric_name\n self._create_experiment = create_experiment\n self._mlflow_kwargs = mlflow_kwargs or {}\n self._tag_study_user_attrs = tag_study_user_attrs\n self._tag_trial_user_attrs = tag_trial_user_attrs\n\n def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:\n\n self._initialize_experiment(study)\n\n with mlflow.start_run(\n run_id=trial.system_attrs.get(RUN_ID_ATTRIBUTE_KEY),\n experiment_id=self._mlflow_kwargs.get(\"experiment_id\"),\n run_name=self._mlflow_kwargs.get(\"run_name\") or str(trial.number),\n nested=self._mlflow_kwargs.get(\"nested\") or False,\n tags=self._mlflow_kwargs.get(\"tags\"),\n ):\n\n # This sets the metrics for MLflow.\n self._log_metrics(trial.values)\n\n # This sets the params for MLflow.\n self._log_params(trial.params)\n\n # This sets the tags for MLflow.\n self._set_tags(trial, study)\n\n @experimental_func(\"2.9.0\")\n def track_in_mlflow(self) -> Callable:\n \"\"\"Decorator for using MLflow logging in the objective function.\n\n This decorator enables the extension of MLflow logging provided by the callback.\n\n All information logged in the decorated objective function will be added to the MLflow\n run for the trial created by the callback.\n\n Example:\n\n Add additional logging to MLflow.\n\n .. 
testcode::\n\n import optuna\n import mlflow\n from optuna.integration.mlflow import MLflowCallback\n\n mlflc = MLflowCallback(\n tracking_uri=YOUR_TRACKING_URI,\n metric_name=\"my metric score\",\n )\n\n\n @mlflc.track_in_mlflow()\n def objective(trial):\n x = trial.suggest_float(\"x\", -10, 10)\n mlflow.log_param(\"power\", 2)\n mlflow.log_metric(\"base of metric\", x - 2)\n\n return (x - 2) ** 2\n\n\n study = optuna.create_study(study_name=\"my_other_study\")\n study.optimize(objective, n_trials=10, callbacks=[mlflc])\n\n Returns:\n ObjectiveFuncType: Objective function with tracking to MLflow enabled.\n \"\"\"\n\n def decorator(func: ObjectiveFuncType) -> ObjectiveFuncType:\n @functools.wraps(func)\n def wrapper(trial: optuna.trial.Trial) -> Union[float, Sequence[float]]:\n study = trial.study\n self._initialize_experiment(study)\n nested = self._mlflow_kwargs.get(\"nested\")\n\n with mlflow.start_run(run_name=str(trial.number), nested=nested) as run:\n trial.set_system_attr(RUN_ID_ATTRIBUTE_KEY, run.info.run_id)\n\n return func(trial)\n\n return wrapper\n\n return decorator\n\n def _initialize_experiment(self, study: optuna.study.Study) -> None:\n \"\"\"Initialize an MLflow experiment with the study name.\n\n If a tracking uri has been provided, MLflow will be initialized to use it.\n\n Args:\n study: Study to be tracked in MLflow.\n \"\"\"\n\n # This sets the `tracking_uri` for MLflow.\n if self._tracking_uri is not None:\n mlflow.set_tracking_uri(self._tracking_uri)\n\n if self._create_experiment:\n mlflow.set_experiment(study.study_name)\n\n def _set_tags(self, trial: optuna.trial.FrozenTrial, study: optuna.study.Study) -> None:\n \"\"\"Sets the Optuna tags for the current MLflow run.\n\n Args:\n trial: Trial to be tracked.\n study: Study to be tracked.\n \"\"\"\n\n tags: Dict[str, Union[str, List[str]]] = {}\n tags[\"number\"] = str(trial.number)\n tags[\"datetime_start\"] = str(trial.datetime_start)\n\n tags[\"datetime_complete\"] = str(trial.datetime_complete)\n\n # Set trial state.\n if trial.state.is_finished():\n tags[\"state\"] = trial.state.name\n\n # Set study directions.\n directions = [d.name for d in study.directions]\n tags[\"direction\"] = directions if len(directions) != 1 else directions[0]\n\n distributions = {(k + \"_distribution\"): str(v) for (k, v) in trial.distributions.items()}\n tags.update(distributions)\n\n if self._tag_trial_user_attrs:\n tags.update(trial.user_attrs)\n\n if self._tag_study_user_attrs:\n tags.update(study.user_attrs)\n\n # This is a temporary fix on Optuna side. It avoids an error with user\n # attributes that are too long. 
It should be fixed on MLflow side later.\n # When it is fixed on MLflow side this codeblock can be removed.\n # see https://github.com/optuna/optuna/issues/1340\n # see https://github.com/mlflow/mlflow/issues/2931\n for key, value in tags.items():\n value = str(value) # make sure it is a string\n max_val_length = mlflow.utils.validation.MAX_TAG_VAL_LENGTH\n if len(value) > max_val_length:\n tags[key] = \"{}...\".format(value[: max_val_length - 3])\n\n mlflow.set_tags(tags)\n\n def _log_metrics(self, values: Optional[List[float]]) -> None:\n \"\"\"Log the trial results as metrics to MLflow.\n\n Args:\n values: Results of a trial.\n \"\"\"\n if values is None:\n return\n\n if isinstance(self._metric_name, str):\n if len(values) > 1:\n # Broadcast default name for multi-objective optimization.\n names = [\"{}_{}\".format(self._metric_name, i) for i in range(len(values))]\n\n else:\n names = [self._metric_name]\n\n else:\n if len(self._metric_name) != len(values):\n raise ValueError(\n \"Running multi-objective optimization \"\n \"with {} objective values, but {} names specified. \"\n \"Match objective values and names, or use default broadcasting.\".format(\n len(values), len(self._metric_name)\n )\n )\n\n else:\n names = [*self._metric_name]\n\n metrics = {name: val for name, val in zip(names, values)}\n mlflow.log_metrics(metrics)\n\n @staticmethod\n def _log_params(params: Dict[str, Any]) -> None:\n \"\"\"Log the parameters of the trial to MLflow.\n\n Args:\n params: Trial params.\n \"\"\"\n mlflow.log_params(params)\n", "path": "optuna/integration/mlflow.py"}], "after_files": [{"content": "import functools\nimport threading\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\n\nimport optuna\nfrom optuna._experimental import experimental_class\nfrom optuna._experimental import experimental_func\nfrom optuna._imports import try_import\nfrom optuna.study.study import ObjectiveFuncType\n\n\nwith try_import() as _imports:\n import mlflow\n\nRUN_ID_ATTRIBUTE_KEY = \"mlflow_run_id\"\n\n\n@experimental_class(\"1.4.0\")\nclass MLflowCallback:\n \"\"\"Callback to track Optuna trials with MLflow.\n\n This callback adds relevant information that is\n tracked by Optuna to MLflow.\n\n Example:\n\n Add MLflow callback to Optuna optimization.\n\n .. testsetup::\n\n import pathlib\n import tempfile\n\n tempdir = tempfile.mkdtemp()\n YOUR_TRACKING_URI = pathlib.Path(tempdir).as_uri()\n\n .. testcode::\n\n import optuna\n from optuna.integration.mlflow import MLflowCallback\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -10, 10)\n return (x - 2) ** 2\n\n\n mlflc = MLflowCallback(\n tracking_uri=YOUR_TRACKING_URI,\n metric_name=\"my metric score\",\n )\n\n study = optuna.create_study(study_name=\"my_study\")\n study.optimize(objective, n_trials=10, callbacks=[mlflc])\n\n .. testcleanup::\n\n import shutil\n\n shutil.rmtree(tempdir)\n\n Args:\n tracking_uri:\n The URI of the MLflow tracking server.\n\n Please refer to `mlflow.set_tracking_uri\n <https://www.mlflow.org/docs/latest/python_api/mlflow.html#mlflow.set_tracking_uri>`_\n for more details.\n metric_name:\n Name assigned to optimized metric. In case of multi-objective optimization,\n list of names can be passed. 
Those names will be assigned\n to metrics in the order returned by objective function.\n If single name is provided, or this argument is left to default value,\n it will be broadcasted to each objective with a number suffix in order\n returned by objective function e.g. two objectives and default metric name\n will be logged as ``value_0`` and ``value_1``. The number of metrics must be\n the same as the number of values an objective function returns.\n create_experiment:\n When :obj:`True`, new MLflow experiment will be created for each optimization run,\n named after the Optuna study. Setting this argument to :obj:`False` lets user run\n optimization under existing experiment, set via `mlflow.set_experiment\n <https://www.mlflow.org/docs/latest/python_api/mlflow.html#mlflow.get_tracking_uri>`_,\n by passing ``experiment_id`` as one of ``mlflow_kwargs`` or under default MLflow\n experiment, when no additional arguments are passed. Note that this argument\n must be set to :obj:`False` when using Optuna with this callback within\n Databricks Notebook.\n mlflow_kwargs:\n Set of arguments passed when initializing MLflow run.\n Please refer to `MLflow API documentation\n <https://www.mlflow.org/docs/latest/python_api/mlflow.html#mlflow.start_run>`_\n for more details.\n\n .. note::\n ``nest_trials`` argument added in v2.3.0 is a part of ``mlflow_kwargs``\n since v3.0.0. Anyone using ``nest_trials=True`` should migrate to\n ``mlflow_kwargs={\"nested\": True}`` to avoid raising :exc:`TypeError`.\n\n tag_study_user_attrs:\n Flag indicating whether or not to add the study's user attrs\n to the mlflow trial as tags. Please note that when this flag is\n set, key value pairs in :attr:`~optuna.study.Study.user_attrs`\n will supersede existing tags.\n tag_trial_user_attrs:\n Flag indicating whether or not to add the trial's user attrs\n to the mlflow trial as tags. 
Please note that when both trial and\n study user attributes are logged, the latter will supersede the former\n in case of a collision.\n\n\n \"\"\"\n\n def __init__(\n self,\n tracking_uri: Optional[str] = None,\n metric_name: Union[str, Sequence[str]] = \"value\",\n create_experiment: bool = True,\n mlflow_kwargs: Optional[Dict[str, Any]] = None,\n tag_study_user_attrs: bool = False,\n tag_trial_user_attrs: bool = True,\n ) -> None:\n\n _imports.check()\n\n if not isinstance(metric_name, Sequence):\n raise TypeError(\n \"Expected metric_name to be string or sequence of strings, got {}.\".format(\n type(metric_name)\n )\n )\n\n self._tracking_uri = tracking_uri\n self._metric_name = metric_name\n self._create_experiment = create_experiment\n self._mlflow_kwargs = mlflow_kwargs or {}\n self._tag_study_user_attrs = tag_study_user_attrs\n self._tag_trial_user_attrs = tag_trial_user_attrs\n self._lock = threading.Lock()\n\n def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:\n\n with self._lock:\n self._initialize_experiment(study)\n\n with mlflow.start_run(\n run_id=trial.system_attrs.get(RUN_ID_ATTRIBUTE_KEY),\n experiment_id=self._mlflow_kwargs.get(\"experiment_id\"),\n run_name=self._mlflow_kwargs.get(\"run_name\") or str(trial.number),\n nested=self._mlflow_kwargs.get(\"nested\") or False,\n tags=self._mlflow_kwargs.get(\"tags\"),\n ):\n\n # This sets the metrics for MLflow.\n self._log_metrics(trial.values)\n\n # This sets the params for MLflow.\n self._log_params(trial.params)\n\n # This sets the tags for MLflow.\n self._set_tags(trial, study)\n\n @experimental_func(\"2.9.0\")\n def track_in_mlflow(self) -> Callable:\n \"\"\"Decorator for using MLflow logging in the objective function.\n\n This decorator enables the extension of MLflow logging provided by the callback.\n\n All information logged in the decorated objective function will be added to the MLflow\n run for the trial created by the callback.\n\n Example:\n\n Add additional logging to MLflow.\n\n .. 
testcode::\n\n import optuna\n import mlflow\n from optuna.integration.mlflow import MLflowCallback\n\n mlflc = MLflowCallback(\n tracking_uri=YOUR_TRACKING_URI,\n metric_name=\"my metric score\",\n )\n\n\n @mlflc.track_in_mlflow()\n def objective(trial):\n x = trial.suggest_float(\"x\", -10, 10)\n mlflow.log_param(\"power\", 2)\n mlflow.log_metric(\"base of metric\", x - 2)\n\n return (x - 2) ** 2\n\n\n study = optuna.create_study(study_name=\"my_other_study\")\n study.optimize(objective, n_trials=10, callbacks=[mlflc])\n\n Returns:\n ObjectiveFuncType: Objective function with tracking to MLflow enabled.\n \"\"\"\n\n def decorator(func: ObjectiveFuncType) -> ObjectiveFuncType:\n @functools.wraps(func)\n def wrapper(trial: optuna.trial.Trial) -> Union[float, Sequence[float]]:\n with self._lock:\n study = trial.study\n self._initialize_experiment(study)\n nested = self._mlflow_kwargs.get(\"nested\")\n\n with mlflow.start_run(run_name=str(trial.number), nested=nested) as run:\n trial.set_system_attr(RUN_ID_ATTRIBUTE_KEY, run.info.run_id)\n\n return func(trial)\n\n return wrapper\n\n return decorator\n\n def _initialize_experiment(self, study: optuna.study.Study) -> None:\n \"\"\"Initialize an MLflow experiment with the study name.\n\n If a tracking uri has been provided, MLflow will be initialized to use it.\n\n Args:\n study: Study to be tracked in MLflow.\n \"\"\"\n\n # This sets the `tracking_uri` for MLflow.\n if self._tracking_uri is not None:\n mlflow.set_tracking_uri(self._tracking_uri)\n\n if self._create_experiment:\n mlflow.set_experiment(study.study_name)\n\n def _set_tags(self, trial: optuna.trial.FrozenTrial, study: optuna.study.Study) -> None:\n \"\"\"Sets the Optuna tags for the current MLflow run.\n\n Args:\n trial: Trial to be tracked.\n study: Study to be tracked.\n \"\"\"\n\n tags: Dict[str, Union[str, List[str]]] = {}\n tags[\"number\"] = str(trial.number)\n tags[\"datetime_start\"] = str(trial.datetime_start)\n\n tags[\"datetime_complete\"] = str(trial.datetime_complete)\n\n # Set trial state.\n if trial.state.is_finished():\n tags[\"state\"] = trial.state.name\n\n # Set study directions.\n directions = [d.name for d in study.directions]\n tags[\"direction\"] = directions if len(directions) != 1 else directions[0]\n\n distributions = {(k + \"_distribution\"): str(v) for (k, v) in trial.distributions.items()}\n tags.update(distributions)\n\n if self._tag_trial_user_attrs:\n tags.update(trial.user_attrs)\n\n if self._tag_study_user_attrs:\n tags.update(study.user_attrs)\n\n # This is a temporary fix on Optuna side. It avoids an error with user\n # attributes that are too long. 
It should be fixed on MLflow side later.\n # When it is fixed on MLflow side this codeblock can be removed.\n # see https://github.com/optuna/optuna/issues/1340\n # see https://github.com/mlflow/mlflow/issues/2931\n for key, value in tags.items():\n value = str(value) # make sure it is a string\n max_val_length = mlflow.utils.validation.MAX_TAG_VAL_LENGTH\n if len(value) > max_val_length:\n tags[key] = \"{}...\".format(value[: max_val_length - 3])\n\n mlflow.set_tags(tags)\n\n def _log_metrics(self, values: Optional[List[float]]) -> None:\n \"\"\"Log the trial results as metrics to MLflow.\n\n Args:\n values: Results of a trial.\n \"\"\"\n if values is None:\n return\n\n if isinstance(self._metric_name, str):\n if len(values) > 1:\n # Broadcast default name for multi-objective optimization.\n names = [\"{}_{}\".format(self._metric_name, i) for i in range(len(values))]\n\n else:\n names = [self._metric_name]\n\n else:\n if len(self._metric_name) != len(values):\n raise ValueError(\n \"Running multi-objective optimization \"\n \"with {} objective values, but {} names specified. \"\n \"Match objective values and names, or use default broadcasting.\".format(\n len(values), len(self._metric_name)\n )\n )\n\n else:\n names = [*self._metric_name]\n\n metrics = {name: val for name, val in zip(names, values)}\n mlflow.log_metrics(metrics)\n\n @staticmethod\n def _log_params(params: Dict[str, Any]) -> None:\n \"\"\"Log the parameters of the trial to MLflow.\n\n Args:\n params: Trial params.\n \"\"\"\n mlflow.log_params(params)\n", "path": "optuna/integration/mlflow.py"}]} | 3,993 | 740 |
gh_patches_debug_11769 | rasdani/github-patches | git_diff | apache__tvm-2119 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CONTRIB] NNPack Test Flaky
http://ci.tvm.ai:8080/job/tvm/job/PR-2103/1/consoleText
cc @ajtulloch, can you take a look?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/tvm/contrib/nnpack.py`
Content:
```
1 """External function interface to NNPACK libraroes."""
2 from __future__ import absolute_import as _abs
3
4 from .. import api as _api
5 from .. import intrin as _intrin
6 from .._ffi.function import _init_api
7
8 def config(nthreads):
9 """Configure the nnpack library.
10
11 Parameters
12 ----------
13 nthreads : int
14 The threads number of nnpack thread pool, must be a nonnegative.
15
16 """
17 _Config(nthreads)
18
19 def fully_connected_inference(lhs, rhs, nthreads=1):
20 """Create an extern op that compute fully connected of 1D tensor lhs and
21 2D tensor rhs with nnpack.
22
23 Parameters
24 ----------
25 lhs : Tensor
26 lhs 1D array input[input_channels] of FP32 elements
27 rhs : Tensor
28 lhs 2D matrix kernel[output_channels][input_channels] of FP32 elements
29
30 Returns
31 -------
32 C : Tensor
33 lhs 1D array out[output_channels] of FP32 elements.
34 """
35 m = rhs.shape[0]
36 return _api.extern(
37 (m, ), [lhs, rhs],
38 lambda ins, outs: _intrin.call_packed(
39 "tvm.contrib.nnpack.fully_connected_inference",
40 ins[0], ins[1], outs[0], nthreads), name="C")
41
42 def fully_connected_output(lhs, rhs, nthreads=1):
43 """Create an extern op that compute fully connected of 2D tensor lhs and
44 2D tensor rhs with nnpack.
45
46 Parameters
47 ----------
48 lhs : Tensor
49 lhs 2D matrix input[batch_size][input_channels] of FP32 elements
50 rhs : Tensor
51 lhs 2D matrix kernel[output_channels][input_channels] of FP32 elements
52
53 Returns
54 -------
55 C : Tensor
56 lhs 2D array out[batch_size][output_channels] of FP32 elements.
57 """
58 n = lhs.shape[0]
59 m = rhs.shape[0]
60 return _api.extern(
61 (n, m), [lhs, rhs],
62 lambda ins, outs: _intrin.call_packed(
63 "tvm.contrib.nnpack.fully_connected_output",
64 ins[0], ins[1], outs[0], nthreads), name="C")
65
66
67 class ConvolutionAlgorithm:
68 AUTO = 0
69 FFT_8x8 = 1
70 FFT_16x16 = 2
71 WT_8x8 = 3
72 IMPLICIT_GEMM = 4
73 DIRECT = 5
74 WT_8x8_FP16 = 6
75
76
77 class ConvolutionTransformStrategy:
78 COMPUTE = 1
79 PRECOMPUTE = 2
80
81
82 def convolution_inference(
83 data, kernel, bias, padding, stride, nthreads=1,
84 algorithm=ConvolutionAlgorithm.AUTO):
85 """Create an extern op to do inference convolution of 4D tensor data and
86 4D tensor kernel and 1D tensor bias with nnpack.
87
88 Parameters
89 ----------
90 data : Tensor
91 data 4D tensor input[batch][input_channels][input_height][input_width] of
92 FP32 elements.
93 kernel : Tensor
94 kernel 4D tensor kernel[output_channels][input_channels][kernel_height]
95 [kernel_width] of FP32 elements.
96 bias : Tensor
97 bias 1D array bias[output_channels][input_channels][kernel_height]
98 [kernel_width] of FP32 elements.
99 padding : list
100 padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],
101 which indicates the padding around the feature map.
102 stride : list
103 stride A 2-dim list of [stride_height, stride_width], which indicates
104 the stride.
105
106 Returns
107 -------
108 output : Tensor
109 output 4D tensor output[batch][output_channels][output_height][output_width]
110 of FP32 elements.
111 """
112
113 assert isinstance(padding, list) and len(padding) == 4
114 assert isinstance(stride, list) and len(stride) == 2
115 batch, _, input_height, input_width = data.shape
116 output_channels, _, kernel_height, kernel_width = kernel.shape
117 output_height = (input_height + padding[0] + padding[1] - kernel_height) / stride[0] + 1
118 output_width = (input_width + padding[0] + padding[1] - kernel_width) / stride[1] + 1
119
120 return _api.extern(
121 (batch, output_channels, output_height, output_width),
122 [data, kernel, bias] if bias is not None else [data, kernel],
123 lambda ins, outs: _intrin.call_packed(
124 "tvm.contrib.nnpack.convolution_inference",
125 ins[0],
126 ins[1],
127 ins[2] if bias is not None else 0,
128 outs[0], padding[0], padding[1], padding[2], padding[3],
129 stride[0], stride[1], nthreads, algorithm), name="C")
130
131 def convolution_inference_without_weight_transform(
132 data, transformed_kernel, bias, padding, stride, nthreads=1,
133 algorithm=ConvolutionAlgorithm.AUTO):
134 """Create an extern op to do inference convolution of 4D tensor data and
135 4D pre-transformed tensor kernel and 1D tensor bias with nnpack.
136
137 Parameters
138 ----------
139 data : Tensor
140 data 4D tensor input[batch][input_channels][input_height][input_width] of
141 FP32 elements.
142 transformed_kernel : Tensor
143 transformed_kernel 4D tensor kernel[output_channels][input_channels][tile]
144 [tile] of FP32 elements.
145 bias : Tensor
146 bias 1D array bias[output_channels][input_channels][kernel_height]
147 [kernel_width] of FP32 elements.
148 padding : list
149 padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],
150 which indicates the padding around the feature map.
151 stride : list
152 stride A 2-dim list of [stride_height, stride_width], which indicates
153 the stride.
154
155 Returns
156 -------
157 output : Tensor
158 output 4D tensor output[batch][output_channels][output_height][output_width]
159 of FP32 elements.
160 """
161
162 assert algorithm in (ConvolutionAlgorithm.WT_8x8,
163 ConvolutionAlgorithm.WT_8x8_FP16)
164 assert isinstance(padding, list) and len(padding) == 4
165 assert isinstance(stride, list) and len(stride) == 2
166 batch, _, input_height, input_width = data.shape
167 output_channels, _, _, _ = transformed_kernel.shape
168 kernel_height, kernel_width = (3, 3)
169 output_height = (input_height + padding[0] + padding[1] - kernel_height) / stride[0] + 1
170 output_width = (input_width + padding[0] + padding[1] - kernel_width) / stride[1] + 1
171
172 return _api.extern(
173 (batch, output_channels, output_height, output_width),
174 [data, transformed_kernel, bias] if bias is not None else [data, transformed_kernel],
175 lambda ins, outs: _intrin.call_packed(
176 "tvm.contrib.nnpack.convolution_inference_without_weight_transform",
177 ins[0],
178 ins[1],
179 ins[2] if bias is not None else 0,
180 outs[0], padding[0], padding[1], padding[2], padding[3],
181 stride[0], stride[1], nthreads, algorithm), name="C")
182
183 def convolution_inference_weight_transform(
184 kernel, nthreads=1,
185 algorithm=ConvolutionAlgorithm.AUTO):
186 """Create an extern op to do inference convolution of 3D tensor data and
187 4D tensor kernel and 1D tensor bias with nnpack.
188
189 Parameters
190 ----------
191 kernel : Tensor
192 kernel 4D tensor kernel[output_channels][input_channels][kernel_height]
193 [kernel_width] of FP32 elements.
194
195 Returns
196 -------
197 output : Tensor
198 output 4D tensor output[output_channels][input_channels][tile][tile]
199 of FP32 elements.
200 """
201 assert algorithm in (ConvolutionAlgorithm.WT_8x8, ConvolutionAlgorithm.WT_8x8_FP16)
202 output_channels, input_channels, _, _ = kernel.shape
203
204 transform_tile_size = 8
205 return _api.extern(
206 (output_channels, input_channels, transform_tile_size, transform_tile_size),
207 [kernel],
208 lambda ins, outs: _intrin.call_packed(
209 "tvm.contrib.nnpack.convolution_inference_weight_transform",
210 ins[0], outs[0], nthreads, algorithm), name="transform_kernel")
211
212 def convolution_output(data, kernel, bias, padding, nthreads=1):
213 """Create an extern op to compute convolution of 4D tensor data and
214 4D tensor kernel and 1D tensor bias with nnpack.
215
216 Parameters
217 ----------
218 data : Tensor
219 data 4D tensor input[batch_size][input_channels][input_height]
220 [input_width] of FP32 elements.
221 kernel : Tensor
222 kernel 4D tensor kernel[output_channels][input_channels][kernel_height]
223 [kernel_width] of FP32 elements.
224 bias : Tensor
225 bias 1D array bias[output_channels][input_channels][kernel_height]
226 [kernel_width] of FP32 elements.
227 padding : list
228 padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],
229 which indicates the padding around the feature map.
230
231 Returns
232 -------
233 output : Tensor
234 output 4D tensor output[batch_size][output_channels][output_height]
235 [output_width] of FP32 elements.
236 """
237
238 assert isinstance(padding, list) and len(padding) == 4
239 batch, _, input_height, input_width = data.shape
240 output_channels, _, kernel_height, kernel_width = kernel.shape
241 output_height = (input_height + padding[0] + padding[1] - kernel_height) + 1
242 output_width = (input_width + padding[0] + padding[1] - kernel_width) + 1
243
244 return _api.extern(
245 (batch, output_channels, output_height, output_width), [data, kernel, bias],
246 lambda ins, outs: _intrin.call_packed(
247 "tvm.contrib.nnpack.convolution_output", ins[0], ins[1], ins[2],
248 outs[0], padding[0], padding[1], padding[2], padding[3], nthreads), name="C")
249
250
251 _init_api("tvm.contrib.nnpack")
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/tvm/contrib/nnpack.py b/python/tvm/contrib/nnpack.py
--- a/python/tvm/contrib/nnpack.py
+++ b/python/tvm/contrib/nnpack.py
@@ -5,16 +5,11 @@
from .. import intrin as _intrin
from .._ffi.function import _init_api
-def config(nthreads):
- """Configure the nnpack library.
-
- Parameters
- ----------
- nthreads : int
- The threads number of nnpack thread pool, must be a nonnegative.
-
+def is_available():
+ """Check whether NNPACK is available, that is, `nnp_initialize()`
+ returns `nnp_status_success`.
"""
- _Config(nthreads)
+ return _initialize() == 0
def fully_connected_inference(lhs, rhs, nthreads=1):
"""Create an extern op that compute fully connected of 1D tensor lhs and
| {"golden_diff": "diff --git a/python/tvm/contrib/nnpack.py b/python/tvm/contrib/nnpack.py\n--- a/python/tvm/contrib/nnpack.py\n+++ b/python/tvm/contrib/nnpack.py\n@@ -5,16 +5,11 @@\n from .. import intrin as _intrin\n from .._ffi.function import _init_api\n \n-def config(nthreads):\n- \"\"\"Configure the nnpack library.\n-\n- Parameters\n- ----------\n- nthreads : int\n- The threads number of nnpack thread pool, must be a nonnegative.\n-\n+def is_available():\n+ \"\"\"Check whether NNPACK is available, that is, `nnp_initialize()`\n+ returns `nnp_status_success`.\n \"\"\"\n- _Config(nthreads)\n+ return _initialize() == 0\n \n def fully_connected_inference(lhs, rhs, nthreads=1):\n \"\"\"Create an extern op that compute fully connected of 1D tensor lhs and\n", "issue": "[CONTRIB] NNPack Test Flaky\nhttp://ci.tvm.ai:8080/job/tvm/job/PR-2103/1/consoleText\r\n\r\ncc @ajtulloch can you take a look?\n", "before_files": [{"content": "\"\"\"External function interface to NNPACK libraroes.\"\"\"\nfrom __future__ import absolute_import as _abs\n\nfrom .. import api as _api\nfrom .. import intrin as _intrin\nfrom .._ffi.function import _init_api\n\ndef config(nthreads):\n \"\"\"Configure the nnpack library.\n\n Parameters\n ----------\n nthreads : int\n The threads number of nnpack thread pool, must be a nonnegative.\n\n \"\"\"\n _Config(nthreads)\n\ndef fully_connected_inference(lhs, rhs, nthreads=1):\n \"\"\"Create an extern op that compute fully connected of 1D tensor lhs and\n 2D tensor rhs with nnpack.\n\n Parameters\n ----------\n lhs : Tensor\n lhs 1D array input[input_channels] of FP32 elements\n rhs : Tensor\n lhs 2D matrix kernel[output_channels][input_channels] of FP32 elements\n\n Returns\n -------\n C : Tensor\n lhs 1D array out[output_channels] of FP32 elements.\n \"\"\"\n m = rhs.shape[0]\n return _api.extern(\n (m, ), [lhs, rhs],\n lambda ins, outs: _intrin.call_packed(\n \"tvm.contrib.nnpack.fully_connected_inference\",\n ins[0], ins[1], outs[0], nthreads), name=\"C\")\n\ndef fully_connected_output(lhs, rhs, nthreads=1):\n \"\"\"Create an extern op that compute fully connected of 2D tensor lhs and\n 2D tensor rhs with nnpack.\n\n Parameters\n ----------\n lhs : Tensor\n lhs 2D matrix input[batch_size][input_channels] of FP32 elements\n rhs : Tensor\n lhs 2D matrix kernel[output_channels][input_channels] of FP32 elements\n\n Returns\n -------\n C : Tensor\n lhs 2D array out[batch_size][output_channels] of FP32 elements.\n \"\"\"\n n = lhs.shape[0]\n m = rhs.shape[0]\n return _api.extern(\n (n, m), [lhs, rhs],\n lambda ins, outs: _intrin.call_packed(\n \"tvm.contrib.nnpack.fully_connected_output\",\n ins[0], ins[1], outs[0], nthreads), name=\"C\")\n\n\nclass ConvolutionAlgorithm:\n AUTO = 0\n FFT_8x8 = 1\n FFT_16x16 = 2\n WT_8x8 = 3\n IMPLICIT_GEMM = 4\n DIRECT = 5\n WT_8x8_FP16 = 6\n\n\nclass ConvolutionTransformStrategy:\n COMPUTE = 1\n PRECOMPUTE = 2\n\n\ndef convolution_inference(\n data, kernel, bias, padding, stride, nthreads=1,\n algorithm=ConvolutionAlgorithm.AUTO):\n \"\"\"Create an extern op to do inference convolution of 4D tensor data and\n 4D tensor kernel and 1D tensor bias with nnpack.\n\n Parameters\n ----------\n data : Tensor\n data 4D tensor input[batch][input_channels][input_height][input_width] of\n FP32 elements.\n kernel : Tensor\n kernel 4D tensor kernel[output_channels][input_channels][kernel_height]\n [kernel_width] of FP32 elements.\n bias : Tensor\n bias 1D array bias[output_channels][input_channels][kernel_height]\n [kernel_width] of FP32 elements.\n 
padding : list\n padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],\n which indicates the padding around the feature map.\n stride : list\n stride A 2-dim list of [stride_height, stride_width], which indicates\n the stride.\n\n Returns\n -------\n output : Tensor\n output 4D tensor output[batch][output_channels][output_height][output_width]\n of FP32 elements.\n \"\"\"\n\n assert isinstance(padding, list) and len(padding) == 4\n assert isinstance(stride, list) and len(stride) == 2\n batch, _, input_height, input_width = data.shape\n output_channels, _, kernel_height, kernel_width = kernel.shape\n output_height = (input_height + padding[0] + padding[1] - kernel_height) / stride[0] + 1\n output_width = (input_width + padding[0] + padding[1] - kernel_width) / stride[1] + 1\n\n return _api.extern(\n (batch, output_channels, output_height, output_width),\n [data, kernel, bias] if bias is not None else [data, kernel],\n lambda ins, outs: _intrin.call_packed(\n \"tvm.contrib.nnpack.convolution_inference\",\n ins[0],\n ins[1],\n ins[2] if bias is not None else 0,\n outs[0], padding[0], padding[1], padding[2], padding[3],\n stride[0], stride[1], nthreads, algorithm), name=\"C\")\n\ndef convolution_inference_without_weight_transform(\n data, transformed_kernel, bias, padding, stride, nthreads=1,\n algorithm=ConvolutionAlgorithm.AUTO):\n \"\"\"Create an extern op to do inference convolution of 4D tensor data and\n 4D pre-transformed tensor kernel and 1D tensor bias with nnpack.\n\n Parameters\n ----------\n data : Tensor\n data 4D tensor input[batch][input_channels][input_height][input_width] of\n FP32 elements.\n transformed_kernel : Tensor\n transformed_kernel 4D tensor kernel[output_channels][input_channels][tile]\n [tile] of FP32 elements.\n bias : Tensor\n bias 1D array bias[output_channels][input_channels][kernel_height]\n [kernel_width] of FP32 elements.\n padding : list\n padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],\n which indicates the padding around the feature map.\n stride : list\n stride A 2-dim list of [stride_height, stride_width], which indicates\n the stride.\n\n Returns\n -------\n output : Tensor\n output 4D tensor output[batch][output_channels][output_height][output_width]\n of FP32 elements.\n \"\"\"\n\n assert algorithm in (ConvolutionAlgorithm.WT_8x8,\n ConvolutionAlgorithm.WT_8x8_FP16)\n assert isinstance(padding, list) and len(padding) == 4\n assert isinstance(stride, list) and len(stride) == 2\n batch, _, input_height, input_width = data.shape\n output_channels, _, _, _ = transformed_kernel.shape\n kernel_height, kernel_width = (3, 3)\n output_height = (input_height + padding[0] + padding[1] - kernel_height) / stride[0] + 1\n output_width = (input_width + padding[0] + padding[1] - kernel_width) / stride[1] + 1\n\n return _api.extern(\n (batch, output_channels, output_height, output_width),\n [data, transformed_kernel, bias] if bias is not None else [data, transformed_kernel],\n lambda ins, outs: _intrin.call_packed(\n \"tvm.contrib.nnpack.convolution_inference_without_weight_transform\",\n ins[0],\n ins[1],\n ins[2] if bias is not None else 0,\n outs[0], padding[0], padding[1], padding[2], padding[3],\n stride[0], stride[1], nthreads, algorithm), name=\"C\")\n\ndef convolution_inference_weight_transform(\n kernel, nthreads=1,\n algorithm=ConvolutionAlgorithm.AUTO):\n \"\"\"Create an extern op to do inference convolution of 3D tensor data and\n 4D tensor kernel and 1D tensor bias with nnpack.\n\n Parameters\n ----------\n kernel : 
Tensor\n kernel 4D tensor kernel[output_channels][input_channels][kernel_height]\n [kernel_width] of FP32 elements.\n\n Returns\n -------\n output : Tensor\n output 4D tensor output[output_channels][input_channels][tile][tile]\n of FP32 elements.\n \"\"\"\n assert algorithm in (ConvolutionAlgorithm.WT_8x8, ConvolutionAlgorithm.WT_8x8_FP16)\n output_channels, input_channels, _, _ = kernel.shape\n\n transform_tile_size = 8\n return _api.extern(\n (output_channels, input_channels, transform_tile_size, transform_tile_size),\n [kernel],\n lambda ins, outs: _intrin.call_packed(\n \"tvm.contrib.nnpack.convolution_inference_weight_transform\",\n ins[0], outs[0], nthreads, algorithm), name=\"transform_kernel\")\n\ndef convolution_output(data, kernel, bias, padding, nthreads=1):\n \"\"\"Create an extern op to compute convolution of 4D tensor data and\n 4D tensor kernel and 1D tensor bias with nnpack.\n\n Parameters\n ----------\n data : Tensor\n data 4D tensor input[batch_size][input_channels][input_height]\n [input_width] of FP32 elements.\n kernel : Tensor\n kernel 4D tensor kernel[output_channels][input_channels][kernel_height]\n [kernel_width] of FP32 elements.\n bias : Tensor\n bias 1D array bias[output_channels][input_channels][kernel_height]\n [kernel_width] of FP32 elements.\n padding : list\n padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],\n which indicates the padding around the feature map.\n\n Returns\n -------\n output : Tensor\n output 4D tensor output[batch_size][output_channels][output_height]\n [output_width] of FP32 elements.\n \"\"\"\n\n assert isinstance(padding, list) and len(padding) == 4\n batch, _, input_height, input_width = data.shape\n output_channels, _, kernel_height, kernel_width = kernel.shape\n output_height = (input_height + padding[0] + padding[1] - kernel_height) + 1\n output_width = (input_width + padding[0] + padding[1] - kernel_width) + 1\n\n return _api.extern(\n (batch, output_channels, output_height, output_width), [data, kernel, bias],\n lambda ins, outs: _intrin.call_packed(\n \"tvm.contrib.nnpack.convolution_output\", ins[0], ins[1], ins[2],\n outs[0], padding[0], padding[1], padding[2], padding[3], nthreads), name=\"C\")\n\n\n_init_api(\"tvm.contrib.nnpack\")\n", "path": "python/tvm/contrib/nnpack.py"}], "after_files": [{"content": "\"\"\"External function interface to NNPACK libraroes.\"\"\"\nfrom __future__ import absolute_import as _abs\n\nfrom .. import api as _api\nfrom .. 
import intrin as _intrin\nfrom .._ffi.function import _init_api\n\ndef is_available():\n \"\"\"Check whether NNPACK is available, that is, `nnp_initialize()`\n returns `nnp_status_success`.\n \"\"\"\n return _initialize() == 0\n\ndef fully_connected_inference(lhs, rhs, nthreads=1):\n \"\"\"Create an extern op that compute fully connected of 1D tensor lhs and\n 2D tensor rhs with nnpack.\n\n Parameters\n ----------\n lhs : Tensor\n lhs 1D array input[input_channels] of FP32 elements\n rhs : Tensor\n lhs 2D matrix kernel[output_channels][input_channels] of FP32 elements\n\n Returns\n -------\n C : Tensor\n lhs 1D array out[output_channels] of FP32 elements.\n \"\"\"\n m = rhs.shape[0]\n return _api.extern(\n (m, ), [lhs, rhs],\n lambda ins, outs: _intrin.call_packed(\n \"tvm.contrib.nnpack.fully_connected_inference\",\n ins[0], ins[1], outs[0], nthreads), name=\"C\")\n\ndef fully_connected_output(lhs, rhs, nthreads=1):\n \"\"\"Create an extern op that compute fully connected of 2D tensor lhs and\n 2D tensor rhs with nnpack.\n\n Parameters\n ----------\n lhs : Tensor\n lhs 2D matrix input[batch_size][input_channels] of FP32 elements\n rhs : Tensor\n lhs 2D matrix kernel[output_channels][input_channels] of FP32 elements\n\n Returns\n -------\n C : Tensor\n lhs 2D array out[batch_size][output_channels] of FP32 elements.\n \"\"\"\n n = lhs.shape[0]\n m = rhs.shape[0]\n return _api.extern(\n (n, m), [lhs, rhs],\n lambda ins, outs: _intrin.call_packed(\n \"tvm.contrib.nnpack.fully_connected_output\",\n ins[0], ins[1], outs[0], nthreads), name=\"C\")\n\n\nclass ConvolutionAlgorithm:\n AUTO = 0\n FFT_8x8 = 1\n FFT_16x16 = 2\n WT_8x8 = 3\n IMPLICIT_GEMM = 4\n DIRECT = 5\n WT_8x8_FP16 = 6\n\n\nclass ConvolutionTransformStrategy:\n COMPUTE = 1\n PRECOMPUTE = 2\n\n\ndef convolution_inference(\n data, kernel, bias, padding, stride, nthreads=1,\n algorithm=ConvolutionAlgorithm.AUTO):\n \"\"\"Create an extern op to do inference convolution of 4D tensor data and\n 4D tensor kernel and 1D tensor bias with nnpack.\n\n Parameters\n ----------\n data : Tensor\n data 4D tensor input[batch][input_channels][input_height][input_width] of\n FP32 elements.\n kernel : Tensor\n kernel 4D tensor kernel[output_channels][input_channels][kernel_height]\n [kernel_width] of FP32 elements.\n bias : Tensor\n bias 1D array bias[output_channels][input_channels][kernel_height]\n [kernel_width] of FP32 elements.\n padding : list\n padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],\n which indicates the padding around the feature map.\n stride : list\n stride A 2-dim list of [stride_height, stride_width], which indicates\n the stride.\n\n Returns\n -------\n output : Tensor\n output 4D tensor output[batch][output_channels][output_height][output_width]\n of FP32 elements.\n \"\"\"\n\n assert isinstance(padding, list) and len(padding) == 4\n assert isinstance(stride, list) and len(stride) == 2\n batch, _, input_height, input_width = data.shape\n output_channels, _, kernel_height, kernel_width = kernel.shape\n output_height = (input_height + padding[0] + padding[1] - kernel_height) / stride[0] + 1\n output_width = (input_width + padding[0] + padding[1] - kernel_width) / stride[1] + 1\n\n return _api.extern(\n (batch, output_channels, output_height, output_width),\n [data, kernel, bias] if bias is not None else [data, kernel],\n lambda ins, outs: _intrin.call_packed(\n \"tvm.contrib.nnpack.convolution_inference\",\n ins[0],\n ins[1],\n ins[2] if bias is not None else 0,\n outs[0], padding[0], padding[1], padding[2], 
padding[3],\n stride[0], stride[1], nthreads, algorithm), name=\"C\")\n\ndef convolution_inference_without_weight_transform(\n data, transformed_kernel, bias, padding, stride, nthreads=1,\n algorithm=ConvolutionAlgorithm.AUTO):\n \"\"\"Create an extern op to do inference convolution of 4D tensor data and\n 4D pre-transformed tensor kernel and 1D tensor bias with nnpack.\n\n Parameters\n ----------\n data : Tensor\n data 4D tensor input[batch][input_channels][input_height][input_width] of\n FP32 elements.\n transformed_kernel : Tensor\n transformed_kernel 4D tensor kernel[output_channels][input_channels][tile]\n [tile] of FP32 elements.\n bias : Tensor\n bias 1D array bias[output_channels][input_channels][kernel_height]\n [kernel_width] of FP32 elements.\n padding : list\n padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],\n which indicates the padding around the feature map.\n stride : list\n stride A 2-dim list of [stride_height, stride_width], which indicates\n the stride.\n\n Returns\n -------\n output : Tensor\n output 4D tensor output[batch][output_channels][output_height][output_width]\n of FP32 elements.\n \"\"\"\n\n assert algorithm in (ConvolutionAlgorithm.WT_8x8,\n ConvolutionAlgorithm.WT_8x8_FP16)\n assert isinstance(padding, list) and len(padding) == 4\n assert isinstance(stride, list) and len(stride) == 2\n batch, _, input_height, input_width = data.shape\n output_channels, _, _, _ = transformed_kernel.shape\n kernel_height, kernel_width = (3, 3)\n output_height = (input_height + padding[0] + padding[1] - kernel_height) / stride[0] + 1\n output_width = (input_width + padding[0] + padding[1] - kernel_width) / stride[1] + 1\n\n return _api.extern(\n (batch, output_channels, output_height, output_width),\n [data, transformed_kernel, bias] if bias is not None else [data, transformed_kernel],\n lambda ins, outs: _intrin.call_packed(\n \"tvm.contrib.nnpack.convolution_inference_without_weight_transform\",\n ins[0],\n ins[1],\n ins[2] if bias is not None else 0,\n outs[0], padding[0], padding[1], padding[2], padding[3],\n stride[0], stride[1], nthreads, algorithm), name=\"C\")\n\ndef convolution_inference_weight_transform(\n kernel, nthreads=1,\n algorithm=ConvolutionAlgorithm.AUTO):\n \"\"\"Create an extern op to do inference convolution of 3D tensor data and\n 4D tensor kernel and 1D tensor bias with nnpack.\n\n Parameters\n ----------\n kernel : Tensor\n kernel 4D tensor kernel[output_channels][input_channels][kernel_height]\n [kernel_width] of FP32 elements.\n\n Returns\n -------\n output : Tensor\n output 4D tensor output[output_channels][input_channels][tile][tile]\n of FP32 elements.\n \"\"\"\n assert algorithm in (ConvolutionAlgorithm.WT_8x8, ConvolutionAlgorithm.WT_8x8_FP16)\n output_channels, input_channels, _, _ = kernel.shape\n\n transform_tile_size = 8\n return _api.extern(\n (output_channels, input_channels, transform_tile_size, transform_tile_size),\n [kernel],\n lambda ins, outs: _intrin.call_packed(\n \"tvm.contrib.nnpack.convolution_inference_weight_transform\",\n ins[0], outs[0], nthreads, algorithm), name=\"transform_kernel\")\n\ndef convolution_output(data, kernel, bias, padding, nthreads=1):\n \"\"\"Create an extern op to compute convolution of 4D tensor data and\n 4D tensor kernel and 1D tensor bias with nnpack.\n\n Parameters\n ----------\n data : Tensor\n data 4D tensor input[batch_size][input_channels][input_height]\n [input_width] of FP32 elements.\n kernel : Tensor\n kernel 4D tensor kernel[output_channels][input_channels][kernel_height]\n 
[kernel_width] of FP32 elements.\n bias : Tensor\n bias 1D array bias[output_channels][input_channels][kernel_height]\n [kernel_width] of FP32 elements.\n padding : list\n padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],\n which indicates the padding around the feature map.\n\n Returns\n -------\n output : Tensor\n output 4D tensor output[batch_size][output_channels][output_height]\n [output_width] of FP32 elements.\n \"\"\"\n\n assert isinstance(padding, list) and len(padding) == 4\n batch, _, input_height, input_width = data.shape\n output_channels, _, kernel_height, kernel_width = kernel.shape\n output_height = (input_height + padding[0] + padding[1] - kernel_height) + 1\n output_width = (input_width + padding[0] + padding[1] - kernel_width) + 1\n\n return _api.extern(\n (batch, output_channels, output_height, output_width), [data, kernel, bias],\n lambda ins, outs: _intrin.call_packed(\n \"tvm.contrib.nnpack.convolution_output\", ins[0], ins[1], ins[2],\n outs[0], padding[0], padding[1], padding[2], padding[3], nthreads), name=\"C\")\n\n\n_init_api(\"tvm.contrib.nnpack\")\n", "path": "python/tvm/contrib/nnpack.py"}]} | 3,342 | 211 |
gh_patches_debug_27919 | rasdani/github-patches | git_diff | saulpw__visidata-1059 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[unfurl-col] unfurling a column with TypedWrapper halts unfurl
**Small description**
When unfurling a column which has a TypedWrapper, a sheet loading halt occurs.
**Expected result**
The same response as whatever `options.unfurl_empty` would do for a row without a list.
**Actual result with screenshot**
AttributeError: 'TypedWrapper' object has no attribute 'xyz'

If you get an unexpected error, please include the full stack trace that you get with `Ctrl-E`.
**Steps to reproduce with sample data and a .vd**
`echo '[{"a":1,"d":{"b":[1,2,3]}},{"a":2,"d":{"c":[1,2,3]}},{"a":3,"d":{"b":[1,2,3]}}]' | vd -f json`
```
sheet col row longname input keystrokes comment
open-file - o
_ d addcol-expr curcol['b'] = create new column from Python expression, with column names as variables
_ curcol_b_ unfurl-col row-wise expand current column of lists (e.g. [2]) or dicts (e.g. {3}) within that column
```
**Additional context**
v2.6dev
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/unfurl.py`
Content:
```
1 '''This adds the `unfurl-col` command, to unfurl a column containing iterable values, such as lists and dicts.
2 Unfurling pushes a new sheet, with each key/value pair in the unfurled column values getting its own row, with the rest of the source sheet's columns copied for each of those rows.
3
4 Note: When unfurling a column, non-iterable objects (numbers, and also strings) are treated as single-item lists, so that they too can be unfurled.
5
6 Credit to Jeremy Singer-Vine for the idea and original implementation.
7 '''
8
9 from collections.abc import Iterable, Mapping
10 from visidata import vd, Progress, Sheet, Column, ColumnItem, SettableColumn, SubColumnFunc, asyncthread, clean_to_id
11
12
13 vd.option('unfurl_empty', False, 'if unfurl includes rows for empty containers', replay=True)
14
15
16 class UnfurledSheet(Sheet):
17 @asyncthread
18 def reload(self):
19 # Copy over base sheet, using SubColumnFunc
20 self.columns = []
21 for col in self.source.columns:
22 if col is self.source_col:
23 # Replace iterable column with two columns: keys and values
24 self.cursorVisibleColIndex = len(self.columns)-1
25 self.addColumn(ColumnItem(col.name + "_key", 1))
26 self.addColumn(ColumnItem(col.name + "_value", 2))
27 else:
28 self.addColumn(SubColumnFunc(col.name, col, 0, keycol=col.keycol))
29
30 self.rows = []
31 unfurl_empty = self.options.unfurl_empty
32 for row in Progress(self.source.rows):
33 val = self.source_col.getValue(row)
34
35 if not isinstance(val, Iterable) or isinstance(val, str):
36 val = [ val ]
37
38 if isinstance(val, Mapping):
39 gen = val.items()
40 else:
41 gen = enumerate(val)
42
43 nadded = 0
44 for key, sub_value in gen:
45 new_row = [ row, key, sub_value ]
46 self.addRow(new_row)
47 nadded += 1
48
49 if unfurl_empty and not nadded:
50 self.addRow([row, None, None])
51
52 @Sheet.api
53 def unfurl_col(sheet, col):
54 clean_id = clean_to_id(col.name)
55 vs = UnfurledSheet(f"{sheet.name}_{clean_id}_unfurled", source=sheet, source_col=col)
56 return vs
57
58
59 Sheet.addCommand("zM", "unfurl-col", "vd.push(unfurl_col(cursorCol))", "row-wise expand current column of lists (e.g. [2]) or dicts (e.g. {3}) within that column")
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/visidata/unfurl.py b/visidata/unfurl.py
--- a/visidata/unfurl.py
+++ b/visidata/unfurl.py
@@ -8,12 +8,14 @@
from collections.abc import Iterable, Mapping
from visidata import vd, Progress, Sheet, Column, ColumnItem, SettableColumn, SubColumnFunc, asyncthread, clean_to_id
+from visidata import stacktrace, TypedExceptionWrapper
vd.option('unfurl_empty', False, 'if unfurl includes rows for empty containers', replay=True)
class UnfurledSheet(Sheet):
+ # rowdef: [row, key, sub_value]
@asyncthread
def reload(self):
# Copy over base sheet, using SubColumnFunc
@@ -30,7 +32,16 @@
self.rows = []
unfurl_empty = self.options.unfurl_empty
for row in Progress(self.source.rows):
- val = self.source_col.getValue(row)
+ try:
+ val = self.source_col.getValue(row)
+ except Exception as e:
+ e.stacktrace = stacktrace()
+ if unfurl_empty:
+ # TypedExceptionWrapper allows the use of z^E to see the stacktrace
+ # the exception on its own lacks clarity
+ self.addRow([row, TypedExceptionWrapper(None, exception=e), TypedExceptionWrapper(None, exception=e)])
+ else:
+ vd.exceptionCaught(e)
if not isinstance(val, Iterable) or isinstance(val, str):
val = [ val ]
| {"golden_diff": "diff --git a/visidata/unfurl.py b/visidata/unfurl.py\n--- a/visidata/unfurl.py\n+++ b/visidata/unfurl.py\n@@ -8,12 +8,14 @@\n \n from collections.abc import Iterable, Mapping\n from visidata import vd, Progress, Sheet, Column, ColumnItem, SettableColumn, SubColumnFunc, asyncthread, clean_to_id\n+from visidata import stacktrace, TypedExceptionWrapper\n \n \n vd.option('unfurl_empty', False, 'if unfurl includes rows for empty containers', replay=True)\n \n \n class UnfurledSheet(Sheet):\n+ # rowdef: [row, key, sub_value]\n @asyncthread\n def reload(self):\n # Copy over base sheet, using SubColumnFunc\n@@ -30,7 +32,16 @@\n self.rows = []\n unfurl_empty = self.options.unfurl_empty\n for row in Progress(self.source.rows):\n- val = self.source_col.getValue(row)\n+ try:\n+ val = self.source_col.getValue(row)\n+ except Exception as e:\n+ e.stacktrace = stacktrace()\n+ if unfurl_empty:\n+ # TypedExceptionWrapper allows the use of z^E to see the stacktrace\n+ # the exception on its own lacks clarity\n+ self.addRow([row, TypedExceptionWrapper(None, exception=e), TypedExceptionWrapper(None, exception=e)])\n+ else:\n+ vd.exceptionCaught(e)\n \n if not isinstance(val, Iterable) or isinstance(val, str):\n val = [ val ]\n", "issue": "[unfurl-col] unfurling a column with TypedWrapper halts unfurl\n**Small description**\r\nWhen unfurling a column which has a TypedWrapper a sheet loading halt occurs\r\n\r\n**Expected result**\r\nThe same response as whatever `options.unfurl_empty` would do for a row without a list.\r\n\r\n**Actual result with screenshot**\r\nAttributeError: 'TypedWrapper' object has no attribute 'xyz'\r\n\r\n\r\nIf you get an unexpected error, please include the full stack trace that you get with `Ctrl-E`.\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\n`echo '[{\"a\":1,\"d\":{\"b\":[1,2,3]}},{\"a\":2,\"d\":{\"c\":[1,2,3]}},{\"a\":3,\"d\":{\"b\":[1,2,3]}}]' | vd -f json`\r\n\r\n```\r\nsheet\tcol\trow\tlongname\tinput\tkeystrokes\tcomment\r\n\t\t\topen-file\t-\to\t\r\n_\td\t\taddcol-expr\tcurcol['b']\t=\tcreate new column from Python expression, with column names as variables\r\n_\tcurcol_b_\t\tunfurl-col\t\t\trow-wise expand current column of lists (e.g. [2]) or dicts (e.g. 
{3}) within that column\r\n\r\n```\r\n\r\n**Additional context**\r\nv2.6dev\r\n\n", "before_files": [{"content": "'''This adds the `unfurl-col` command, to unfurl a column containing iterable values, such as lists and dicts.\nUnfurling pushes a new sheet, with each key/value pair in the unfurled column values getting its own row, with the rest of the source sheet's columns copied for each of those rows.\n\nNote: When unfurling a column, non-iterable objects (numbers, and also strings) are treated as single-item lists, so that they too can be unfurled.\n\nCredit to Jeremy Singer-Vine for the idea and original implementation.\n'''\n\nfrom collections.abc import Iterable, Mapping\nfrom visidata import vd, Progress, Sheet, Column, ColumnItem, SettableColumn, SubColumnFunc, asyncthread, clean_to_id\n\n\nvd.option('unfurl_empty', False, 'if unfurl includes rows for empty containers', replay=True)\n\n\nclass UnfurledSheet(Sheet):\n @asyncthread\n def reload(self):\n # Copy over base sheet, using SubColumnFunc\n self.columns = []\n for col in self.source.columns:\n if col is self.source_col:\n # Replace iterable column with two columns: keys and values\n self.cursorVisibleColIndex = len(self.columns)-1\n self.addColumn(ColumnItem(col.name + \"_key\", 1))\n self.addColumn(ColumnItem(col.name + \"_value\", 2))\n else:\n self.addColumn(SubColumnFunc(col.name, col, 0, keycol=col.keycol))\n\n self.rows = []\n unfurl_empty = self.options.unfurl_empty\n for row in Progress(self.source.rows):\n val = self.source_col.getValue(row)\n\n if not isinstance(val, Iterable) or isinstance(val, str):\n val = [ val ]\n\n if isinstance(val, Mapping):\n gen = val.items()\n else:\n gen = enumerate(val)\n\n nadded = 0\n for key, sub_value in gen:\n new_row = [ row, key, sub_value ]\n self.addRow(new_row)\n nadded += 1\n\n if unfurl_empty and not nadded:\n self.addRow([row, None, None])\n\[email protected]\ndef unfurl_col(sheet, col):\n clean_id = clean_to_id(col.name)\n vs = UnfurledSheet(f\"{sheet.name}_{clean_id}_unfurled\", source=sheet, source_col=col)\n return vs\n\n\nSheet.addCommand(\"zM\", \"unfurl-col\", \"vd.push(unfurl_col(cursorCol))\", \"row-wise expand current column of lists (e.g. [2]) or dicts (e.g. 
{3}) within that column\")\n", "path": "visidata/unfurl.py"}], "after_files": [{"content": "'''This adds the `unfurl-col` command, to unfurl a column containing iterable values, such as lists and dicts.\nUnfurling pushes a new sheet, with each key/value pair in the unfurled column values getting its own row, with the rest of the source sheet's columns copied for each of those rows.\n\nNote: When unfurling a column, non-iterable objects (numbers, and also strings) are treated as single-item lists, so that they too can be unfurled.\n\nCredit to Jeremy Singer-Vine for the idea and original implementation.\n'''\n\nfrom collections.abc import Iterable, Mapping\nfrom visidata import vd, Progress, Sheet, Column, ColumnItem, SettableColumn, SubColumnFunc, asyncthread, clean_to_id\nfrom visidata import stacktrace, TypedExceptionWrapper\n\n\nvd.option('unfurl_empty', False, 'if unfurl includes rows for empty containers', replay=True)\n\n\nclass UnfurledSheet(Sheet):\n # rowdef: [row, key, sub_value]\n @asyncthread\n def reload(self):\n # Copy over base sheet, using SubColumnFunc\n self.columns = []\n for col in self.source.columns:\n if col is self.source_col:\n # Replace iterable column with two columns: keys and values\n self.cursorVisibleColIndex = len(self.columns)-1\n self.addColumn(ColumnItem(col.name + \"_key\", 1))\n self.addColumn(ColumnItem(col.name + \"_value\", 2))\n else:\n self.addColumn(SubColumnFunc(col.name, col, 0, keycol=col.keycol))\n\n self.rows = []\n unfurl_empty = self.options.unfurl_empty\n for row in Progress(self.source.rows):\n try:\n val = self.source_col.getValue(row)\n except Exception as e:\n e.stacktrace = stacktrace()\n if unfurl_empty:\n # TypedExceptionWrapper allows the use of z^E to see the stacktrace\n # the exception on its own lacks clarity\n self.addRow([row, TypedExceptionWrapper(None, exception=e), TypedExceptionWrapper(None, exception=e)])\n else:\n vd.exceptionCaught(e)\n\n if not isinstance(val, Iterable) or isinstance(val, str):\n val = [ val ]\n\n if isinstance(val, Mapping):\n gen = val.items()\n else:\n gen = enumerate(val)\n\n nadded = 0\n for key, sub_value in gen:\n new_row = [ row, key, sub_value ]\n self.addRow(new_row)\n nadded += 1\n\n if unfurl_empty and not nadded:\n self.addRow([row, None, None])\n\[email protected]\ndef unfurl_col(sheet, col):\n clean_id = clean_to_id(col.name)\n vs = UnfurledSheet(f\"{sheet.name}_{clean_id}_unfurled\", source=sheet, source_col=col)\n return vs\n\n\nSheet.addCommand(\"zM\", \"unfurl-col\", \"vd.push(unfurl_col(cursorCol))\", \"row-wise expand current column of lists (e.g. [2]) or dicts (e.g. {3}) within that column\")\n", "path": "visidata/unfurl.py"}]} | 1,281 | 348 |
gh_patches_debug_11658 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1890 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IndexError when "url" not found in args or kwargs.
### Overview
I've found an issue at line 46, in the `call()` method of the `AioHttpClientInstrumentation(...)` class.
https://github.com/elastic/apm-agent-python/blob/da93e7af448abcac367d216e2d20a584051f6e50/elasticapm/instrumentation/packages/asyncio/aiohttp_client.py#L44-L47
I'm getting an `IndexError` exception due to the lack of "url" in both kwargs and in args[1]. The reason is that the argument containing the URL is called "str_or_url".
https://github.com/aio-libs/aiohttp/blob/4b59d55e9e79f5a0b1932d6dc9f6b12a33d19266/aiohttp/client.py#L325-L328
By default the code is running fine, but this issue will appear in cases where someone will try to use `ClientSession._request()` method directly AND use keyword-arguments.
### How to recreate the bug?
This is a general example of how to recreate the bug. Let's assume that somewhere in my code I want to connect to some external HTTP REST API service using the aiohttp library. I'll be using a custom-made session object based on the ClientSession object from the aiohttp library.
```python
from aiohttp import ClientSession, ClientResponse
class CustomSession(ClientSession):
async def _request(self, method: str, str_or_url: StrOrURL, **kwargs: Any) -> ClientResponse:
# put some extra code here, like add retry functionality
client_response = await super()._request(method=method, str_or_url=url, **kwargs) # << ISSUE IS HERE
return client_response
```
The above code is valid and works as long as there is no APM agent running as middleware. With the APM agent added, the code has to be written as below:
```python
client_response = await super()._request(method, url, **kwargs)
```
### How to fix it?
Search for both `url` and `str_or_url` in kwargs. It's a simple fix that can fit in the same line, I bet.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/instrumentation/packages/asyncio/aiohttp_client.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from elasticapm import async_capture_span
32 from elasticapm.conf import constants
33 from elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule
34 from elasticapm.traces import DroppedSpan, execution_context
35 from elasticapm.utils import get_host_from_url, sanitize_url
36 from elasticapm.utils.disttracing import TracingOptions
37
38
39 class AioHttpClientInstrumentation(AsyncAbstractInstrumentedModule):
40 name = "aiohttp_client"
41
42 instrument_list = [("aiohttp.client", "ClientSession._request")]
43
44 async def call(self, module, method, wrapped, instance, args, kwargs):
45 method = kwargs["method"] if "method" in kwargs else args[0]
46 url = kwargs["url"] if "url" in kwargs else args[1]
47 url = str(url)
48
49 signature = " ".join([method.upper(), get_host_from_url(url)])
50 url = sanitize_url(url)
51 transaction = execution_context.get_transaction()
52
53 async with async_capture_span(
54 signature,
55 span_type="external",
56 span_subtype="http",
57 extra={"http": {"url": url}},
58 leaf=True,
59 ) as span:
60 leaf_span = span
61 while isinstance(leaf_span, DroppedSpan):
62 leaf_span = leaf_span.parent
63
64 parent_id = leaf_span.id if leaf_span else transaction.id
65 trace_parent = transaction.trace_parent.copy_from(
66 span_id=parent_id, trace_options=TracingOptions(recorded=True)
67 )
68 headers = kwargs.get("headers") or {}
69 self._set_disttracing_headers(headers, trace_parent, transaction)
70 kwargs["headers"] = headers
71 response = await wrapped(*args, **kwargs)
72 if response:
73 if span.context:
74 span.context["http"]["status_code"] = response.status
75 span.set_success() if response.status < 400 else span.set_failure()
76 return response
77
78 def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):
79 # since we don't have a span, we set the span id to the transaction id
80 trace_parent = transaction.trace_parent.copy_from(
81 span_id=transaction.id, trace_options=TracingOptions(recorded=False)
82 )
83
84 headers = kwargs.get("headers") or {}
85 self._set_disttracing_headers(headers, trace_parent, transaction)
86 kwargs["headers"] = headers
87 return args, kwargs
88
89 def _set_disttracing_headers(self, headers, trace_parent, transaction):
90 trace_parent_str = trace_parent.to_string()
91 headers[constants.TRACEPARENT_HEADER_NAME] = trace_parent_str
92 if transaction.tracer.config.use_elastic_traceparent_header:
93 headers[constants.TRACEPARENT_LEGACY_HEADER_NAME] = trace_parent_str
94 if trace_parent.tracestate:
95 headers[constants.TRACESTATE_HEADER_NAME] = trace_parent.tracestate
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/instrumentation/packages/asyncio/aiohttp_client.py b/elasticapm/instrumentation/packages/asyncio/aiohttp_client.py
--- a/elasticapm/instrumentation/packages/asyncio/aiohttp_client.py
+++ b/elasticapm/instrumentation/packages/asyncio/aiohttp_client.py
@@ -42,8 +42,10 @@
instrument_list = [("aiohttp.client", "ClientSession._request")]
async def call(self, module, method, wrapped, instance, args, kwargs):
- method = kwargs["method"] if "method" in kwargs else args[0]
- url = kwargs["url"] if "url" in kwargs else args[1]
+ method = kwargs.get("method", args[0])
+ url = kwargs.get("url", kwargs.get("str_or_url", None))
+ if url is None:
+ url = args[1]
url = str(url)
signature = " ".join([method.upper(), get_host_from_url(url)])
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/asyncio/aiohttp_client.py b/elasticapm/instrumentation/packages/asyncio/aiohttp_client.py\n--- a/elasticapm/instrumentation/packages/asyncio/aiohttp_client.py\n+++ b/elasticapm/instrumentation/packages/asyncio/aiohttp_client.py\n@@ -42,8 +42,10 @@\n instrument_list = [(\"aiohttp.client\", \"ClientSession._request\")]\n \n async def call(self, module, method, wrapped, instance, args, kwargs):\n- method = kwargs[\"method\"] if \"method\" in kwargs else args[0]\n- url = kwargs[\"url\"] if \"url\" in kwargs else args[1]\n+ method = kwargs.get(\"method\", args[0])\n+ url = kwargs.get(\"url\", kwargs.get(\"str_or_url\", None))\n+ if url is None:\n+ url = args[1]\n url = str(url)\n \n signature = \" \".join([method.upper(), get_host_from_url(url)])\n", "issue": "IndexError when \"url\" not found in args or kwargs.\n### Overview\r\n\r\nI've found an issue in line 46 of `call()` method in `AioHttpClientInstrumentation(...)` class.\r\n\r\nhttps://github.com/elastic/apm-agent-python/blob/da93e7af448abcac367d216e2d20a584051f6e50/elasticapm/instrumentation/packages/asyncio/aiohttp_client.py#L44-L47\r\n\r\nI'm getting an `IndexError` exception due to lack or \"url\" in both kwargs and in args[1]. The the reason is that the argument containing urls is called \"str_or_url\".\r\n\r\nhttps://github.com/aio-libs/aiohttp/blob/4b59d55e9e79f5a0b1932d6dc9f6b12a33d19266/aiohttp/client.py#L325-L328\r\n\r\nBy default the code is running fine, but this issue will appear in cases where someone will try to use `ClientSession._request()` method directly AND use keyword-arguments.\r\n\r\n### How to recreate the bug?\r\n\r\nThis is a general example on how to recreate the bug. Lets assume that somewhere in my code I want to connect to some external http rest-api service using aiohttp library. I'll be using custom made session object based on ClientSession object from aiohttp library.\r\n\r\n```python\r\nfrom aiohttp import ClientSession, ClientResponse\r\n\r\nclass CustomSession(ClientSession):\r\n\r\n async def _request(self, method: str, str_or_url: StrOrURL, **kwargs: Any) -> ClientResponse:\r\n # put some extra code here, like add retry functionality\r\n client_response = await super()._request(method=method, str_or_url=url, **kwargs) # << ISSUE IS HERE\r\n return client_response\r\n```\r\nthe above code is valid and it works as long as there is no apm agent running as middleware. With apm agent added, the code has to be written as below:\r\n```python\r\n client_response = await super()._request(method, url, **kwargs)\r\n```\r\n\r\n### How to fix it?\r\n\r\nDo search for both `url` and `str_or_url` in kwargs. 
It's simple fix that can fit in same line, I bet.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm import async_capture_span\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule\nfrom elasticapm.traces import DroppedSpan, execution_context\nfrom elasticapm.utils import get_host_from_url, sanitize_url\nfrom elasticapm.utils.disttracing import TracingOptions\n\n\nclass AioHttpClientInstrumentation(AsyncAbstractInstrumentedModule):\n name = \"aiohttp_client\"\n\n instrument_list = [(\"aiohttp.client\", \"ClientSession._request\")]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n method = kwargs[\"method\"] if \"method\" in kwargs else args[0]\n url = kwargs[\"url\"] if \"url\" in kwargs else args[1]\n url = str(url)\n\n signature = \" \".join([method.upper(), get_host_from_url(url)])\n url = sanitize_url(url)\n transaction = execution_context.get_transaction()\n\n async with async_capture_span(\n signature,\n span_type=\"external\",\n span_subtype=\"http\",\n extra={\"http\": {\"url\": url}},\n leaf=True,\n ) as span:\n leaf_span = span\n while isinstance(leaf_span, DroppedSpan):\n leaf_span = leaf_span.parent\n\n parent_id = leaf_span.id if leaf_span else transaction.id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=parent_id, trace_options=TracingOptions(recorded=True)\n )\n headers = kwargs.get(\"headers\") or {}\n self._set_disttracing_headers(headers, trace_parent, transaction)\n kwargs[\"headers\"] = headers\n response = await wrapped(*args, **kwargs)\n if response:\n if span.context:\n span.context[\"http\"][\"status_code\"] = response.status\n span.set_success() if response.status < 400 else span.set_failure()\n return response\n\n def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):\n # since we don't have a span, we set the span id to the transaction 
id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=transaction.id, trace_options=TracingOptions(recorded=False)\n )\n\n headers = kwargs.get(\"headers\") or {}\n self._set_disttracing_headers(headers, trace_parent, transaction)\n kwargs[\"headers\"] = headers\n return args, kwargs\n\n def _set_disttracing_headers(self, headers, trace_parent, transaction):\n trace_parent_str = trace_parent.to_string()\n headers[constants.TRACEPARENT_HEADER_NAME] = trace_parent_str\n if transaction.tracer.config.use_elastic_traceparent_header:\n headers[constants.TRACEPARENT_LEGACY_HEADER_NAME] = trace_parent_str\n if trace_parent.tracestate:\n headers[constants.TRACESTATE_HEADER_NAME] = trace_parent.tracestate\n", "path": "elasticapm/instrumentation/packages/asyncio/aiohttp_client.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm import async_capture_span\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule\nfrom elasticapm.traces import DroppedSpan, execution_context\nfrom elasticapm.utils import get_host_from_url, sanitize_url\nfrom elasticapm.utils.disttracing import TracingOptions\n\n\nclass AioHttpClientInstrumentation(AsyncAbstractInstrumentedModule):\n name = \"aiohttp_client\"\n\n instrument_list = [(\"aiohttp.client\", \"ClientSession._request\")]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n method = kwargs.get(\"method\", args[0])\n url = kwargs.get(\"url\", kwargs.get(\"str_or_url\", None))\n if url is None:\n url = args[1]\n url = str(url)\n\n signature = \" \".join([method.upper(), get_host_from_url(url)])\n url = sanitize_url(url)\n transaction = execution_context.get_transaction()\n\n async with async_capture_span(\n signature,\n span_type=\"external\",\n span_subtype=\"http\",\n extra={\"http\": {\"url\": url}},\n leaf=True,\n ) as span:\n leaf_span = span\n while isinstance(leaf_span, DroppedSpan):\n leaf_span = leaf_span.parent\n\n parent_id = leaf_span.id if leaf_span else transaction.id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=parent_id, trace_options=TracingOptions(recorded=True)\n )\n headers = kwargs.get(\"headers\") or {}\n self._set_disttracing_headers(headers, trace_parent, transaction)\n kwargs[\"headers\"] = headers\n response = await wrapped(*args, **kwargs)\n if response:\n if span.context:\n span.context[\"http\"][\"status_code\"] = response.status\n span.set_success() if response.status < 400 else span.set_failure()\n return response\n\n def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):\n # since we don't have a span, we set the span id to the transaction id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=transaction.id, trace_options=TracingOptions(recorded=False)\n )\n\n headers = kwargs.get(\"headers\") or {}\n self._set_disttracing_headers(headers, trace_parent, transaction)\n kwargs[\"headers\"] = headers\n return args, kwargs\n\n def _set_disttracing_headers(self, headers, trace_parent, transaction):\n trace_parent_str = trace_parent.to_string()\n headers[constants.TRACEPARENT_HEADER_NAME] = trace_parent_str\n if transaction.tracer.config.use_elastic_traceparent_header:\n headers[constants.TRACEPARENT_LEGACY_HEADER_NAME] = trace_parent_str\n if trace_parent.tracestate:\n headers[constants.TRACESTATE_HEADER_NAME] = trace_parent.tracestate\n", "path": "elasticapm/instrumentation/packages/asyncio/aiohttp_client.py"}]} | 1,917 | 231 |
gh_patches_debug_17921 | rasdani/github-patches | git_diff | mirumee__ariadne-59 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop Python 3.5
Due to the lack of support for [variable type annotations](https://www.python.org/dev/peps/pep-0526/), I suggest dropping support for Python 3.5. This is already a problem in #30, in which either the code or mypy is failing and the only solution is to remove hints.
We might consider testing ariadne on 3.7 for future-proofing our project.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/executable_schema.py`
Content:
```
1 from collections import defaultdict
2 from itertools import chain
3 from typing import Iterator, List, Union
4
5 from graphql import GraphQLSchema
6
7 from .build_schema import build_schema_from_type_definitions
8 from .resolvers import add_resolve_functions_to_schema
9
10
11 def decompose_maps(resolvers_maps: List[dict]) -> Iterator[tuple]:
12 def flatten(rm):
13 for key, value in rm.items():
14 for resolver_name, resolver in value.items():
15 yield (key, resolver_name, resolver)
16
17 return chain.from_iterable(flatten(m) for m in resolvers_maps)
18
19
20 def merge_resolvers(resolver_list: Iterator[tuple]) -> dict:
21 output = defaultdict(dict) # type: dict
22 for key, resolver_name, resolver in resolver_list:
23 output[key][resolver_name] = resolver
24 return output
25
26
27 def join_type_defs(type_defs: List[str]) -> str:
28 return "\n\n".join(t.strip() for t in type_defs)
29
30
31 def make_executable_schema(
32 type_defs: Union[str, List[str]], resolvers: Union[dict, List[dict]]
33 ) -> GraphQLSchema:
34 if isinstance(type_defs, list):
35 type_defs = join_type_defs(type_defs)
36
37 schema = build_schema_from_type_definitions(type_defs)
38
39 if isinstance(resolvers, list):
40 add_resolve_functions_to_schema(
41 schema, merge_resolvers(decompose_maps(resolvers))
42 )
43 elif isinstance(resolvers, dict):
44 add_resolve_functions_to_schema(schema, resolvers)
45
46 return schema
47
```
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2 import os
3 from setuptools import setup
4
5 CLASSIFIERS = [
6 "Intended Audience :: Developers",
7 "License :: OSI Approved :: BSD License",
8 "Operating System :: OS Independent",
9 "Programming Language :: Python",
10 "Programming Language :: Python :: 3.5",
11 "Programming Language :: Python :: 3.6",
12 "Programming Language :: Python :: 3.7",
13 "Topic :: Software Development :: Libraries :: Python Modules",
14 ]
15
16 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
17 with open(README_PATH, "r") as f:
18 README = f.read()
19
20 setup(
21 name="ariadne",
22 author="Mirumee Software",
23 author_email="[email protected]",
24 description="Ariadne is a Python library for implementing GraphQL servers.",
25 long_description=README,
26 long_description_content_type="text/markdown",
27 license="BSD",
28 version="0.1.0",
29 url="https://github.com/mirumee/ariadne",
30 packages=["ariadne"],
31 install_requires=["graphql-core>=2.1", "typing>=3.6.0"],
32 classifiers=CLASSIFIERS,
33 platforms=["any"],
34 )
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py
--- a/ariadne/executable_schema.py
+++ b/ariadne/executable_schema.py
@@ -18,7 +18,7 @@
def merge_resolvers(resolver_list: Iterator[tuple]) -> dict:
- output = defaultdict(dict) # type: dict
+ output: dict = defaultdict(dict)
for key, resolver_name, resolver in resolver_list:
output[key][resolver_name] = resolver
return output
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,6 @@
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
- "Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries :: Python Modules",
| {"golden_diff": "diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py\n--- a/ariadne/executable_schema.py\n+++ b/ariadne/executable_schema.py\n@@ -18,7 +18,7 @@\n \n \n def merge_resolvers(resolver_list: Iterator[tuple]) -> dict:\n- output = defaultdict(dict) # type: dict\n+ output: dict = defaultdict(dict)\n for key, resolver_name, resolver in resolver_list:\n output[key][resolver_name] = resolver\n return output\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,7 +7,6 @@\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n", "issue": "Drop Python 3.5\nDue to lack of support for [variable type annotations](https://www.python.org/dev/peps/pep-0526/) I suggest to drop support for Python 3.5. This is already a problem in #30 in which either code or mypy is failing and only solution is to remove hints.\r\n\r\nWe might consider testing ariadne on 3.7 for future-proofing our project.\n", "before_files": [{"content": "from collections import defaultdict\nfrom itertools import chain\nfrom typing import Iterator, List, Union\n\nfrom graphql import GraphQLSchema\n\nfrom .build_schema import build_schema_from_type_definitions\nfrom .resolvers import add_resolve_functions_to_schema\n\n\ndef decompose_maps(resolvers_maps: List[dict]) -> Iterator[tuple]:\n def flatten(rm):\n for key, value in rm.items():\n for resolver_name, resolver in value.items():\n yield (key, resolver_name, resolver)\n\n return chain.from_iterable(flatten(m) for m in resolvers_maps)\n\n\ndef merge_resolvers(resolver_list: Iterator[tuple]) -> dict:\n output = defaultdict(dict) # type: dict\n for key, resolver_name, resolver in resolver_list:\n output[key][resolver_name] = resolver\n return output\n\n\ndef join_type_defs(type_defs: List[str]) -> str:\n return \"\\n\\n\".join(t.strip() for t in type_defs)\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]], resolvers: Union[dict, List[dict]]\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n schema = build_schema_from_type_definitions(type_defs)\n\n if isinstance(resolvers, list):\n add_resolve_functions_to_schema(\n schema, merge_resolvers(decompose_maps(resolvers))\n )\n elif isinstance(resolvers, dict):\n add_resolve_functions_to_schema(schema, resolvers)\n\n return schema\n", "path": "ariadne/executable_schema.py"}, {"content": "#! 
/usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.1.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n install_requires=[\"graphql-core>=2.1\", \"typing>=3.6.0\"],\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from collections import defaultdict\nfrom itertools import chain\nfrom typing import Iterator, List, Union\n\nfrom graphql import GraphQLSchema\n\nfrom .build_schema import build_schema_from_type_definitions\nfrom .resolvers import add_resolve_functions_to_schema\n\n\ndef decompose_maps(resolvers_maps: List[dict]) -> Iterator[tuple]:\n def flatten(rm):\n for key, value in rm.items():\n for resolver_name, resolver in value.items():\n yield (key, resolver_name, resolver)\n\n return chain.from_iterable(flatten(m) for m in resolvers_maps)\n\n\ndef merge_resolvers(resolver_list: Iterator[tuple]) -> dict:\n output: dict = defaultdict(dict)\n for key, resolver_name, resolver in resolver_list:\n output[key][resolver_name] = resolver\n return output\n\n\ndef join_type_defs(type_defs: List[str]) -> str:\n return \"\\n\\n\".join(t.strip() for t in type_defs)\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]], resolvers: Union[dict, List[dict]]\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n schema = build_schema_from_type_definitions(type_defs)\n\n if isinstance(resolvers, list):\n add_resolve_functions_to_schema(\n schema, merge_resolvers(decompose_maps(resolvers))\n )\n elif isinstance(resolvers, dict):\n add_resolve_functions_to_schema(schema, resolvers)\n\n return schema\n", "path": "ariadne/executable_schema.py"}, {"content": "#! 
/usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.1.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n install_requires=[\"graphql-core>=2.1\", \"typing>=3.6.0\"],\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n)\n", "path": "setup.py"}]} | 1,111 | 224 |
gh_patches_debug_4177 | rasdani/github-patches | git_diff | pretix__pretix-808 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Display remaining quota of add-on products
Currently, if you enable "Show number of tickets left" on an event, that only applies to products, not add-on products. In my opinion, if there is a quota present for add-on products it should also be displayed (if the quota is exceeded, that is already displayed).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pretix/presale/forms/checkout.py`
Content:
```
1 from itertools import chain
2
3 from django import forms
4 from django.core.exceptions import ValidationError
5 from django.db.models import Count, Prefetch, Q
6 from django.utils.encoding import force_text
7 from django.utils.formats import number_format
8 from django.utils.timezone import now
9 from django.utils.translation import ugettext_lazy as _
10
11 from pretix.base.forms.questions import (
12 BaseInvoiceAddressForm, BaseQuestionsForm,
13 )
14 from pretix.base.models import ItemVariation
15 from pretix.base.models.tax import TAXED_ZERO
16 from pretix.base.templatetags.money import money_filter
17 from pretix.base.templatetags.rich_text import rich_text
18 from pretix.base.validators import EmailBlacklistValidator
19 from pretix.presale.signals import contact_form_fields
20
21
22 class ContactForm(forms.Form):
23 required_css_class = 'required'
24 email = forms.EmailField(label=_('E-mail'),
25 help_text=_('Make sure to enter a valid email address. We will send you an order '
26 'confirmation including a link that you need in case you want to make '
27 'modifications to your order or download your ticket later.'),
28 validators=[EmailBlacklistValidator()],
29 widget=forms.EmailInput(attrs={'autofocus': 'autofocus'}))
30
31 def __init__(self, *args, **kwargs):
32 self.event = kwargs.pop('event')
33 self.request = kwargs.pop('request')
34 super().__init__(*args, **kwargs)
35
36 if self.event.settings.order_email_asked_twice:
37 self.fields['email_repeat'] = forms.EmailField(
38 label=_('E-mail address (repeated)'),
39 help_text=_('Please enter the same email address again to make sure you typed it correctly.')
40 )
41
42 responses = contact_form_fields.send(self.event, request=self.request)
43 for r, response in sorted(responses, key=lambda r: str(r[0])):
44 for key, value in response.items():
45 # We need to be this explicit, since OrderedDict.update does not retain ordering
46 self.fields[key] = value
47
48 def clean(self):
49 if self.event.settings.order_email_asked_twice and self.cleaned_data.get('email') and self.cleaned_data.get('email_repeat'):
50 if self.cleaned_data.get('email').lower() != self.cleaned_data.get('email_repeat').lower():
51 raise ValidationError(_('Please enter the same email address twice.'))
52
53
54 class InvoiceAddressForm(BaseInvoiceAddressForm):
55 required_css_class = 'required'
56 vat_warning = True
57
58
59 class QuestionsForm(BaseQuestionsForm):
60 """
61 This form class is responsible for asking order-related questions. This includes
62 the attendee name for admission tickets, if the corresponding setting is enabled,
63 as well as additional questions defined by the organizer.
64 """
65 required_css_class = 'required'
66
67
68 class AddOnRadioSelect(forms.RadioSelect):
69 option_template_name = 'pretixpresale/forms/addon_choice_option.html'
70
71 def optgroups(self, name, value, attrs=None):
72 attrs = attrs or {}
73 groups = []
74 has_selected = False
75 for index, (option_value, option_label, option_desc) in enumerate(chain(self.choices)):
76 if option_value is None:
77 option_value = ''
78 if isinstance(option_label, (list, tuple)):
79 raise TypeError('Choice groups are not supported here')
80 group_name = None
81 subgroup = []
82 groups.append((group_name, subgroup, index))
83
84 selected = (
85 force_text(option_value) in value and
86 (has_selected is False or self.allow_multiple_selected)
87 )
88 if selected is True and has_selected is False:
89 has_selected = True
90 attrs['description'] = option_desc
91 subgroup.append(self.create_option(
92 name, option_value, option_label, selected, index,
93 subindex=None, attrs=attrs,
94 ))
95
96 return groups
97
98
99 class AddOnVariationField(forms.ChoiceField):
100 def valid_value(self, value):
101 text_value = force_text(value)
102 for k, v, d in self.choices:
103 if value == k or text_value == force_text(k):
104 return True
105 return False
106
107
108 class AddOnsForm(forms.Form):
109 """
110 This form class is responsible for selecting add-ons to a product in the cart.
111 """
112
113 def _label(self, event, item_or_variation, avail, override_price=None):
114 if isinstance(item_or_variation, ItemVariation):
115 variation = item_or_variation
116 item = item_or_variation.item
117 price = variation.price
118 label = variation.value
119 else:
120 item = item_or_variation
121 price = item.default_price
122 label = item.name
123
124 if override_price:
125 price = override_price
126
127 if self.price_included:
128 price = TAXED_ZERO
129 else:
130 price = item.tax(price)
131
132 if not price.gross:
133 n = '{name}'.format(
134 name=label
135 )
136 elif not price.rate:
137 n = _('{name} (+ {price})').format(
138 name=label, price=money_filter(price.gross, event.currency)
139 )
140 elif event.settings.display_net_prices:
141 n = _('{name} (+ {price} plus {taxes}% {taxname})').format(
142 name=label, price=money_filter(price.net, event.currency),
143 taxes=number_format(price.rate), taxname=price.name
144 )
145 else:
146 n = _('{name} (+ {price} incl. {taxes}% {taxname})').format(
147 name=label, price=money_filter(price.gross, event.currency),
148 taxes=number_format(price.rate), taxname=price.name
149 )
150
151 if avail[0] < 20:
152 n += ' – {}'.format(_('SOLD OUT'))
153 elif avail[0] < 100:
154 n += ' – {}'.format(_('Currently unavailable'))
155
156 return n
157
158 def __init__(self, *args, **kwargs):
159 """
160 Takes additional keyword arguments:
161
162 :param category: The category to choose from
163 :param event: The event this belongs to
164 :param subevent: The event the parent cart position belongs to
165 :param initial: The current set of add-ons
166 :param quota_cache: A shared dictionary for quota caching
167 :param item_cache: A shared dictionary for item/category caching
168 """
169 category = kwargs.pop('category')
170 event = kwargs.pop('event')
171 subevent = kwargs.pop('subevent')
172 current_addons = kwargs.pop('initial')
173 quota_cache = kwargs.pop('quota_cache')
174 item_cache = kwargs.pop('item_cache')
175 self.price_included = kwargs.pop('price_included')
176
177 super().__init__(*args, **kwargs)
178
179 if subevent:
180 item_price_override = subevent.item_price_overrides
181 var_price_override = subevent.var_price_overrides
182 else:
183 item_price_override = {}
184 var_price_override = {}
185
186 ckey = '{}-{}'.format(subevent.pk if subevent else 0, category.pk)
187 if ckey not in item_cache:
188 # Get all items to possibly show
189 items = category.items.filter(
190 Q(active=True)
191 & Q(Q(available_from__isnull=True) | Q(available_from__lte=now()))
192 & Q(Q(available_until__isnull=True) | Q(available_until__gte=now()))
193 & Q(hide_without_voucher=False)
194 ).select_related('tax_rule').prefetch_related(
195 Prefetch('quotas',
196 to_attr='_subevent_quotas',
197 queryset=event.quotas.filter(subevent=subevent)),
198 Prefetch('variations', to_attr='available_variations',
199 queryset=ItemVariation.objects.filter(active=True, quotas__isnull=False).prefetch_related(
200 Prefetch('quotas',
201 to_attr='_subevent_quotas',
202 queryset=event.quotas.filter(subevent=subevent))
203 ).distinct()),
204 ).annotate(
205 quotac=Count('quotas'),
206 has_variations=Count('variations')
207 ).filter(
208 quotac__gt=0
209 ).order_by('category__position', 'category_id', 'position', 'name')
210 item_cache[ckey] = items
211 else:
212 items = item_cache[ckey]
213
214 for i in items:
215 if i.has_variations:
216 choices = [('', _('no selection'), '')]
217 for v in i.available_variations:
218 cached_availability = v.check_quotas(subevent=subevent, _cache=quota_cache)
219 if v._subevent_quotas:
220 choices.append(
221 (v.pk,
222 self._label(event, v, cached_availability,
223 override_price=var_price_override.get(v.pk)),
224 v.description)
225 )
226
227 field = AddOnVariationField(
228 choices=choices,
229 label=i.name,
230 required=False,
231 widget=AddOnRadioSelect,
232 help_text=rich_text(str(i.description)),
233 initial=current_addons.get(i.pk),
234 )
235 if len(choices) > 1:
236 self.fields['item_%s' % i.pk] = field
237 else:
238 if not i._subevent_quotas:
239 continue
240 cached_availability = i.check_quotas(subevent=subevent, _cache=quota_cache)
241 field = forms.BooleanField(
242 label=self._label(event, i, cached_availability,
243 override_price=item_price_override.get(i.pk)),
244 required=False,
245 initial=i.pk in current_addons,
246 help_text=rich_text(str(i.description)),
247 )
248 self.fields['item_%s' % i.pk] = field
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pretix/presale/forms/checkout.py b/src/pretix/presale/forms/checkout.py
--- a/src/pretix/presale/forms/checkout.py
+++ b/src/pretix/presale/forms/checkout.py
@@ -152,6 +152,9 @@
n += ' – {}'.format(_('SOLD OUT'))
elif avail[0] < 100:
n += ' – {}'.format(_('Currently unavailable'))
+ else:
+ if avail[1] is not None and event.settings.show_quota_left:
+ n += ' – {}'.format(_('%(num)s currently available') % {'num': avail[1]})
return n
| {"golden_diff": "diff --git a/src/pretix/presale/forms/checkout.py b/src/pretix/presale/forms/checkout.py\n--- a/src/pretix/presale/forms/checkout.py\n+++ b/src/pretix/presale/forms/checkout.py\n@@ -152,6 +152,9 @@\n n += ' \u2013 {}'.format(_('SOLD OUT'))\n elif avail[0] < 100:\n n += ' \u2013 {}'.format(_('Currently unavailable'))\n+ else:\n+ if avail[1] is not None and event.settings.show_quota_left:\n+ n += ' \u2013 {}'.format(_('%(num)s currently available') % {'num': avail[1]})\n \n return n\n", "issue": "Display remaining quota of add-on products\nCurrently if you enable \"Show number of tickets left\" on an event that only applies to products, not add-on products. In my opinion if there is an quota presend for add-on products it should also be displayed (if the quota is exceeded that is already displayed).\nDisplay remaining quota of add-on products\nCurrently if you enable \"Show number of tickets left\" on an event that only applies to products, not add-on products. In my opinion if there is an quota presend for add-on products it should also be displayed (if the quota is exceeded that is already displayed).\n", "before_files": [{"content": "from itertools import chain\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Count, Prefetch, Q\nfrom django.utils.encoding import force_text\nfrom django.utils.formats import number_format\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom pretix.base.forms.questions import (\n BaseInvoiceAddressForm, BaseQuestionsForm,\n)\nfrom pretix.base.models import ItemVariation\nfrom pretix.base.models.tax import TAXED_ZERO\nfrom pretix.base.templatetags.money import money_filter\nfrom pretix.base.templatetags.rich_text import rich_text\nfrom pretix.base.validators import EmailBlacklistValidator\nfrom pretix.presale.signals import contact_form_fields\n\n\nclass ContactForm(forms.Form):\n required_css_class = 'required'\n email = forms.EmailField(label=_('E-mail'),\n help_text=_('Make sure to enter a valid email address. 
We will send you an order '\n 'confirmation including a link that you need in case you want to make '\n 'modifications to your order or download your ticket later.'),\n validators=[EmailBlacklistValidator()],\n widget=forms.EmailInput(attrs={'autofocus': 'autofocus'}))\n\n def __init__(self, *args, **kwargs):\n self.event = kwargs.pop('event')\n self.request = kwargs.pop('request')\n super().__init__(*args, **kwargs)\n\n if self.event.settings.order_email_asked_twice:\n self.fields['email_repeat'] = forms.EmailField(\n label=_('E-mail address (repeated)'),\n help_text=_('Please enter the same email address again to make sure you typed it correctly.')\n )\n\n responses = contact_form_fields.send(self.event, request=self.request)\n for r, response in sorted(responses, key=lambda r: str(r[0])):\n for key, value in response.items():\n # We need to be this explicit, since OrderedDict.update does not retain ordering\n self.fields[key] = value\n\n def clean(self):\n if self.event.settings.order_email_asked_twice and self.cleaned_data.get('email') and self.cleaned_data.get('email_repeat'):\n if self.cleaned_data.get('email').lower() != self.cleaned_data.get('email_repeat').lower():\n raise ValidationError(_('Please enter the same email address twice.'))\n\n\nclass InvoiceAddressForm(BaseInvoiceAddressForm):\n required_css_class = 'required'\n vat_warning = True\n\n\nclass QuestionsForm(BaseQuestionsForm):\n \"\"\"\n This form class is responsible for asking order-related questions. This includes\n the attendee name for admission tickets, if the corresponding setting is enabled,\n as well as additional questions defined by the organizer.\n \"\"\"\n required_css_class = 'required'\n\n\nclass AddOnRadioSelect(forms.RadioSelect):\n option_template_name = 'pretixpresale/forms/addon_choice_option.html'\n\n def optgroups(self, name, value, attrs=None):\n attrs = attrs or {}\n groups = []\n has_selected = False\n for index, (option_value, option_label, option_desc) in enumerate(chain(self.choices)):\n if option_value is None:\n option_value = ''\n if isinstance(option_label, (list, tuple)):\n raise TypeError('Choice groups are not supported here')\n group_name = None\n subgroup = []\n groups.append((group_name, subgroup, index))\n\n selected = (\n force_text(option_value) in value and\n (has_selected is False or self.allow_multiple_selected)\n )\n if selected is True and has_selected is False:\n has_selected = True\n attrs['description'] = option_desc\n subgroup.append(self.create_option(\n name, option_value, option_label, selected, index,\n subindex=None, attrs=attrs,\n ))\n\n return groups\n\n\nclass AddOnVariationField(forms.ChoiceField):\n def valid_value(self, value):\n text_value = force_text(value)\n for k, v, d in self.choices:\n if value == k or text_value == force_text(k):\n return True\n return False\n\n\nclass AddOnsForm(forms.Form):\n \"\"\"\n This form class is responsible for selecting add-ons to a product in the cart.\n \"\"\"\n\n def _label(self, event, item_or_variation, avail, override_price=None):\n if isinstance(item_or_variation, ItemVariation):\n variation = item_or_variation\n item = item_or_variation.item\n price = variation.price\n label = variation.value\n else:\n item = item_or_variation\n price = item.default_price\n label = item.name\n\n if override_price:\n price = override_price\n\n if self.price_included:\n price = TAXED_ZERO\n else:\n price = item.tax(price)\n\n if not price.gross:\n n = '{name}'.format(\n name=label\n )\n elif not price.rate:\n n = _('{name} (+ 
{price})').format(\n name=label, price=money_filter(price.gross, event.currency)\n )\n elif event.settings.display_net_prices:\n n = _('{name} (+ {price} plus {taxes}% {taxname})').format(\n name=label, price=money_filter(price.net, event.currency),\n taxes=number_format(price.rate), taxname=price.name\n )\n else:\n n = _('{name} (+ {price} incl. {taxes}% {taxname})').format(\n name=label, price=money_filter(price.gross, event.currency),\n taxes=number_format(price.rate), taxname=price.name\n )\n\n if avail[0] < 20:\n n += ' \u2013 {}'.format(_('SOLD OUT'))\n elif avail[0] < 100:\n n += ' \u2013 {}'.format(_('Currently unavailable'))\n\n return n\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Takes additional keyword arguments:\n\n :param category: The category to choose from\n :param event: The event this belongs to\n :param subevent: The event the parent cart position belongs to\n :param initial: The current set of add-ons\n :param quota_cache: A shared dictionary for quota caching\n :param item_cache: A shared dictionary for item/category caching\n \"\"\"\n category = kwargs.pop('category')\n event = kwargs.pop('event')\n subevent = kwargs.pop('subevent')\n current_addons = kwargs.pop('initial')\n quota_cache = kwargs.pop('quota_cache')\n item_cache = kwargs.pop('item_cache')\n self.price_included = kwargs.pop('price_included')\n\n super().__init__(*args, **kwargs)\n\n if subevent:\n item_price_override = subevent.item_price_overrides\n var_price_override = subevent.var_price_overrides\n else:\n item_price_override = {}\n var_price_override = {}\n\n ckey = '{}-{}'.format(subevent.pk if subevent else 0, category.pk)\n if ckey not in item_cache:\n # Get all items to possibly show\n items = category.items.filter(\n Q(active=True)\n & Q(Q(available_from__isnull=True) | Q(available_from__lte=now()))\n & Q(Q(available_until__isnull=True) | Q(available_until__gte=now()))\n & Q(hide_without_voucher=False)\n ).select_related('tax_rule').prefetch_related(\n Prefetch('quotas',\n to_attr='_subevent_quotas',\n queryset=event.quotas.filter(subevent=subevent)),\n Prefetch('variations', to_attr='available_variations',\n queryset=ItemVariation.objects.filter(active=True, quotas__isnull=False).prefetch_related(\n Prefetch('quotas',\n to_attr='_subevent_quotas',\n queryset=event.quotas.filter(subevent=subevent))\n ).distinct()),\n ).annotate(\n quotac=Count('quotas'),\n has_variations=Count('variations')\n ).filter(\n quotac__gt=0\n ).order_by('category__position', 'category_id', 'position', 'name')\n item_cache[ckey] = items\n else:\n items = item_cache[ckey]\n\n for i in items:\n if i.has_variations:\n choices = [('', _('no selection'), '')]\n for v in i.available_variations:\n cached_availability = v.check_quotas(subevent=subevent, _cache=quota_cache)\n if v._subevent_quotas:\n choices.append(\n (v.pk,\n self._label(event, v, cached_availability,\n override_price=var_price_override.get(v.pk)),\n v.description)\n )\n\n field = AddOnVariationField(\n choices=choices,\n label=i.name,\n required=False,\n widget=AddOnRadioSelect,\n help_text=rich_text(str(i.description)),\n initial=current_addons.get(i.pk),\n )\n if len(choices) > 1:\n self.fields['item_%s' % i.pk] = field\n else:\n if not i._subevent_quotas:\n continue\n cached_availability = i.check_quotas(subevent=subevent, _cache=quota_cache)\n field = forms.BooleanField(\n label=self._label(event, i, cached_availability,\n override_price=item_price_override.get(i.pk)),\n required=False,\n initial=i.pk in current_addons,\n 
help_text=rich_text(str(i.description)),\n )\n self.fields['item_%s' % i.pk] = field\n", "path": "src/pretix/presale/forms/checkout.py"}], "after_files": [{"content": "from itertools import chain\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Count, Prefetch, Q\nfrom django.utils.encoding import force_text\nfrom django.utils.formats import number_format\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom pretix.base.forms.questions import (\n BaseInvoiceAddressForm, BaseQuestionsForm,\n)\nfrom pretix.base.models import ItemVariation\nfrom pretix.base.models.tax import TAXED_ZERO\nfrom pretix.base.templatetags.money import money_filter\nfrom pretix.base.templatetags.rich_text import rich_text\nfrom pretix.base.validators import EmailBlacklistValidator\nfrom pretix.presale.signals import contact_form_fields\n\n\nclass ContactForm(forms.Form):\n required_css_class = 'required'\n email = forms.EmailField(label=_('E-mail'),\n help_text=_('Make sure to enter a valid email address. We will send you an order '\n 'confirmation including a link that you need in case you want to make '\n 'modifications to your order or download your ticket later.'),\n validators=[EmailBlacklistValidator()],\n widget=forms.EmailInput(attrs={'autofocus': 'autofocus'}))\n\n def __init__(self, *args, **kwargs):\n self.event = kwargs.pop('event')\n self.request = kwargs.pop('request')\n super().__init__(*args, **kwargs)\n\n if self.event.settings.order_email_asked_twice:\n self.fields['email_repeat'] = forms.EmailField(\n label=_('E-mail address (repeated)'),\n help_text=_('Please enter the same email address again to make sure you typed it correctly.')\n )\n\n responses = contact_form_fields.send(self.event, request=self.request)\n for r, response in sorted(responses, key=lambda r: str(r[0])):\n for key, value in response.items():\n # We need to be this explicit, since OrderedDict.update does not retain ordering\n self.fields[key] = value\n\n def clean(self):\n if self.event.settings.order_email_asked_twice and self.cleaned_data.get('email') and self.cleaned_data.get('email_repeat'):\n if self.cleaned_data.get('email').lower() != self.cleaned_data.get('email_repeat').lower():\n raise ValidationError(_('Please enter the same email address twice.'))\n\n\nclass InvoiceAddressForm(BaseInvoiceAddressForm):\n required_css_class = 'required'\n vat_warning = True\n\n\nclass QuestionsForm(BaseQuestionsForm):\n \"\"\"\n This form class is responsible for asking order-related questions. 
This includes\n the attendee name for admission tickets, if the corresponding setting is enabled,\n as well as additional questions defined by the organizer.\n \"\"\"\n required_css_class = 'required'\n\n\nclass AddOnRadioSelect(forms.RadioSelect):\n option_template_name = 'pretixpresale/forms/addon_choice_option.html'\n\n def optgroups(self, name, value, attrs=None):\n attrs = attrs or {}\n groups = []\n has_selected = False\n for index, (option_value, option_label, option_desc) in enumerate(chain(self.choices)):\n if option_value is None:\n option_value = ''\n if isinstance(option_label, (list, tuple)):\n raise TypeError('Choice groups are not supported here')\n group_name = None\n subgroup = []\n groups.append((group_name, subgroup, index))\n\n selected = (\n force_text(option_value) in value and\n (has_selected is False or self.allow_multiple_selected)\n )\n if selected is True and has_selected is False:\n has_selected = True\n attrs['description'] = option_desc\n subgroup.append(self.create_option(\n name, option_value, option_label, selected, index,\n subindex=None, attrs=attrs,\n ))\n\n return groups\n\n\nclass AddOnVariationField(forms.ChoiceField):\n def valid_value(self, value):\n text_value = force_text(value)\n for k, v, d in self.choices:\n if value == k or text_value == force_text(k):\n return True\n return False\n\n\nclass AddOnsForm(forms.Form):\n \"\"\"\n This form class is responsible for selecting add-ons to a product in the cart.\n \"\"\"\n\n def _label(self, event, item_or_variation, avail, override_price=None):\n if isinstance(item_or_variation, ItemVariation):\n variation = item_or_variation\n item = item_or_variation.item\n price = variation.price\n label = variation.value\n else:\n item = item_or_variation\n price = item.default_price\n label = item.name\n\n if override_price:\n price = override_price\n\n if self.price_included:\n price = TAXED_ZERO\n else:\n price = item.tax(price)\n\n if not price.gross:\n n = '{name}'.format(\n name=label\n )\n elif not price.rate:\n n = _('{name} (+ {price})').format(\n name=label, price=money_filter(price.gross, event.currency)\n )\n elif event.settings.display_net_prices:\n n = _('{name} (+ {price} plus {taxes}% {taxname})').format(\n name=label, price=money_filter(price.net, event.currency),\n taxes=number_format(price.rate), taxname=price.name\n )\n else:\n n = _('{name} (+ {price} incl. 
{taxes}% {taxname})').format(\n name=label, price=money_filter(price.gross, event.currency),\n taxes=number_format(price.rate), taxname=price.name\n )\n\n if avail[0] < 20:\n n += ' \u2013 {}'.format(_('SOLD OUT'))\n elif avail[0] < 100:\n n += ' \u2013 {}'.format(_('Currently unavailable'))\n else:\n if avail[1] is not None and event.settings.show_quota_left:\n n += ' \u2013 {}'.format(_('%(num)s currently available') % {'num': avail[1]})\n\n return n\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Takes additional keyword arguments:\n\n :param category: The category to choose from\n :param event: The event this belongs to\n :param subevent: The event the parent cart position belongs to\n :param initial: The current set of add-ons\n :param quota_cache: A shared dictionary for quota caching\n :param item_cache: A shared dictionary for item/category caching\n \"\"\"\n category = kwargs.pop('category')\n event = kwargs.pop('event')\n subevent = kwargs.pop('subevent')\n current_addons = kwargs.pop('initial')\n quota_cache = kwargs.pop('quota_cache')\n item_cache = kwargs.pop('item_cache')\n self.price_included = kwargs.pop('price_included')\n\n super().__init__(*args, **kwargs)\n\n if subevent:\n item_price_override = subevent.item_price_overrides\n var_price_override = subevent.var_price_overrides\n else:\n item_price_override = {}\n var_price_override = {}\n\n ckey = '{}-{}'.format(subevent.pk if subevent else 0, category.pk)\n if ckey not in item_cache:\n # Get all items to possibly show\n items = category.items.filter(\n Q(active=True)\n & Q(Q(available_from__isnull=True) | Q(available_from__lte=now()))\n & Q(Q(available_until__isnull=True) | Q(available_until__gte=now()))\n & Q(hide_without_voucher=False)\n ).select_related('tax_rule').prefetch_related(\n Prefetch('quotas',\n to_attr='_subevent_quotas',\n queryset=event.quotas.filter(subevent=subevent)),\n Prefetch('variations', to_attr='available_variations',\n queryset=ItemVariation.objects.filter(active=True, quotas__isnull=False).prefetch_related(\n Prefetch('quotas',\n to_attr='_subevent_quotas',\n queryset=event.quotas.filter(subevent=subevent))\n ).distinct()),\n ).annotate(\n quotac=Count('quotas'),\n has_variations=Count('variations')\n ).filter(\n quotac__gt=0\n ).order_by('category__position', 'category_id', 'position', 'name')\n item_cache[ckey] = items\n else:\n items = item_cache[ckey]\n\n for i in items:\n if i.has_variations:\n choices = [('', _('no selection'), '')]\n for v in i.available_variations:\n cached_availability = v.check_quotas(subevent=subevent, _cache=quota_cache)\n if v._subevent_quotas:\n choices.append(\n (v.pk,\n self._label(event, v, cached_availability,\n override_price=var_price_override.get(v.pk)),\n v.description)\n )\n\n field = AddOnVariationField(\n choices=choices,\n label=i.name,\n required=False,\n widget=AddOnRadioSelect,\n help_text=rich_text(str(i.description)),\n initial=current_addons.get(i.pk),\n )\n if len(choices) > 1:\n self.fields['item_%s' % i.pk] = field\n else:\n if not i._subevent_quotas:\n continue\n cached_availability = i.check_quotas(subevent=subevent, _cache=quota_cache)\n field = forms.BooleanField(\n label=self._label(event, i, cached_availability,\n override_price=item_price_override.get(i.pk)),\n required=False,\n initial=i.pk in current_addons,\n help_text=rich_text(str(i.description)),\n )\n self.fields['item_%s' % i.pk] = field\n", "path": "src/pretix/presale/forms/checkout.py"}]} | 3,072 | 155 |
gh_patches_debug_7699 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-3802 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tab-take should not show the tabs in current window
If I am in one window and write ```:tab-take ```, the options of available tabs will pop up. All the tabs in the current window will be shown in the options as well. However, a window cannot take its own tab, so I think we should remove the tabs in the current window from the options for the ```:tab-take``` command.


.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/completion/models/miscmodels.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Functions that return miscellaneous completion models."""
21
22 from qutebrowser.config import configdata
23 from qutebrowser.utils import objreg, log
24 from qutebrowser.completion.models import completionmodel, listcategory, util
25
26
27 def command(*, info):
28 """A CompletionModel filled with non-hidden commands and descriptions."""
29 model = completionmodel.CompletionModel(column_widths=(20, 60, 20))
30 cmdlist = util.get_cmd_completions(info, include_aliases=True,
31 include_hidden=False)
32 model.add_category(listcategory.ListCategory("Commands", cmdlist))
33 return model
34
35
36 def helptopic(*, info):
37 """A CompletionModel filled with help topics."""
38 model = completionmodel.CompletionModel()
39
40 cmdlist = util.get_cmd_completions(info, include_aliases=False,
41 include_hidden=True, prefix=':')
42 settings = ((opt.name, opt.description)
43 for opt in configdata.DATA.values())
44
45 model.add_category(listcategory.ListCategory("Commands", cmdlist))
46 model.add_category(listcategory.ListCategory("Settings", settings))
47 return model
48
49
50 def quickmark(*, info=None): # pylint: disable=unused-argument
51 """A CompletionModel filled with all quickmarks."""
52 def delete(data):
53 """Delete a quickmark from the completion menu."""
54 name = data[0]
55 quickmark_manager = objreg.get('quickmark-manager')
56 log.completion.debug('Deleting quickmark {}'.format(name))
57 quickmark_manager.delete(name)
58
59 model = completionmodel.CompletionModel(column_widths=(30, 70, 0))
60 marks = objreg.get('quickmark-manager').marks.items()
61 model.add_category(listcategory.ListCategory('Quickmarks', marks,
62 delete_func=delete,
63 sort=False))
64 return model
65
66
67 def bookmark(*, info=None): # pylint: disable=unused-argument
68 """A CompletionModel filled with all bookmarks."""
69 def delete(data):
70 """Delete a bookmark from the completion menu."""
71 urlstr = data[0]
72 log.completion.debug('Deleting bookmark {}'.format(urlstr))
73 bookmark_manager = objreg.get('bookmark-manager')
74 bookmark_manager.delete(urlstr)
75
76 model = completionmodel.CompletionModel(column_widths=(30, 70, 0))
77 marks = objreg.get('bookmark-manager').marks.items()
78 model.add_category(listcategory.ListCategory('Bookmarks', marks,
79 delete_func=delete,
80 sort=False))
81 return model
82
83
84 def session(*, info=None): # pylint: disable=unused-argument
85 """A CompletionModel filled with session names."""
86 model = completionmodel.CompletionModel()
87 try:
88 manager = objreg.get('session-manager')
89 sessions = ((name,) for name in manager.list_sessions()
90 if not name.startswith('_'))
91 model.add_category(listcategory.ListCategory("Sessions", sessions))
92 except OSError:
93 log.completion.exception("Failed to list sessions!")
94 return model
95
96
97 def _buffer(skip_win_id=None):
98 """Helper to get the completion model for buffer/other_buffer.
99
100 Args:
101 skip_win_id: The id of the window to skip, or None to include all.
102 """
103 def delete_buffer(data):
104 """Close the selected tab."""
105 win_id, tab_index = data[0].split('/')
106 tabbed_browser = objreg.get('tabbed-browser', scope='window',
107 window=int(win_id))
108 tabbed_browser.on_tab_close_requested(int(tab_index) - 1)
109
110 model = completionmodel.CompletionModel(column_widths=(6, 40, 54))
111
112 for win_id in objreg.window_registry:
113 if skip_win_id and win_id == skip_win_id:
114 continue
115 tabbed_browser = objreg.get('tabbed-browser', scope='window',
116 window=win_id)
117 if tabbed_browser.shutting_down:
118 continue
119 tabs = []
120 for idx in range(tabbed_browser.widget.count()):
121 tab = tabbed_browser.widget.widget(idx)
122 tabs.append(("{}/{}".format(win_id, idx + 1),
123 tab.url().toDisplayString(),
124 tabbed_browser.widget.page_title(idx)))
125 cat = listcategory.ListCategory("{}".format(win_id), tabs,
126 delete_func=delete_buffer)
127 model.add_category(cat)
128
129 return model
130
131
132 def buffer(*, info=None): # pylint: disable=unused-argument
133 """A model to complete on open tabs across all windows.
134
135 Used for switching the buffer command.
136 """
137 return _buffer()
138
139
140 def other_buffer(*, info):
141 """A model to complete on open tabs across all windows except the current.
142
143 Used for the tab-take command.
144 """
145 return _buffer(skip_win_id=info.win_id)
146
147
148 def window(*, info):
149 """A model to complete on all open windows."""
150 model = completionmodel.CompletionModel(column_widths=(6, 30, 64))
151
152 windows = []
153
154 for win_id in objreg.window_registry:
155 if win_id == info.win_id:
156 continue
157 tabbed_browser = objreg.get('tabbed-browser', scope='window',
158 window=win_id)
159 tab_titles = (tab.title() for tab in tabbed_browser.widgets())
160 windows.append(("{}".format(win_id),
161 objreg.window_registry[win_id].windowTitle(),
162 ", ".join(tab_titles)))
163
164 model.add_category(listcategory.ListCategory("Windows", windows))
165
166 return model
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutebrowser/completion/models/miscmodels.py b/qutebrowser/completion/models/miscmodels.py
--- a/qutebrowser/completion/models/miscmodels.py
+++ b/qutebrowser/completion/models/miscmodels.py
@@ -110,7 +110,7 @@
model = completionmodel.CompletionModel(column_widths=(6, 40, 54))
for win_id in objreg.window_registry:
- if skip_win_id and win_id == skip_win_id:
+ if skip_win_id is not None and win_id == skip_win_id:
continue
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
| {"golden_diff": "diff --git a/qutebrowser/completion/models/miscmodels.py b/qutebrowser/completion/models/miscmodels.py\n--- a/qutebrowser/completion/models/miscmodels.py\n+++ b/qutebrowser/completion/models/miscmodels.py\n@@ -110,7 +110,7 @@\n model = completionmodel.CompletionModel(column_widths=(6, 40, 54))\n \n for win_id in objreg.window_registry:\n- if skip_win_id and win_id == skip_win_id:\n+ if skip_win_id is not None and win_id == skip_win_id:\n continue\n tabbed_browser = objreg.get('tabbed-browser', scope='window',\n window=win_id)\n", "issue": "Tab-take should not show the tabs in current window\n<!-- If this is a bug report, please remember to mention your version info from\r\n`:open qute:version` or `qutebrowser --version` -->\r\nIf I am in one window and write ```:tab-take ```, the options of available tabs will pop up. All the tabs in the current window will be shown in the options as well. However, a window can not take its own tab, so I think we should remove the tabs in the current window from the options for the ```:tab-take```\r\n\r\n\r\n\r\n. \n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Functions that return miscellaneous completion models.\"\"\"\n\nfrom qutebrowser.config import configdata\nfrom qutebrowser.utils import objreg, log\nfrom qutebrowser.completion.models import completionmodel, listcategory, util\n\n\ndef command(*, info):\n \"\"\"A CompletionModel filled with non-hidden commands and descriptions.\"\"\"\n model = completionmodel.CompletionModel(column_widths=(20, 60, 20))\n cmdlist = util.get_cmd_completions(info, include_aliases=True,\n include_hidden=False)\n model.add_category(listcategory.ListCategory(\"Commands\", cmdlist))\n return model\n\n\ndef helptopic(*, info):\n \"\"\"A CompletionModel filled with help topics.\"\"\"\n model = completionmodel.CompletionModel()\n\n cmdlist = util.get_cmd_completions(info, include_aliases=False,\n include_hidden=True, prefix=':')\n settings = ((opt.name, opt.description)\n for opt in configdata.DATA.values())\n\n model.add_category(listcategory.ListCategory(\"Commands\", cmdlist))\n model.add_category(listcategory.ListCategory(\"Settings\", settings))\n return model\n\n\ndef quickmark(*, info=None): # pylint: disable=unused-argument\n \"\"\"A CompletionModel filled with all quickmarks.\"\"\"\n def delete(data):\n \"\"\"Delete a quickmark from the completion menu.\"\"\"\n name = data[0]\n quickmark_manager = objreg.get('quickmark-manager')\n log.completion.debug('Deleting quickmark {}'.format(name))\n quickmark_manager.delete(name)\n\n model = completionmodel.CompletionModel(column_widths=(30, 70, 0))\n marks = objreg.get('quickmark-manager').marks.items()\n model.add_category(listcategory.ListCategory('Quickmarks', marks,\n delete_func=delete,\n sort=False))\n return model\n\n\ndef bookmark(*, info=None): # pylint: disable=unused-argument\n \"\"\"A CompletionModel filled with all bookmarks.\"\"\"\n def delete(data):\n \"\"\"Delete a bookmark from the completion menu.\"\"\"\n urlstr = data[0]\n log.completion.debug('Deleting bookmark {}'.format(urlstr))\n bookmark_manager = objreg.get('bookmark-manager')\n bookmark_manager.delete(urlstr)\n\n model = completionmodel.CompletionModel(column_widths=(30, 70, 0))\n marks = objreg.get('bookmark-manager').marks.items()\n model.add_category(listcategory.ListCategory('Bookmarks', marks,\n delete_func=delete,\n sort=False))\n return model\n\n\ndef session(*, info=None): # pylint: disable=unused-argument\n \"\"\"A CompletionModel filled with session names.\"\"\"\n model = completionmodel.CompletionModel()\n try:\n manager = objreg.get('session-manager')\n sessions = ((name,) for name in manager.list_sessions()\n if not name.startswith('_'))\n model.add_category(listcategory.ListCategory(\"Sessions\", sessions))\n except OSError:\n log.completion.exception(\"Failed to list sessions!\")\n return model\n\n\ndef _buffer(skip_win_id=None):\n \"\"\"Helper to get the completion model for buffer/other_buffer.\n\n Args:\n skip_win_id: The id of the window to skip, or None to include all.\n \"\"\"\n def delete_buffer(data):\n \"\"\"Close the selected tab.\"\"\"\n win_id, tab_index = data[0].split('/')\n tabbed_browser = objreg.get('tabbed-browser', scope='window',\n window=int(win_id))\n tabbed_browser.on_tab_close_requested(int(tab_index) - 1)\n\n model = completionmodel.CompletionModel(column_widths=(6, 40, 54))\n\n for win_id in objreg.window_registry:\n if skip_win_id and win_id == skip_win_id:\n continue\n tabbed_browser = objreg.get('tabbed-browser', scope='window',\n window=win_id)\n if tabbed_browser.shutting_down:\n continue\n 
tabs = []\n for idx in range(tabbed_browser.widget.count()):\n tab = tabbed_browser.widget.widget(idx)\n tabs.append((\"{}/{}\".format(win_id, idx + 1),\n tab.url().toDisplayString(),\n tabbed_browser.widget.page_title(idx)))\n cat = listcategory.ListCategory(\"{}\".format(win_id), tabs,\n delete_func=delete_buffer)\n model.add_category(cat)\n\n return model\n\n\ndef buffer(*, info=None): # pylint: disable=unused-argument\n \"\"\"A model to complete on open tabs across all windows.\n\n Used for switching the buffer command.\n \"\"\"\n return _buffer()\n\n\ndef other_buffer(*, info):\n \"\"\"A model to complete on open tabs across all windows except the current.\n\n Used for the tab-take command.\n \"\"\"\n return _buffer(skip_win_id=info.win_id)\n\n\ndef window(*, info):\n \"\"\"A model to complete on all open windows.\"\"\"\n model = completionmodel.CompletionModel(column_widths=(6, 30, 64))\n\n windows = []\n\n for win_id in objreg.window_registry:\n if win_id == info.win_id:\n continue\n tabbed_browser = objreg.get('tabbed-browser', scope='window',\n window=win_id)\n tab_titles = (tab.title() for tab in tabbed_browser.widgets())\n windows.append((\"{}\".format(win_id),\n objreg.window_registry[win_id].windowTitle(),\n \", \".join(tab_titles)))\n\n model.add_category(listcategory.ListCategory(\"Windows\", windows))\n\n return model\n", "path": "qutebrowser/completion/models/miscmodels.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Functions that return miscellaneous completion models.\"\"\"\n\nfrom qutebrowser.config import configdata\nfrom qutebrowser.utils import objreg, log\nfrom qutebrowser.completion.models import completionmodel, listcategory, util\n\n\ndef command(*, info):\n \"\"\"A CompletionModel filled with non-hidden commands and descriptions.\"\"\"\n model = completionmodel.CompletionModel(column_widths=(20, 60, 20))\n cmdlist = util.get_cmd_completions(info, include_aliases=True,\n include_hidden=False)\n model.add_category(listcategory.ListCategory(\"Commands\", cmdlist))\n return model\n\n\ndef helptopic(*, info):\n \"\"\"A CompletionModel filled with help topics.\"\"\"\n model = completionmodel.CompletionModel()\n\n cmdlist = util.get_cmd_completions(info, include_aliases=False,\n include_hidden=True, prefix=':')\n settings = ((opt.name, opt.description)\n for opt in configdata.DATA.values())\n\n model.add_category(listcategory.ListCategory(\"Commands\", cmdlist))\n model.add_category(listcategory.ListCategory(\"Settings\", settings))\n return model\n\n\ndef quickmark(*, info=None): # pylint: disable=unused-argument\n \"\"\"A CompletionModel filled with all quickmarks.\"\"\"\n def delete(data):\n \"\"\"Delete a quickmark from the completion menu.\"\"\"\n name = data[0]\n quickmark_manager = objreg.get('quickmark-manager')\n log.completion.debug('Deleting quickmark {}'.format(name))\n quickmark_manager.delete(name)\n\n model = completionmodel.CompletionModel(column_widths=(30, 70, 0))\n marks = objreg.get('quickmark-manager').marks.items()\n model.add_category(listcategory.ListCategory('Quickmarks', marks,\n delete_func=delete,\n sort=False))\n return model\n\n\ndef bookmark(*, info=None): # pylint: disable=unused-argument\n \"\"\"A CompletionModel filled with all bookmarks.\"\"\"\n def delete(data):\n \"\"\"Delete a bookmark from the completion menu.\"\"\"\n urlstr = data[0]\n log.completion.debug('Deleting bookmark {}'.format(urlstr))\n bookmark_manager = objreg.get('bookmark-manager')\n bookmark_manager.delete(urlstr)\n\n model = completionmodel.CompletionModel(column_widths=(30, 70, 0))\n marks = objreg.get('bookmark-manager').marks.items()\n model.add_category(listcategory.ListCategory('Bookmarks', marks,\n delete_func=delete,\n sort=False))\n return model\n\n\ndef session(*, info=None): # pylint: disable=unused-argument\n \"\"\"A CompletionModel filled with session names.\"\"\"\n model = completionmodel.CompletionModel()\n try:\n manager = objreg.get('session-manager')\n sessions = ((name,) for name in manager.list_sessions()\n if not name.startswith('_'))\n model.add_category(listcategory.ListCategory(\"Sessions\", sessions))\n except OSError:\n log.completion.exception(\"Failed to list sessions!\")\n return model\n\n\ndef _buffer(skip_win_id=None):\n \"\"\"Helper to get the completion model for buffer/other_buffer.\n\n Args:\n skip_win_id: The id of the window to skip, or None to include all.\n \"\"\"\n def delete_buffer(data):\n \"\"\"Close the selected tab.\"\"\"\n win_id, tab_index = data[0].split('/')\n tabbed_browser = objreg.get('tabbed-browser', scope='window',\n window=int(win_id))\n tabbed_browser.on_tab_close_requested(int(tab_index) - 1)\n\n model = completionmodel.CompletionModel(column_widths=(6, 40, 54))\n\n for win_id in objreg.window_registry:\n if skip_win_id is not None and win_id == skip_win_id:\n continue\n tabbed_browser = objreg.get('tabbed-browser', scope='window',\n window=win_id)\n if tabbed_browser.shutting_down:\n 
continue\n tabs = []\n for idx in range(tabbed_browser.widget.count()):\n tab = tabbed_browser.widget.widget(idx)\n tabs.append((\"{}/{}\".format(win_id, idx + 1),\n tab.url().toDisplayString(),\n tabbed_browser.widget.page_title(idx)))\n cat = listcategory.ListCategory(\"{}\".format(win_id), tabs,\n delete_func=delete_buffer)\n model.add_category(cat)\n\n return model\n\n\ndef buffer(*, info=None): # pylint: disable=unused-argument\n \"\"\"A model to complete on open tabs across all windows.\n\n Used for switching the buffer command.\n \"\"\"\n return _buffer()\n\n\ndef other_buffer(*, info):\n \"\"\"A model to complete on open tabs across all windows except the current.\n\n Used for the tab-take command.\n \"\"\"\n return _buffer(skip_win_id=info.win_id)\n\n\ndef window(*, info):\n \"\"\"A model to complete on all open windows.\"\"\"\n model = completionmodel.CompletionModel(column_widths=(6, 30, 64))\n\n windows = []\n\n for win_id in objreg.window_registry:\n if win_id == info.win_id:\n continue\n tabbed_browser = objreg.get('tabbed-browser', scope='window',\n window=win_id)\n tab_titles = (tab.title() for tab in tabbed_browser.widgets())\n windows.append((\"{}\".format(win_id),\n objreg.window_registry[win_id].windowTitle(),\n \", \".join(tab_titles)))\n\n model.add_category(listcategory.ListCategory(\"Windows\", windows))\n\n return model\n", "path": "qutebrowser/completion/models/miscmodels.py"}]} | 2,325 | 152 |
gh_patches_debug_10782 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1076 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Type refactoring has a regression with inheritance and explicit fields.
```python
@strawberry.input
class A:
    a: str = strawberry.field(default='', desc='')

@strawberry.input
class B(A):
    b: Optional[str] = strawberry.field(default=None, desc='')

@strawberry.type
class Query:
    @strawberry.field
    def field(self, arg: B) -> str:
        return ''

schema = strawberry.Schema(query=Query)
result = schema.execute_sync('{ field(arg: {}) }')
assert not result.errors
```
raises `TypeError: B fields cannot be resolved. unhashable type: 'StrawberryAnnotation'`.
`StrawberryAnnotation` has a custom `__eq__` without a `__hash__`, causing a set lookup to fail. However, adding a suitable `__hash__` just led to the next `TypeError`.
`StrawberryOptional` likely has the same problem.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/types/type_resolver.py`
Content:
```
1 import dataclasses
2 import sys
3 from typing import Dict, List, Type
4
5 from strawberry.annotation import StrawberryAnnotation
6 from strawberry.exceptions import (
7 FieldWithResolverAndDefaultFactoryError,
8 FieldWithResolverAndDefaultValueError,
9 PrivateStrawberryFieldError,
10 )
11 from strawberry.field import StrawberryField
12 from strawberry.private import Private
13 from strawberry.utils.str_converters import to_camel_case
14
15 from ..arguments import UNSET
16
17
18 def _get_fields(cls: Type) -> List[StrawberryField]:
19 """Get all the strawberry fields off a strawberry.type cls
20
21 This function returns a list of StrawberryFields (one for each field item), while
22 also paying attention the name and typing of the field.
23
24 StrawberryFields can be defined on a strawberry.type class as either a dataclass-
25 style field or using strawberry.field as a decorator.
26
27 >>> import strawberry
28 >>> @strawberry.type
29 ... class Query:
30 ... type_1a: int = 5
31 ... type_1b: int = strawberry.field(...)
32 ... type_1c: int = strawberry.field(resolver=...)
33 ...
34 ... @strawberry.field
35 ... def type_2(self) -> int:
36 ... ...
37
38 Type #1:
39 A pure dataclass-style field. Will not have a StrawberryField; one will need to
40 be created in this function. Type annotation is required.
41
42 Type #2:
43 A field defined using @strawberry.field as a decorator around the resolver. The
44 resolver must be type-annotated.
45
46 The StrawberryField.python_name value will be assigned to the field's name on the
47 class if one is not set by either using an explicit strawberry.field(name=...) or by
48 passing a named function (i.e. not an anonymous lambda) to strawberry.field
49 (typically as a decorator).
50 """
51 # Deferred import to avoid import cycles
52 from strawberry.field import StrawberryField
53
54 fields: Dict[str, StrawberryField] = {}
55
56 # before trying to find any fields, let's first add the fields defined in
57 # parent classes, we do this by checking if parents have a type definition
58 for base in cls.__bases__:
59 if hasattr(base, "_type_definition"):
60 base_fields = {
61 field.graphql_name: field
62 # TODO: we need to rename _fields to something else
63 for field in base._type_definition._fields # type: ignore
64 }
65
66 # Add base's fields to cls' fields
67 fields = {**fields, **base_fields}
68
69 # then we can proceed with finding the fields for the current class
70 for field in dataclasses.fields(cls):
71
72 if isinstance(field, StrawberryField):
73 # Check that the field type is not Private
74 if isinstance(field.type, Private):
75 raise PrivateStrawberryFieldError(field.python_name, cls.__name__)
76
77 # Check that default is not set if a resolver is defined
78 if field.default != dataclasses.MISSING and field.base_resolver is not None:
79 raise FieldWithResolverAndDefaultValueError(
80 field.python_name, cls.__name__
81 )
82
83 # Check that default_factory is not set if a resolver is defined
84 # Note: using getattr because of this issue:
85 # https://github.com/python/mypy/issues/6910
86 if (
87 getattr(field, "default_factory") != dataclasses.MISSING # noqa
88 and field.base_resolver is not None
89 ):
90 raise FieldWithResolverAndDefaultFactoryError(
91 field.python_name, cls.__name__
92 )
93
94 # we make sure that the origin is either the field's resolver when
95 # called as:
96 #
97 # >>> @strawberry.field
98 # ... def x(self): ...
99 #
100 # or the class where this field was defined, so we always have
101 # the correct origin for determining field types when resolving
102 # the types.
103 field.origin = field.origin or cls
104
105 # Make sure types are StrawberryAnnotations
106 if not isinstance(field.type, StrawberryAnnotation):
107 module = sys.modules[field.origin.__module__]
108 field.type_annotation = StrawberryAnnotation(
109 annotation=field.type_annotation, namespace=module.__dict__
110 )
111
112 # Create a StrawberryField for fields that didn't use strawberry.field
113 else:
114 # Only ignore Private fields that weren't defined using StrawberryFields
115 if isinstance(field.type, Private):
116 continue
117
118 field_type = field.type
119
120 module = sys.modules[cls.__module__]
121
122 # Create a StrawberryField, for fields of Types #1 and #2a
123 field = StrawberryField(
124 python_name=field.name,
125 graphql_name=to_camel_case(field.name),
126 type_annotation=StrawberryAnnotation(
127 annotation=field_type,
128 namespace=module.__dict__,
129 ),
130 origin=cls,
131 default=getattr(cls, field.name, UNSET),
132 )
133
134 field_name = field.graphql_name
135
136 assert_message = "Field must have a name by the time the schema is generated"
137 assert field_name is not None, assert_message
138
139 # TODO: Raise exception if field_name already in fields
140 fields[field_name] = field
141
142 return list(fields.values())
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/types/type_resolver.py b/strawberry/types/type_resolver.py
--- a/strawberry/types/type_resolver.py
+++ b/strawberry/types/type_resolver.py
@@ -103,7 +103,7 @@
field.origin = field.origin or cls
# Make sure types are StrawberryAnnotations
- if not isinstance(field.type, StrawberryAnnotation):
+ if not isinstance(field.type_annotation, StrawberryAnnotation):
module = sys.modules[field.origin.__module__]
field.type_annotation = StrawberryAnnotation(
annotation=field.type_annotation, namespace=module.__dict__
| {"golden_diff": "diff --git a/strawberry/types/type_resolver.py b/strawberry/types/type_resolver.py\n--- a/strawberry/types/type_resolver.py\n+++ b/strawberry/types/type_resolver.py\n@@ -103,7 +103,7 @@\n field.origin = field.origin or cls\n \n # Make sure types are StrawberryAnnotations\n- if not isinstance(field.type, StrawberryAnnotation):\n+ if not isinstance(field.type_annotation, StrawberryAnnotation):\n module = sys.modules[field.origin.__module__]\n field.type_annotation = StrawberryAnnotation(\n annotation=field.type_annotation, namespace=module.__dict__\n", "issue": "Type refactoring has a regression with inheritance and explicit fields.\n```python\r\[email protected]\r\nclass A:\r\n a: str = strawberry.field(default='', desc='')\r\n\r\[email protected]\r\nclass B(A):\r\n b: Optional[str] = strawberry.field(default=None, desc='')\r\n\r\[email protected]\r\nclass Query:\r\n @strawberry.field\r\n def field(self, arg: B) -> str:\r\n return ''\r\n\r\nschema = strawberry.Schema(query=Query)\r\nresult = schema.execute_sync('{ field(arg: {}) }')\r\nassert not result.errors\r\n```\r\nraises `TypeError: B fields cannot be resolved. unhashable type: 'StrawberryAnnotation'`.\r\n\r\n`StrawberryAnnotation` has a custom `__eq__` without a `__hash__`, causing a set lookup to fail. However adding a suitable `__hash__` just lead to the next `TypeError`.\r\n\r\n`StrawberryOptional` likely has the same problem.\r\n\n", "before_files": [{"content": "import dataclasses\nimport sys\nfrom typing import Dict, List, Type\n\nfrom strawberry.annotation import StrawberryAnnotation\nfrom strawberry.exceptions import (\n FieldWithResolverAndDefaultFactoryError,\n FieldWithResolverAndDefaultValueError,\n PrivateStrawberryFieldError,\n)\nfrom strawberry.field import StrawberryField\nfrom strawberry.private import Private\nfrom strawberry.utils.str_converters import to_camel_case\n\nfrom ..arguments import UNSET\n\n\ndef _get_fields(cls: Type) -> List[StrawberryField]:\n \"\"\"Get all the strawberry fields off a strawberry.type cls\n\n This function returns a list of StrawberryFields (one for each field item), while\n also paying attention the name and typing of the field.\n\n StrawberryFields can be defined on a strawberry.type class as either a dataclass-\n style field or using strawberry.field as a decorator.\n\n >>> import strawberry\n >>> @strawberry.type\n ... class Query:\n ... type_1a: int = 5\n ... type_1b: int = strawberry.field(...)\n ... type_1c: int = strawberry.field(resolver=...)\n ...\n ... @strawberry.field\n ... def type_2(self) -> int:\n ... ...\n\n Type #1:\n A pure dataclass-style field. Will not have a StrawberryField; one will need to\n be created in this function. Type annotation is required.\n\n Type #2:\n A field defined using @strawberry.field as a decorator around the resolver. The\n resolver must be type-annotated.\n\n The StrawberryField.python_name value will be assigned to the field's name on the\n class if one is not set by either using an explicit strawberry.field(name=...) or by\n passing a named function (i.e. 
not an anonymous lambda) to strawberry.field\n (typically as a decorator).\n \"\"\"\n # Deferred import to avoid import cycles\n from strawberry.field import StrawberryField\n\n fields: Dict[str, StrawberryField] = {}\n\n # before trying to find any fields, let's first add the fields defined in\n # parent classes, we do this by checking if parents have a type definition\n for base in cls.__bases__:\n if hasattr(base, \"_type_definition\"):\n base_fields = {\n field.graphql_name: field\n # TODO: we need to rename _fields to something else\n for field in base._type_definition._fields # type: ignore\n }\n\n # Add base's fields to cls' fields\n fields = {**fields, **base_fields}\n\n # then we can proceed with finding the fields for the current class\n for field in dataclasses.fields(cls):\n\n if isinstance(field, StrawberryField):\n # Check that the field type is not Private\n if isinstance(field.type, Private):\n raise PrivateStrawberryFieldError(field.python_name, cls.__name__)\n\n # Check that default is not set if a resolver is defined\n if field.default != dataclasses.MISSING and field.base_resolver is not None:\n raise FieldWithResolverAndDefaultValueError(\n field.python_name, cls.__name__\n )\n\n # Check that default_factory is not set if a resolver is defined\n # Note: using getattr because of this issue:\n # https://github.com/python/mypy/issues/6910\n if (\n getattr(field, \"default_factory\") != dataclasses.MISSING # noqa\n and field.base_resolver is not None\n ):\n raise FieldWithResolverAndDefaultFactoryError(\n field.python_name, cls.__name__\n )\n\n # we make sure that the origin is either the field's resolver when\n # called as:\n #\n # >>> @strawberry.field\n # ... def x(self): ...\n #\n # or the class where this field was defined, so we always have\n # the correct origin for determining field types when resolving\n # the types.\n field.origin = field.origin or cls\n\n # Make sure types are StrawberryAnnotations\n if not isinstance(field.type, StrawberryAnnotation):\n module = sys.modules[field.origin.__module__]\n field.type_annotation = StrawberryAnnotation(\n annotation=field.type_annotation, namespace=module.__dict__\n )\n\n # Create a StrawberryField for fields that didn't use strawberry.field\n else:\n # Only ignore Private fields that weren't defined using StrawberryFields\n if isinstance(field.type, Private):\n continue\n\n field_type = field.type\n\n module = sys.modules[cls.__module__]\n\n # Create a StrawberryField, for fields of Types #1 and #2a\n field = StrawberryField(\n python_name=field.name,\n graphql_name=to_camel_case(field.name),\n type_annotation=StrawberryAnnotation(\n annotation=field_type,\n namespace=module.__dict__,\n ),\n origin=cls,\n default=getattr(cls, field.name, UNSET),\n )\n\n field_name = field.graphql_name\n\n assert_message = \"Field must have a name by the time the schema is generated\"\n assert field_name is not None, assert_message\n\n # TODO: Raise exception if field_name already in fields\n fields[field_name] = field\n\n return list(fields.values())\n", "path": "strawberry/types/type_resolver.py"}], "after_files": [{"content": "import dataclasses\nimport sys\nfrom typing import Dict, List, Type\n\nfrom strawberry.annotation import StrawberryAnnotation\nfrom strawberry.exceptions import (\n FieldWithResolverAndDefaultFactoryError,\n FieldWithResolverAndDefaultValueError,\n PrivateStrawberryFieldError,\n)\nfrom strawberry.field import StrawberryField\nfrom strawberry.private import Private\nfrom strawberry.utils.str_converters import 
to_camel_case\n\nfrom ..arguments import UNSET\n\n\ndef _get_fields(cls: Type) -> List[StrawberryField]:\n \"\"\"Get all the strawberry fields off a strawberry.type cls\n\n This function returns a list of StrawberryFields (one for each field item), while\n also paying attention the name and typing of the field.\n\n StrawberryFields can be defined on a strawberry.type class as either a dataclass-\n style field or using strawberry.field as a decorator.\n\n >>> import strawberry\n >>> @strawberry.type\n ... class Query:\n ... type_1a: int = 5\n ... type_1b: int = strawberry.field(...)\n ... type_1c: int = strawberry.field(resolver=...)\n ...\n ... @strawberry.field\n ... def type_2(self) -> int:\n ... ...\n\n Type #1:\n A pure dataclass-style field. Will not have a StrawberryField; one will need to\n be created in this function. Type annotation is required.\n\n Type #2:\n A field defined using @strawberry.field as a decorator around the resolver. The\n resolver must be type-annotated.\n\n The StrawberryField.python_name value will be assigned to the field's name on the\n class if one is not set by either using an explicit strawberry.field(name=...) or by\n passing a named function (i.e. not an anonymous lambda) to strawberry.field\n (typically as a decorator).\n \"\"\"\n # Deferred import to avoid import cycles\n from strawberry.field import StrawberryField\n\n fields: Dict[str, StrawberryField] = {}\n\n # before trying to find any fields, let's first add the fields defined in\n # parent classes, we do this by checking if parents have a type definition\n for base in cls.__bases__:\n if hasattr(base, \"_type_definition\"):\n base_fields = {\n field.graphql_name: field\n # TODO: we need to rename _fields to something else\n for field in base._type_definition._fields # type: ignore\n }\n\n # Add base's fields to cls' fields\n fields = {**fields, **base_fields}\n\n # then we can proceed with finding the fields for the current class\n for field in dataclasses.fields(cls):\n\n if isinstance(field, StrawberryField):\n # Check that the field type is not Private\n if isinstance(field.type, Private):\n raise PrivateStrawberryFieldError(field.python_name, cls.__name__)\n\n # Check that default is not set if a resolver is defined\n if field.default != dataclasses.MISSING and field.base_resolver is not None:\n raise FieldWithResolverAndDefaultValueError(\n field.python_name, cls.__name__\n )\n\n # Check that default_factory is not set if a resolver is defined\n # Note: using getattr because of this issue:\n # https://github.com/python/mypy/issues/6910\n if (\n getattr(field, \"default_factory\") != dataclasses.MISSING # noqa\n and field.base_resolver is not None\n ):\n raise FieldWithResolverAndDefaultFactoryError(\n field.python_name, cls.__name__\n )\n\n # we make sure that the origin is either the field's resolver when\n # called as:\n #\n # >>> @strawberry.field\n # ... 
def x(self): ...\n #\n # or the class where this field was defined, so we always have\n # the correct origin for determining field types when resolving\n # the types.\n field.origin = field.origin or cls\n\n # Make sure types are StrawberryAnnotations\n if not isinstance(field.type_annotation, StrawberryAnnotation):\n module = sys.modules[field.origin.__module__]\n field.type_annotation = StrawberryAnnotation(\n annotation=field.type_annotation, namespace=module.__dict__\n )\n\n # Create a StrawberryField for fields that didn't use strawberry.field\n else:\n # Only ignore Private fields that weren't defined using StrawberryFields\n if isinstance(field.type, Private):\n continue\n\n field_type = field.type\n\n module = sys.modules[cls.__module__]\n\n # Create a StrawberryField, for fields of Types #1 and #2a\n field = StrawberryField(\n python_name=field.name,\n graphql_name=to_camel_case(field.name),\n type_annotation=StrawberryAnnotation(\n annotation=field_type,\n namespace=module.__dict__,\n ),\n origin=cls,\n default=getattr(cls, field.name, UNSET),\n )\n\n field_name = field.graphql_name\n\n assert_message = \"Field must have a name by the time the schema is generated\"\n assert field_name is not None, assert_message\n\n # TODO: Raise exception if field_name already in fields\n fields[field_name] = field\n\n return list(fields.values())\n", "path": "strawberry/types/type_resolver.py"}]} | 1,917 | 131 |
gh_patches_debug_15599 | rasdani/github-patches | git_diff | getsentry__sentry-22143 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sentry ratelimit cannot be changed when using self-hosted
## Important Details
How are you running Sentry?
<!-- Please pick one of the following -->
On-Premise wo/ Docker, version 20.8.0
## Description
Sentry ratelimit cannot be changed when running on premises
## Steps to Reproduce
1. Go to web-interface, Admin/Settings
2. Set a non-zero ratelimit.
3. Get an error.
````
Oct 6 07:18:49 jsentry sentry[4128]: 10.100.33.5 - - [06/Oct/2020:04:18:49 +0000] "GET /api/0/internal/options/ HTTP/1.1" 200 20407 "https://sentry.findmykids.org/manage/settings/" "Mozilla/5.0 (X11; FreeBSD amd64; rv:76.0) Gecko/20100101 Firefox/76.0"
Oct 6 07:19:09 jsentry sentry[4128]: Traceback (most recent call last):
Oct 6 07:19:09 jsentry sentry[4128]: File "/usr/local/lib/python2.7/site-packages/sentry-20.8.0-py2.7.egg/sentry/api/base.py", line 134, in handle_exception
Oct 6 07:19:09 jsentry sentry[4128]: response = super(Endpoint, self).handle_exception(exc)
Oct 6 07:19:09 jsentry sentry[4128]: File "/usr/local/lib/python2.7/site-packages/djangorestframework-3.6.4-py2.7.egg/rest_framework/views.py", line 449, in handle_exception
Oct 6 07:19:09 jsentry sentry[4128]: self.raise_uncaught_exception(exc)
Oct 6 07:19:09 jsentry sentry[4128]: File "/usr/local/lib/python2.7/site-packages/sentry-20.8.0-py2.7.egg/sentry/api/base.py", line 247, in dispatch
Oct 6 07:19:09 jsentry sentry[4128]: response = handler(request, *args, **kwargs)
Oct 6 07:19:09 jsentry sentry[4128]: File "/usr/local/lib/python2.7/site-packages/sentry-20.8.0-py2.7.egg/sentry/api/endpoints/system_options.py", line 74, in put
Oct 6 07:19:09 jsentry sentry[4128]: options.set(k, v)
Oct 6 07:19:09 jsentry sentry[4128]: File "/usr/local/lib/python2.7/site-packages/sentry-20.8.0-py2.7.egg/sentry/options/manager.py", line 83, in set
Oct 6 07:19:09 jsentry sentry[4128]: "%r cannot be changed at runtime because it is configured on disk" % key
Oct 6 07:19:09 jsentry sentry[4128]: AssertionError: u'system.url-prefix' cannot be changed at runtime because it is configured on disk
Oct 6 07:19:09 jsentry sentry[4128]: 10.100.33.5 - - [06/Oct/2020:04:19:09 +0000] "PUT /api/0/internal/options/ HTTP/1.1" 500 746 "https://sentry.findmykids.org/manage/settings/" "Mozilla/5.0 (X11; FreeBSD amd64; rv:76.0) Gecko/20100101 Firefox/76.0"
````
### What you expected to happen
Ratelimit should be changeable from the web interface.
### Possible Solution
Stop sending system.url-prefix in the PUT request?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/api/endpoints/system_options.py`
Content:
```
1 from __future__ import absolute_import
2
3 import six
4
5 import sentry
6
7 from django.conf import settings
8 from rest_framework.response import Response
9
10 from sentry import options
11 from sentry.api.base import Endpoint
12 from sentry.api.permissions import SuperuserPermission
13 from sentry.utils.email import is_smtp_enabled
14
15
16 class SystemOptionsEndpoint(Endpoint):
17 permission_classes = (SuperuserPermission,)
18
19 def get(self, request):
20 query = request.GET.get("query")
21 if query == "is:required":
22 option_list = options.filter(flag=options.FLAG_REQUIRED)
23 elif query:
24 return Response(u"{} is not a supported search query".format(query), status=400)
25 else:
26 option_list = options.all()
27
28 smtp_disabled = not is_smtp_enabled()
29
30 results = {}
31 for k in option_list:
32 disabled, disabled_reason = False, None
33
34 if smtp_disabled and k.name[:5] == "mail.":
35 disabled_reason, disabled = "smtpDisabled", True
36 elif bool(
37 k.flags & options.FLAG_PRIORITIZE_DISK and settings.SENTRY_OPTIONS.get(k.name)
38 ):
39 # TODO(mattrobenolt): Expose this as a property on Key.
40 disabled_reason, disabled = "diskPriority", True
41
42 # TODO(mattrobenolt): help, placeholder, title, type
43 results[k.name] = {
44 "value": options.get(k.name),
45 "field": {
46 "default": k.default(),
47 "required": bool(k.flags & options.FLAG_REQUIRED),
48 "disabled": disabled,
49 "disabledReason": disabled_reason,
50 "isSet": options.isset(k.name),
51 "allowEmpty": bool(k.flags & options.FLAG_ALLOW_EMPTY),
52 },
53 }
54
55 return Response(results)
56
57 def put(self, request):
58 # TODO(dcramer): this should validate options before saving them
59 for k, v in six.iteritems(request.data):
60 if v and isinstance(v, six.string_types):
61 v = v.strip()
62 try:
63 option = options.lookup_key(k)
64 except options.UnknownOption:
65 # TODO(dcramer): unify API errors
66 return Response(
67 {"error": "unknown_option", "errorDetail": {"option": k}}, status=400
68 )
69
70 try:
71 if not (option.flags & options.FLAG_ALLOW_EMPTY) and not v:
72 options.delete(k)
73 else:
74 options.set(k, v)
75 except TypeError as e:
76 return Response(
77 {
78 "error": "invalid_type",
79 "errorDetail": {"option": k, "message": six.text_type(e)},
80 },
81 status=400,
82 )
83 # TODO(dcramer): this has nothing to do with configuring options and
84 # should not be set here
85 options.set("sentry:version-configured", sentry.get_version())
86 return Response(status=200)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/api/endpoints/system_options.py b/src/sentry/api/endpoints/system_options.py
--- a/src/sentry/api/endpoints/system_options.py
+++ b/src/sentry/api/endpoints/system_options.py
@@ -72,10 +72,13 @@
options.delete(k)
else:
options.set(k, v)
- except TypeError as e:
+ except (TypeError, AssertionError) as e:
+ # TODO(chadwhitacre): Use a custom exception for the
+ # immutability case, especially since asserts disappear with
+ # `python -O`.
return Response(
{
- "error": "invalid_type",
+ "error": "invalid_type" if type(e) is TypeError else "immutable_option",
"errorDetail": {"option": k, "message": six.text_type(e)},
},
status=400,
| {"golden_diff": "diff --git a/src/sentry/api/endpoints/system_options.py b/src/sentry/api/endpoints/system_options.py\n--- a/src/sentry/api/endpoints/system_options.py\n+++ b/src/sentry/api/endpoints/system_options.py\n@@ -72,10 +72,13 @@\n options.delete(k)\n else:\n options.set(k, v)\n- except TypeError as e:\n+ except (TypeError, AssertionError) as e:\n+ # TODO(chadwhitacre): Use a custom exception for the\n+ # immutability case, especially since asserts disappear with\n+ # `python -O`.\n return Response(\n {\n- \"error\": \"invalid_type\",\n+ \"error\": \"invalid_type\" if type(e) is TypeError else \"immutable_option\",\n \"errorDetail\": {\"option\": k, \"message\": six.text_type(e)},\n },\n status=400,\n", "issue": "Sentry ratelimit cannot be changed when using self-hosted\n## Important Details\r\n\r\nHow are you running Sentry?\r\n\r\n<!-- Please pick one of the following -->\r\nOn-Premise wo/ Docker, version 20.8.0\r\n\r\n## Description\r\nSentry ratelimit cannot be changed when running on premises\r\n\r\n## Steps to Reproduce\r\n\r\n1. Go to web-interface, Admin/Settings\r\n2. Set a non-zero ratelimit.\r\n3. Get an error.\r\n\r\n````\r\nOct 6 07:18:49 jsentry sentry[4128]: 10.100.33.5 - - [06/Oct/2020:04:18:49 +0000] \"GET /api/0/internal/options/ HTTP/1.1\" 200 20407 \"https://sentry.findmykids.org/manage/settings/\" \"Mozilla/5.0 (X11; FreeBSD amd64; rv:76.0) Gecko/20100101 Firefox/76.0\"\r\nOct 6 07:19:09 jsentry sentry[4128]: Traceback (most recent call last):\r\nOct 6 07:19:09 jsentry sentry[4128]: File \"/usr/local/lib/python2.7/site-packages/sentry-20.8.0-py2.7.egg/sentry/api/base.py\", line 134, in handle_exception\r\nOct 6 07:19:09 jsentry sentry[4128]: response = super(Endpoint, self).handle_exception(exc)\r\nOct 6 07:19:09 jsentry sentry[4128]: File \"/usr/local/lib/python2.7/site-packages/djangorestframework-3.6.4-py2.7.egg/rest_framework/views.py\", line 449, in handle_exception\r\nOct 6 07:19:09 jsentry sentry[4128]: self.raise_uncaught_exception(exc)\r\nOct 6 07:19:09 jsentry sentry[4128]: File \"/usr/local/lib/python2.7/site-packages/sentry-20.8.0-py2.7.egg/sentry/api/base.py\", line 247, in dispatch\r\nOct 6 07:19:09 jsentry sentry[4128]: response = handler(request, *args, **kwargs)\r\nOct 6 07:19:09 jsentry sentry[4128]: File \"/usr/local/lib/python2.7/site-packages/sentry-20.8.0-py2.7.egg/sentry/api/endpoints/system_options.py\", line 74, in put\r\nOct 6 07:19:09 jsentry sentry[4128]: options.set(k, v)\r\nOct 6 07:19:09 jsentry sentry[4128]: File \"/usr/local/lib/python2.7/site-packages/sentry-20.8.0-py2.7.egg/sentry/options/manager.py\", line 83, in set\r\nOct 6 07:19:09 jsentry sentry[4128]: \"%r cannot be changed at runtime because it is configured on disk\" % key\r\nOct 6 07:19:09 jsentry sentry[4128]: AssertionError: u'system.url-prefix' cannot be changed at runtime because it is configured on disk\r\nOct 6 07:19:09 jsentry sentry[4128]: 10.100.33.5 - - [06/Oct/2020:04:19:09 +0000] \"PUT /api/0/internal/options/ HTTP/1.1\" 500 746 \"https://sentry.findmykids.org/manage/settings/\" \"Mozilla/5.0 (X11; FreeBSD amd64; rv:76.0) Gecko/20100101 Firefox/76.0\"\r\n````\r\n\r\n### What you expected to happen\r\nRatelimit should be changeable from web-interface.\r\n\r\n### Possible Solution\r\nStop sending system.url-prefix in PUT request ?\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport six\n\nimport sentry\n\nfrom django.conf import settings\nfrom rest_framework.response import Response\n\nfrom sentry import options\nfrom 
sentry.api.base import Endpoint\nfrom sentry.api.permissions import SuperuserPermission\nfrom sentry.utils.email import is_smtp_enabled\n\n\nclass SystemOptionsEndpoint(Endpoint):\n permission_classes = (SuperuserPermission,)\n\n def get(self, request):\n query = request.GET.get(\"query\")\n if query == \"is:required\":\n option_list = options.filter(flag=options.FLAG_REQUIRED)\n elif query:\n return Response(u\"{} is not a supported search query\".format(query), status=400)\n else:\n option_list = options.all()\n\n smtp_disabled = not is_smtp_enabled()\n\n results = {}\n for k in option_list:\n disabled, disabled_reason = False, None\n\n if smtp_disabled and k.name[:5] == \"mail.\":\n disabled_reason, disabled = \"smtpDisabled\", True\n elif bool(\n k.flags & options.FLAG_PRIORITIZE_DISK and settings.SENTRY_OPTIONS.get(k.name)\n ):\n # TODO(mattrobenolt): Expose this as a property on Key.\n disabled_reason, disabled = \"diskPriority\", True\n\n # TODO(mattrobenolt): help, placeholder, title, type\n results[k.name] = {\n \"value\": options.get(k.name),\n \"field\": {\n \"default\": k.default(),\n \"required\": bool(k.flags & options.FLAG_REQUIRED),\n \"disabled\": disabled,\n \"disabledReason\": disabled_reason,\n \"isSet\": options.isset(k.name),\n \"allowEmpty\": bool(k.flags & options.FLAG_ALLOW_EMPTY),\n },\n }\n\n return Response(results)\n\n def put(self, request):\n # TODO(dcramer): this should validate options before saving them\n for k, v in six.iteritems(request.data):\n if v and isinstance(v, six.string_types):\n v = v.strip()\n try:\n option = options.lookup_key(k)\n except options.UnknownOption:\n # TODO(dcramer): unify API errors\n return Response(\n {\"error\": \"unknown_option\", \"errorDetail\": {\"option\": k}}, status=400\n )\n\n try:\n if not (option.flags & options.FLAG_ALLOW_EMPTY) and not v:\n options.delete(k)\n else:\n options.set(k, v)\n except TypeError as e:\n return Response(\n {\n \"error\": \"invalid_type\",\n \"errorDetail\": {\"option\": k, \"message\": six.text_type(e)},\n },\n status=400,\n )\n # TODO(dcramer): this has nothing to do with configuring options and\n # should not be set here\n options.set(\"sentry:version-configured\", sentry.get_version())\n return Response(status=200)\n", "path": "src/sentry/api/endpoints/system_options.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport six\n\nimport sentry\n\nfrom django.conf import settings\nfrom rest_framework.response import Response\n\nfrom sentry import options\nfrom sentry.api.base import Endpoint\nfrom sentry.api.permissions import SuperuserPermission\nfrom sentry.utils.email import is_smtp_enabled\n\n\nclass SystemOptionsEndpoint(Endpoint):\n permission_classes = (SuperuserPermission,)\n\n def get(self, request):\n query = request.GET.get(\"query\")\n if query == \"is:required\":\n option_list = options.filter(flag=options.FLAG_REQUIRED)\n elif query:\n return Response(u\"{} is not a supported search query\".format(query), status=400)\n else:\n option_list = options.all()\n\n smtp_disabled = not is_smtp_enabled()\n\n results = {}\n for k in option_list:\n disabled, disabled_reason = False, None\n\n if smtp_disabled and k.name[:5] == \"mail.\":\n disabled_reason, disabled = \"smtpDisabled\", True\n elif bool(\n k.flags & options.FLAG_PRIORITIZE_DISK and settings.SENTRY_OPTIONS.get(k.name)\n ):\n # TODO(mattrobenolt): Expose this as a property on Key.\n disabled_reason, disabled = \"diskPriority\", True\n\n # TODO(mattrobenolt): help, placeholder, title, type\n 
results[k.name] = {\n \"value\": options.get(k.name),\n \"field\": {\n \"default\": k.default(),\n \"required\": bool(k.flags & options.FLAG_REQUIRED),\n \"disabled\": disabled,\n \"disabledReason\": disabled_reason,\n \"isSet\": options.isset(k.name),\n \"allowEmpty\": bool(k.flags & options.FLAG_ALLOW_EMPTY),\n },\n }\n\n return Response(results)\n\n def put(self, request):\n # TODO(dcramer): this should validate options before saving them\n for k, v in six.iteritems(request.data):\n if v and isinstance(v, six.string_types):\n v = v.strip()\n try:\n option = options.lookup_key(k)\n except options.UnknownOption:\n # TODO(dcramer): unify API errors\n return Response(\n {\"error\": \"unknown_option\", \"errorDetail\": {\"option\": k}}, status=400\n )\n\n try:\n if not (option.flags & options.FLAG_ALLOW_EMPTY) and not v:\n options.delete(k)\n else:\n options.set(k, v)\n except (TypeError, AssertionError) as e:\n # TODO(chadwhitacre): Use a custom exception for the\n # immutability case, especially since asserts disappear with\n # `python -O`.\n return Response(\n {\n \"error\": \"invalid_type\" if type(e) is TypeError else \"immutable_option\",\n \"errorDetail\": {\"option\": k, \"message\": six.text_type(e)},\n },\n status=400,\n )\n # TODO(dcramer): this has nothing to do with configuring options and\n # should not be set here\n options.set(\"sentry:version-configured\", sentry.get_version())\n return Response(status=200)\n", "path": "src/sentry/api/endpoints/system_options.py"}]} | 2,041 | 196 |
gh_patches_debug_9142 | rasdani/github-patches | git_diff | scrapy__scrapy-4008 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Request doesn't set callback to self.parse automatically when errback is set
Hi, I'm facing an `AssertionError` when setting `errback` and not `callback` in `Request`. Is that expected?
```
...
def start_requests(self):
yield Request(
self.start_url,
# callback=self.parse,
errback=self.parse_error,
meta={
'dont_redirect': True,
},
)
...
```
triggers this:
```
2019-01-16 15:47:22 [scrapy.core.engine] ERROR: Error while obtaining start requests
Traceback (most recent call last):
File "/home/joaquin/Repos/example/env/lib/python3.6/site-packages/scrapy/core/engine.py", line 127, in _next_request
request = next(slot.start_requests)
File "/home/joaquin/Repos/example/example/spiders/br/rj/example.py", line 128, in start_requests
'dont_redirect': True,
File "/home/joaquin/Repos/example/env/lib/python3.6/site-packages/scrapy/http/request/__init__.py", line 34, in __init__
assert callback or not errback, "Cannot use errback without a callback"
AssertionError: Cannot use errback without a callback
2019-01-16 15:47:22 [scrapy.core.engine] INFO: Closing spider (finished)
2019-01-16 15:47:22 [scrapy.core.engine] INFO: Spider closed (finished)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/http/request/__init__.py`
Content:
```
1 """
2 This module implements the Request class which is used to represent HTTP
3 requests in Scrapy.
4
5 See documentation in docs/topics/request-response.rst
6 """
7 import six
8 from w3lib.url import safe_url_string
9
10 from scrapy.http.headers import Headers
11 from scrapy.utils.python import to_bytes
12 from scrapy.utils.trackref import object_ref
13 from scrapy.utils.url import escape_ajax
14 from scrapy.http.common import obsolete_setter
15 from scrapy.utils.curl import curl_to_request_kwargs
16
17
18 class Request(object_ref):
19
20 def __init__(self, url, callback=None, method='GET', headers=None, body=None,
21 cookies=None, meta=None, encoding='utf-8', priority=0,
22 dont_filter=False, errback=None, flags=None, cb_kwargs=None):
23
24 self._encoding = encoding # this one has to be set first
25 self.method = str(method).upper()
26 self._set_url(url)
27 self._set_body(body)
28 assert isinstance(priority, int), "Request priority not an integer: %r" % priority
29 self.priority = priority
30
31 if callback is not None and not callable(callback):
32 raise TypeError('callback must be a callable, got %s' % type(callback).__name__)
33 if errback is not None and not callable(errback):
34 raise TypeError('errback must be a callable, got %s' % type(errback).__name__)
35 assert callback or not errback, "Cannot use errback without a callback"
36 self.callback = callback
37 self.errback = errback
38
39 self.cookies = cookies or {}
40 self.headers = Headers(headers or {}, encoding=encoding)
41 self.dont_filter = dont_filter
42
43 self._meta = dict(meta) if meta else None
44 self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None
45 self.flags = [] if flags is None else list(flags)
46
47 @property
48 def cb_kwargs(self):
49 if self._cb_kwargs is None:
50 self._cb_kwargs = {}
51 return self._cb_kwargs
52
53 @property
54 def meta(self):
55 if self._meta is None:
56 self._meta = {}
57 return self._meta
58
59 def _get_url(self):
60 return self._url
61
62 def _set_url(self, url):
63 if not isinstance(url, six.string_types):
64 raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__)
65
66 s = safe_url_string(url, self.encoding)
67 self._url = escape_ajax(s)
68
69 if ('://' not in self._url) and (not self._url.startswith('data:')):
70 raise ValueError('Missing scheme in request url: %s' % self._url)
71
72 url = property(_get_url, obsolete_setter(_set_url, 'url'))
73
74 def _get_body(self):
75 return self._body
76
77 def _set_body(self, body):
78 if body is None:
79 self._body = b''
80 else:
81 self._body = to_bytes(body, self.encoding)
82
83 body = property(_get_body, obsolete_setter(_set_body, 'body'))
84
85 @property
86 def encoding(self):
87 return self._encoding
88
89 def __str__(self):
90 return "<%s %s>" % (self.method, self.url)
91
92 __repr__ = __str__
93
94 def copy(self):
95 """Return a copy of this Request"""
96 return self.replace()
97
98 def replace(self, *args, **kwargs):
99 """Create a new Request with the same attributes except for those
100 given new values.
101 """
102 for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags',
103 'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']:
104 kwargs.setdefault(x, getattr(self, x))
105 cls = kwargs.pop('cls', self.__class__)
106 return cls(*args, **kwargs)
107
108 @classmethod
109 def from_curl(cls, curl_command, ignore_unknown_options=True, **kwargs):
110 """Create a Request object from a string containing a `cURL
111 <https://curl.haxx.se/>`_ command. It populates the HTTP method, the
112 URL, the headers, the cookies and the body. It accepts the same
113 arguments as the :class:`Request` class, taking preference and
114 overriding the values of the same arguments contained in the cURL
115 command.
116
117 Unrecognized options are ignored by default. To raise an error when
118 finding unknown options call this method by passing
119 ``ignore_unknown_options=False``.
120
121 .. caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request`
122 subclasses, such as :class:`~scrapy.http.JSONRequest`, or
123 :class:`~scrapy.http.XmlRpcRequest`, as well as having
124 :ref:`downloader middlewares <topics-downloader-middleware>`
125 and
126 :ref:`spider middlewares <topics-spider-middleware>`
127 enabled, such as
128 :class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`,
129 :class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`,
130 or
131 :class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`,
132 may modify the :class:`~scrapy.http.Request` object.
133
134 """
135 request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)
136 request_kwargs.update(kwargs)
137 return cls(**request_kwargs)
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/http/request/__init__.py b/scrapy/http/request/__init__.py
--- a/scrapy/http/request/__init__.py
+++ b/scrapy/http/request/__init__.py
@@ -32,7 +32,6 @@
raise TypeError('callback must be a callable, got %s' % type(callback).__name__)
if errback is not None and not callable(errback):
raise TypeError('errback must be a callable, got %s' % type(errback).__name__)
- assert callback or not errback, "Cannot use errback without a callback"
self.callback = callback
self.errback = errback
| {"golden_diff": "diff --git a/scrapy/http/request/__init__.py b/scrapy/http/request/__init__.py\n--- a/scrapy/http/request/__init__.py\n+++ b/scrapy/http/request/__init__.py\n@@ -32,7 +32,6 @@\n raise TypeError('callback must be a callable, got %s' % type(callback).__name__)\n if errback is not None and not callable(errback):\n raise TypeError('errback must be a callable, got %s' % type(errback).__name__)\n- assert callback or not errback, \"Cannot use errback without a callback\"\n self.callback = callback\n self.errback = errback\n", "issue": "Request doesn't set callback to self.parse automatically when errback is set\nHi, I'm facing an `AssertionError` when set `errback` and not `callback` in `Request`, is that expected? :\r\n\r\n```\r\n...\r\n def start_requests(self):\r\n yield Request(\r\n self.start_url,\r\n # callback=self.parse,\r\n errback=self.parse_error,\r\n meta={\r\n 'dont_redirect': True,\r\n },\r\n )\r\n...\r\n```\r\ntrigger this : \r\n```\r\n2019-01-16 15:47:22 [scrapy.core.engine] ERROR: Error while obtaining start requests\r\nTraceback (most recent call last):\r\n File \"/home/joaquin/Repos/example/env/lib/python3.6/site-packages/scrapy/core/engine.py\", line 127, in _next_request\r\n request = next(slot.start_requests)\r\n File \"/home/joaquin/Repos/example/example/spiders/br/rj/example.py\", line 128, in start_requests\r\n 'dont_redirect': True,\r\n File \"/home/joaquin/Repos/example/env/lib/python3.6/site-packages/scrapy/http/request/__init__.py\", line 34, in __init__\r\n assert callback or not errback, \"Cannot use errback without a callback\"\r\nAssertionError: Cannot use errback without a callback\r\n2019-01-16 15:47:22 [scrapy.core.engine] INFO: Closing spider (finished)\r\n2019-01-16 15:47:22 [scrapy.core.engine] INFO: Spider closed (finished)\r\n```\n", "before_files": [{"content": "\"\"\"\nThis module implements the Request class which is used to represent HTTP\nrequests in Scrapy.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\nimport six\nfrom w3lib.url import safe_url_string\n\nfrom scrapy.http.headers import Headers\nfrom scrapy.utils.python import to_bytes\nfrom scrapy.utils.trackref import object_ref\nfrom scrapy.utils.url import escape_ajax\nfrom scrapy.http.common import obsolete_setter\nfrom scrapy.utils.curl import curl_to_request_kwargs\n\n\nclass Request(object_ref):\n\n def __init__(self, url, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding='utf-8', priority=0,\n dont_filter=False, errback=None, flags=None, cb_kwargs=None):\n\n self._encoding = encoding # this one has to be set first\n self.method = str(method).upper()\n self._set_url(url)\n self._set_body(body)\n assert isinstance(priority, int), \"Request priority not an integer: %r\" % priority\n self.priority = priority\n\n if callback is not None and not callable(callback):\n raise TypeError('callback must be a callable, got %s' % type(callback).__name__)\n if errback is not None and not callable(errback):\n raise TypeError('errback must be a callable, got %s' % type(errback).__name__)\n assert callback or not errback, \"Cannot use errback without a callback\"\n self.callback = callback\n self.errback = errback\n\n self.cookies = cookies or {}\n self.headers = Headers(headers or {}, encoding=encoding)\n self.dont_filter = dont_filter\n\n self._meta = dict(meta) if meta else None\n self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None\n self.flags = [] if flags is None else list(flags)\n\n @property\n def cb_kwargs(self):\n if 
self._cb_kwargs is None:\n self._cb_kwargs = {}\n return self._cb_kwargs\n\n @property\n def meta(self):\n if self._meta is None:\n self._meta = {}\n return self._meta\n\n def _get_url(self):\n return self._url\n\n def _set_url(self, url):\n if not isinstance(url, six.string_types):\n raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__)\n\n s = safe_url_string(url, self.encoding)\n self._url = escape_ajax(s)\n\n if ('://' not in self._url) and (not self._url.startswith('data:')):\n raise ValueError('Missing scheme in request url: %s' % self._url)\n\n url = property(_get_url, obsolete_setter(_set_url, 'url'))\n\n def _get_body(self):\n return self._body\n\n def _set_body(self, body):\n if body is None:\n self._body = b''\n else:\n self._body = to_bytes(body, self.encoding)\n\n body = property(_get_body, obsolete_setter(_set_body, 'body'))\n\n @property\n def encoding(self):\n return self._encoding\n\n def __str__(self):\n return \"<%s %s>\" % (self.method, self.url)\n\n __repr__ = __str__\n\n def copy(self):\n \"\"\"Return a copy of this Request\"\"\"\n return self.replace()\n\n def replace(self, *args, **kwargs):\n \"\"\"Create a new Request with the same attributes except for those\n given new values.\n \"\"\"\n for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags',\n 'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']:\n kwargs.setdefault(x, getattr(self, x))\n cls = kwargs.pop('cls', self.__class__)\n return cls(*args, **kwargs)\n\n @classmethod\n def from_curl(cls, curl_command, ignore_unknown_options=True, **kwargs):\n \"\"\"Create a Request object from a string containing a `cURL\n <https://curl.haxx.se/>`_ command. It populates the HTTP method, the\n URL, the headers, the cookies and the body. It accepts the same\n arguments as the :class:`Request` class, taking preference and\n overriding the values of the same arguments contained in the cURL\n command.\n\n Unrecognized options are ignored by default. To raise an error when\n finding unknown options call this method by passing\n ``ignore_unknown_options=False``.\n\n .. 
caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request`\n subclasses, such as :class:`~scrapy.http.JSONRequest`, or\n :class:`~scrapy.http.XmlRpcRequest`, as well as having\n :ref:`downloader middlewares <topics-downloader-middleware>`\n and\n :ref:`spider middlewares <topics-spider-middleware>`\n enabled, such as\n :class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`,\n :class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`,\n or\n :class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`,\n may modify the :class:`~scrapy.http.Request` object.\n\n \"\"\"\n request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)\n request_kwargs.update(kwargs)\n return cls(**request_kwargs)\n", "path": "scrapy/http/request/__init__.py"}], "after_files": [{"content": "\"\"\"\nThis module implements the Request class which is used to represent HTTP\nrequests in Scrapy.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\nimport six\nfrom w3lib.url import safe_url_string\n\nfrom scrapy.http.headers import Headers\nfrom scrapy.utils.python import to_bytes\nfrom scrapy.utils.trackref import object_ref\nfrom scrapy.utils.url import escape_ajax\nfrom scrapy.http.common import obsolete_setter\nfrom scrapy.utils.curl import curl_to_request_kwargs\n\n\nclass Request(object_ref):\n\n def __init__(self, url, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding='utf-8', priority=0,\n dont_filter=False, errback=None, flags=None, cb_kwargs=None):\n\n self._encoding = encoding # this one has to be set first\n self.method = str(method).upper()\n self._set_url(url)\n self._set_body(body)\n assert isinstance(priority, int), \"Request priority not an integer: %r\" % priority\n self.priority = priority\n\n if callback is not None and not callable(callback):\n raise TypeError('callback must be a callable, got %s' % type(callback).__name__)\n if errback is not None and not callable(errback):\n raise TypeError('errback must be a callable, got %s' % type(errback).__name__)\n self.callback = callback\n self.errback = errback\n\n self.cookies = cookies or {}\n self.headers = Headers(headers or {}, encoding=encoding)\n self.dont_filter = dont_filter\n\n self._meta = dict(meta) if meta else None\n self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None\n self.flags = [] if flags is None else list(flags)\n\n @property\n def cb_kwargs(self):\n if self._cb_kwargs is None:\n self._cb_kwargs = {}\n return self._cb_kwargs\n\n @property\n def meta(self):\n if self._meta is None:\n self._meta = {}\n return self._meta\n\n def _get_url(self):\n return self._url\n\n def _set_url(self, url):\n if not isinstance(url, six.string_types):\n raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__)\n\n s = safe_url_string(url, self.encoding)\n self._url = escape_ajax(s)\n\n if ('://' not in self._url) and (not self._url.startswith('data:')):\n raise ValueError('Missing scheme in request url: %s' % self._url)\n\n url = property(_get_url, obsolete_setter(_set_url, 'url'))\n\n def _get_body(self):\n return self._body\n\n def _set_body(self, body):\n if body is None:\n self._body = b''\n else:\n self._body = to_bytes(body, self.encoding)\n\n body = property(_get_body, obsolete_setter(_set_body, 'body'))\n\n @property\n def encoding(self):\n return self._encoding\n\n def __str__(self):\n return \"<%s %s>\" % (self.method, self.url)\n\n __repr__ = __str__\n\n def copy(self):\n 
\"\"\"Return a copy of this Request\"\"\"\n return self.replace()\n\n def replace(self, *args, **kwargs):\n \"\"\"Create a new Request with the same attributes except for those\n given new values.\n \"\"\"\n for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags',\n 'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']:\n kwargs.setdefault(x, getattr(self, x))\n cls = kwargs.pop('cls', self.__class__)\n return cls(*args, **kwargs)\n\n @classmethod\n def from_curl(cls, curl_command, ignore_unknown_options=True, **kwargs):\n \"\"\"Create a Request object from a string containing a `cURL\n <https://curl.haxx.se/>`_ command. It populates the HTTP method, the\n URL, the headers, the cookies and the body. It accepts the same\n arguments as the :class:`Request` class, taking preference and\n overriding the values of the same arguments contained in the cURL\n command.\n\n Unrecognized options are ignored by default. To raise an error when\n finding unknown options call this method by passing\n ``ignore_unknown_options=False``.\n\n .. caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request`\n subclasses, such as :class:`~scrapy.http.JSONRequest`, or\n :class:`~scrapy.http.XmlRpcRequest`, as well as having\n :ref:`downloader middlewares <topics-downloader-middleware>`\n and\n :ref:`spider middlewares <topics-spider-middleware>`\n enabled, such as\n :class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`,\n :class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`,\n or\n :class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`,\n may modify the :class:`~scrapy.http.Request` object.\n\n \"\"\"\n request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)\n request_kwargs.update(kwargs)\n return cls(**request_kwargs)\n", "path": "scrapy/http/request/__init__.py"}]} | 2,103 | 143 |
gh_patches_debug_3815 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-2381 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation site code uses incorrect class from ddtrace/contrib/wsgi/wsgi.py
### Which version of dd-trace-py are you using?
0.48.1
### What libraries am I using:
Python: 3.8
### How can we reproduce your problem?
1. Copy the example code from https://ddtrace.readthedocs.io/en/stable/integrations.html#wsgi
2. The example code cites the `DDTraceMiddleware` class
3. Run any WSGI application with said code
### What is the result that you get?
```
ImportError: cannot import name 'DDTraceMiddleware' from 'ddtrace.contrib.wsgi' (/usr/local/lib/python3.8/site-packages/ddtrace/contrib/wsgi/__init__.py)
```
### What is the result that you expected?
The example code on the readthedocs site should properly cite the import for the `DDWSGIMiddleware` class: https://github.com/DataDog/dd-trace-py/blob/master/ddtrace/contrib/wsgi/wsgi.py#L83
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/wsgi/__init__.py`
Content:
```
1 """
2 The Datadog WSGI middleware traces all WSGI requests.
3
4
5 Usage
6 ~~~~~
7
8 The middleware can be used manually via the following command::
9
10
11 from ddtrace.contrib.wsgi import DDTraceMiddleware
12
13 # application is a WSGI application
14 application = DDTraceMiddleware(application)
15
16
17 Global Configuration
18 ~~~~~~~~~~~~~~~~~~~~
19
20 .. py:data:: ddtrace.config.wsgi["service"]
21
22 The service name reported for the WSGI application.
23
24 This option can also be set with the ``DD_SERVICE`` environment
25 variable.
26
27 Default: ``"wsgi"``
28
29 .. py:data:: ddtrace.config.wsgi["distributed_tracing"]
30
31 Configuration that allows distributed tracing to be enabled.
32
33 Default: ``True``
34
35
36 :ref:`All HTTP tags <http-tagging>` are supported for this integration.
37
38 """
39 from .wsgi import DDWSGIMiddleware
40
41
42 __all__ = [
43 "DDWSGIMiddleware",
44 ]
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/contrib/wsgi/__init__.py b/ddtrace/contrib/wsgi/__init__.py
--- a/ddtrace/contrib/wsgi/__init__.py
+++ b/ddtrace/contrib/wsgi/__init__.py
@@ -8,10 +8,10 @@
The middleware can be used manually via the following command::
- from ddtrace.contrib.wsgi import DDTraceMiddleware
+ from ddtrace.contrib.wsgi import DDWSGIMiddleware
# application is a WSGI application
- application = DDTraceMiddleware(application)
+ application = DDWSGIMiddleware(application)
Global Configuration
| {"golden_diff": "diff --git a/ddtrace/contrib/wsgi/__init__.py b/ddtrace/contrib/wsgi/__init__.py\n--- a/ddtrace/contrib/wsgi/__init__.py\n+++ b/ddtrace/contrib/wsgi/__init__.py\n@@ -8,10 +8,10 @@\n The middleware can be used manually via the following command::\n \n \n- from ddtrace.contrib.wsgi import DDTraceMiddleware\n+ from ddtrace.contrib.wsgi import DDWSGIMiddleware\n \n # application is a WSGI application\n- application = DDTraceMiddleware(application)\n+ application = DDWSGIMiddleware(application)\n \n \n Global Configuration\n", "issue": "Documentation site code uses incorrect class from ddtrace/contrib/wsgi/wsgi.py\n### Which version of dd-trace-py are you using?\r\n0.48.1\r\n\r\n### What libraries am I using:\r\nPython: 3.8\r\n\r\n### How can we reproduce your problem?\r\n1. Copy the example code from https://ddtrace.readthedocs.io/en/stable/integrations.html#wsgi\r\n2. Example code cites `DDTraceMiddleware` class\r\n2. Run any wsgi application with said code\r\n\r\n### What is the result that you get?\r\n```\r\nImportError: cannot import name 'DDTraceMiddleware' from 'ddtrace.contrib.wsgi' (/usr/local/lib/python3.8/site-packages/ddtrace/contrib/wsgi/__init__.py)\r\n```\r\n\r\n### What is the result that you expected?\r\nExample code in readthedocs site should properly cite the import for the `DDWSGIMiddleware` class: https://github.com/DataDog/dd-trace-py/blob/master/ddtrace/contrib/wsgi/wsgi.py#L83\r\n\n", "before_files": [{"content": "\"\"\"\nThe Datadog WSGI middleware traces all WSGI requests.\n\n\nUsage\n~~~~~\n\nThe middleware can be used manually via the following command::\n\n\n from ddtrace.contrib.wsgi import DDTraceMiddleware\n\n # application is a WSGI application\n application = DDTraceMiddleware(application)\n\n\nGlobal Configuration\n~~~~~~~~~~~~~~~~~~~~\n\n.. py:data:: ddtrace.config.wsgi[\"service\"]\n\n The service name reported for the WSGI application.\n\n This option can also be set with the ``DD_SERVICE`` environment\n variable.\n\n Default: ``\"wsgi\"``\n\n.. py:data:: ddtrace.config.wsgi[\"distributed_tracing\"]\n\n Configuration that allows distributed tracing to be enabled.\n\n Default: ``True``\n\n\n:ref:`All HTTP tags <http-tagging>` are supported for this integration.\n\n\"\"\"\nfrom .wsgi import DDWSGIMiddleware\n\n\n__all__ = [\n \"DDWSGIMiddleware\",\n]\n", "path": "ddtrace/contrib/wsgi/__init__.py"}], "after_files": [{"content": "\"\"\"\nThe Datadog WSGI middleware traces all WSGI requests.\n\n\nUsage\n~~~~~\n\nThe middleware can be used manually via the following command::\n\n\n from ddtrace.contrib.wsgi import DDWSGIMiddleware\n\n # application is a WSGI application\n application = DDWSGIMiddleware(application)\n\n\nGlobal Configuration\n~~~~~~~~~~~~~~~~~~~~\n\n.. py:data:: ddtrace.config.wsgi[\"service\"]\n\n The service name reported for the WSGI application.\n\n This option can also be set with the ``DD_SERVICE`` environment\n variable.\n\n Default: ``\"wsgi\"``\n\n.. py:data:: ddtrace.config.wsgi[\"distributed_tracing\"]\n\n Configuration that allows distributed tracing to be enabled.\n\n Default: ``True``\n\n\n:ref:`All HTTP tags <http-tagging>` are supported for this integration.\n\n\"\"\"\nfrom .wsgi import DDWSGIMiddleware\n\n\n__all__ = [\n \"DDWSGIMiddleware\",\n]\n", "path": "ddtrace/contrib/wsgi/__init__.py"}]} | 778 | 138 |
gh_patches_debug_7648 | rasdani/github-patches | git_diff | fidals__shopelectro-616 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Long serv response
Every page responses with timeout of ~3s.
Sometimes category and product pages return 502 error.
Checked this pages: Main, Category, Product
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/models.py`
Content:
```
1 import random
2 import string
3 import typing
4 from uuid import uuid4
5
6 from django.conf import settings
7 from django.db import models
8 from django.urls import reverse
9 from django.utils.translation import ugettext_lazy as _
10
11 from catalog import models as catalog_models
12 from ecommerce import models as ecommerce_models
13 from pages import models as pages_models
14
15
16 def randomize_slug(slug: str) -> str:
17 slug_hash = ''.join(
18 random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)
19 )
20 return f'{slug}_{slug_hash}'
21
22
23 class SECategoryQuerySet(catalog_models.CategoryQuerySet):
24 def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':
25 categories_with_pictures = (
26 self
27 .filter(products__page__images__isnull=False)
28 .distinct()
29 )
30
31 return categories_with_pictures.get_ancestors(include_self=True)
32
33
34 class SECategoryManager(
35 catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)
36 ):
37 pass
38
39
40 class Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):
41
42 objects = SECategoryManager()
43 # pages.models.Page.objects_ field. It has the same problem.
44 objects_ = SECategoryManager()
45 uuid = models.UUIDField(default=uuid4, editable=False)
46
47 @classmethod
48 def get_default_parent(cls):
49 return pages_models.CustomPage.objects.filter(slug='catalog').first()
50
51 @property
52 def image(self):
53 products = self.products.all()
54 return products[0].image if products else None
55
56 def get_absolute_url(self):
57 return reverse('category', args=(self.page.slug,))
58
59
60 class Product(catalog_models.AbstractProduct, pages_models.SyncPageMixin):
61
62 # That's why we are needed to explicitly add objects manager here
63 # because of Django special managers behaviour.
64 # Se se#480 for details.
65 objects = catalog_models.ProductManager()
66
67 category = models.ForeignKey(
68 Category,
69 on_delete=models.CASCADE,
70 null=True,
71 related_name='products',
72 verbose_name=_('category'),
73 )
74
75 tags = models.ManyToManyField(
76 'Tag',
77 related_name='products',
78 blank=True,
79 verbose_name=_('tags'),
80 )
81
82 vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))
83 uuid = models.UUIDField(default=uuid4, editable=False)
84 purchase_price = models.FloatField(
85 default=0, verbose_name=_('purchase_price'))
86 wholesale_small = models.FloatField(
87 default=0, verbose_name=_('wholesale_small'))
88 wholesale_medium = models.FloatField(
89 default=0, verbose_name=_('wholesale_medium'))
90 wholesale_large = models.FloatField(
91 default=0, verbose_name=_('wholesale_large'))
92
93 def get_absolute_url(self):
94 return reverse('product', args=(self.vendor_code,))
95
96 @property
97 def average_rate(self):
98 """Return rounded to first decimal averaged rating."""
99 rating = self.product_feedbacks.aggregate(
100 avg=models.Avg('rating')).get('avg', 0)
101 return round(rating, 1)
102
103 @property
104 def feedback_count(self):
105 return self.product_feedbacks.count()
106
107 @property
108 def feedback(self):
109 return self.product_feedbacks.all().order_by('-date')
110
111 def get_params(self):
112 return Tag.objects.filter_by_products([self]).get_group_tags_pairs()
113
114 def get_brand_name(self) -> str:
115 brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)
116 return brand.name if brand else ''
117
118
119 class ProductFeedback(models.Model):
120 product = models.ForeignKey(
121 Product, on_delete=models.CASCADE, null=True,
122 related_name='product_feedbacks'
123 )
124
125 date = models.DateTimeField(
126 auto_now=True, db_index=True, verbose_name=_('date'))
127 name = models.CharField(
128 max_length=255, db_index=True, verbose_name=_('name'))
129 rating = models.PositiveSmallIntegerField(
130 default=1, db_index=True, verbose_name=_('rating'))
131 dignities = models.TextField(
132 default='', blank=True, verbose_name=_('dignities'))
133 limitations = models.TextField(
134 default='', blank=True, verbose_name=_('limitations'))
135 general = models.TextField(
136 default='', blank=True, verbose_name=_('limitations'))
137
138
139 def _default_payment():
140 """Default payment option is first element of first tuple in options."""
141 assert settings.PAYMENT_OPTIONS[0][0], 'No payment options!'
142 return settings.PAYMENT_OPTIONS[0][0]
143
144
145 class Order(ecommerce_models.Order):
146 address = models.TextField(blank=True, default='')
147 payment_type = models.CharField(
148 max_length=255,
149 choices=settings.PAYMENT_OPTIONS,
150 default=_default_payment()
151 )
152 comment = models.TextField(blank=True, default='')
153 # total price - total purchase price
154 revenue = models.FloatField(default=0, verbose_name=_('revenue'))
155
156 @property
157 def payment_type_name(self):
158 """Return name for an order's payment option."""
159 return next(
160 name for option, name in settings.PAYMENT_OPTIONS
161 if self.payment_type == option
162 )
163
164 def set_positions(self, cart):
165 """
166 Save cart's state into Order instance.
167
168 @todo #589:60m Create Cart model.
169 See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672
170 """
171 self.revenue = cart.total_revenue()
172 self.save()
173 for id_, position in cart:
174 self.positions.create(
175 order=self,
176 product_id=id_,
177 vendor_code=position['vendor_code'],
178 name=position['name'],
179 price=position['price'],
180 quantity=position['quantity'],
181 )
182 return self
183
184
185 class CategoryPage(pages_models.ModelPage):
186 """Create proxy model for Admin."""
187
188 class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)
189 proxy = True
190
191 # noinspection PyTypeChecker
192 objects = pages_models.ModelPage.create_model_page_managers(Category)
193
194
195 class ProductPage(pages_models.ModelPage):
196 """Create proxy model for Admin."""
197
198 class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)
199 proxy = True
200
201 # noinspection PyTypeChecker
202 objects = (
203 pages_models.ModelPage
204 .create_model_page_managers(Product)
205 )
206
207
208 class TagGroup(catalog_models.TagGroup):
209 pass
210
211
212 class TagQuerySet(catalog_models.TagQuerySet):
213 pass
214
215
216 class Tag(catalog_models.Tag):
217 group = models.ForeignKey(
218 TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',
219 )
220
221
222 class ExcludedModelTPageQuerySet(pages_models.PageQuerySet):
223 def exclude_type(self):
224 return self.exclude(type=pages_models.Page.MODEL_TYPE)
225
226 # @todo #rf169:30m Fix model.Manager bad inheritance
227 # Now we have this problem:
228 # ```
229 # In [2]: type(ExcludedModelTPage.objects.all())
230 # Out[2]: mptt.querysets.TreeQuerySet
231 # ```
232 # But should be `pages.models.PageQuerySet`.
233 # Or just rm all excluded staff
234 # in favor on direct excluded filter using.
235 class ExcludedModelTPageManager(
236 models.Manager.from_queryset(ExcludedModelTPageQuerySet)
237 ):
238
239 def get_queryset(self):
240 return super().get_queryset().exclude(type=pages_models.Page.MODEL_TYPE)
241
242
243 class ExcludedModelTPage(pages_models.Page):
244
245 class Meta(pages_models.Page.Meta): # Ignore PycodestyleBear (E303)
246 proxy = True
247
248 objects = ExcludedModelTPageManager()
249 # pages.models.Page.objects_ field. It has the same problem.
250 objects_ = ExcludedModelTPageManager()
251
```
Path: `etc/gunicorn.py`
Content:
```
1 import multiprocessing
2 import os
3
4 default_workers = multiprocessing.cpu_count() * 2 + 1
5 workers = os.environ.get('WEB_CONCURRENCY', default_workers)
6 worker_class = 'sync'
7 max_requests = 300
8 max_requests_jitter = 300
9 errorlog = '-'
10
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/etc/gunicorn.py b/etc/gunicorn.py
--- a/etc/gunicorn.py
+++ b/etc/gunicorn.py
@@ -6,4 +6,5 @@
worker_class = 'sync'
max_requests = 300
max_requests_jitter = 300
+timeout = 120
errorlog = '-'
diff --git a/shopelectro/models.py b/shopelectro/models.py
--- a/shopelectro/models.py
+++ b/shopelectro/models.py
@@ -223,6 +223,7 @@
def exclude_type(self):
return self.exclude(type=pages_models.Page.MODEL_TYPE)
+
# @todo #rf169:30m Fix model.Manager bad inheritance
# Now we have this problem:
# ```
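The fix above raises Gunicorn's sync-worker timeout to 120 seconds, which is a common way to stop slow requests from surfacing as 502s behind a reverse proxy. Purely as a sketch, the same `etc/gunicorn.py` could read the value from the environment, mirroring how it already reads `WEB_CONCURRENCY`; the `GUNICORN_TIMEOUT` variable name below is an assumption, not something the repository defines:

```python
# Sketch of etc/gunicorn.py with the timeout made configurable.
# GUNICORN_TIMEOUT is an invented environment variable, not used by the project.
import multiprocessing
import os

default_workers = multiprocessing.cpu_count() * 2 + 1
workers = os.environ.get('WEB_CONCURRENCY', default_workers)
worker_class = 'sync'
max_requests = 300
max_requests_jitter = 300
timeout = int(os.environ.get('GUNICORN_TIMEOUT', 120))
errorlog = '-'
```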
| {"golden_diff": "diff --git a/etc/gunicorn.py b/etc/gunicorn.py\n--- a/etc/gunicorn.py\n+++ b/etc/gunicorn.py\n@@ -6,4 +6,5 @@\n worker_class = 'sync'\n max_requests = 300\n max_requests_jitter = 300\n+timeout = 120\n errorlog = '-'\ndiff --git a/shopelectro/models.py b/shopelectro/models.py\n--- a/shopelectro/models.py\n+++ b/shopelectro/models.py\n@@ -223,6 +223,7 @@\n def exclude_type(self):\n return self.exclude(type=pages_models.Page.MODEL_TYPE)\n \n+\n # @todo #rf169:30m Fix model.Manager bad inheritance\n # Now we have this problem:\n # ```\n", "issue": "Long serv response\nEvery page responses with timeout of ~3s.\r\nSometimes category and product pages return 502 error.\r\n\r\nChecked this pages: Main, Category, Product\r\n\r\n\n", "before_files": [{"content": "import random\nimport string\nimport typing\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom catalog import models as catalog_models\nfrom ecommerce import models as ecommerce_models\nfrom pages import models as pages_models\n\n\ndef randomize_slug(slug: str) -> str:\n slug_hash = ''.join(\n random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)\n )\n return f'{slug}_{slug_hash}'\n\n\nclass SECategoryQuerySet(catalog_models.CategoryQuerySet):\n def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':\n categories_with_pictures = (\n self\n .filter(products__page__images__isnull=False)\n .distinct()\n )\n\n return categories_with_pictures.get_ancestors(include_self=True)\n\n\nclass SECategoryManager(\n catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)\n):\n pass\n\n\nclass Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):\n\n objects = SECategoryManager()\n # pages.models.Page.objects_ field. 
It has the same problem.\n objects_ = SECategoryManager()\n uuid = models.UUIDField(default=uuid4, editable=False)\n\n @classmethod\n def get_default_parent(cls):\n return pages_models.CustomPage.objects.filter(slug='catalog').first()\n\n @property\n def image(self):\n products = self.products.all()\n return products[0].image if products else None\n\n def get_absolute_url(self):\n return reverse('category', args=(self.page.slug,))\n\n\nclass Product(catalog_models.AbstractProduct, pages_models.SyncPageMixin):\n\n # That's why we are needed to explicitly add objects manager here\n # because of Django special managers behaviour.\n # Se se#480 for details.\n objects = catalog_models.ProductManager()\n\n category = models.ForeignKey(\n Category,\n on_delete=models.CASCADE,\n null=True,\n related_name='products',\n verbose_name=_('category'),\n )\n\n tags = models.ManyToManyField(\n 'Tag',\n related_name='products',\n blank=True,\n verbose_name=_('tags'),\n )\n\n vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))\n uuid = models.UUIDField(default=uuid4, editable=False)\n purchase_price = models.FloatField(\n default=0, verbose_name=_('purchase_price'))\n wholesale_small = models.FloatField(\n default=0, verbose_name=_('wholesale_small'))\n wholesale_medium = models.FloatField(\n default=0, verbose_name=_('wholesale_medium'))\n wholesale_large = models.FloatField(\n default=0, verbose_name=_('wholesale_large'))\n\n def get_absolute_url(self):\n return reverse('product', args=(self.vendor_code,))\n\n @property\n def average_rate(self):\n \"\"\"Return rounded to first decimal averaged rating.\"\"\"\n rating = self.product_feedbacks.aggregate(\n avg=models.Avg('rating')).get('avg', 0)\n return round(rating, 1)\n\n @property\n def feedback_count(self):\n return self.product_feedbacks.count()\n\n @property\n def feedback(self):\n return self.product_feedbacks.all().order_by('-date')\n\n def get_params(self):\n return Tag.objects.filter_by_products([self]).get_group_tags_pairs()\n\n def get_brand_name(self) -> str:\n brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)\n return brand.name if brand else ''\n\n\nclass ProductFeedback(models.Model):\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, null=True,\n related_name='product_feedbacks'\n )\n\n date = models.DateTimeField(\n auto_now=True, db_index=True, verbose_name=_('date'))\n name = models.CharField(\n max_length=255, db_index=True, verbose_name=_('name'))\n rating = models.PositiveSmallIntegerField(\n default=1, db_index=True, verbose_name=_('rating'))\n dignities = models.TextField(\n default='', blank=True, verbose_name=_('dignities'))\n limitations = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n general = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n\n\ndef _default_payment():\n \"\"\"Default payment option is first element of first tuple in options.\"\"\"\n assert settings.PAYMENT_OPTIONS[0][0], 'No payment options!'\n return settings.PAYMENT_OPTIONS[0][0]\n\n\nclass Order(ecommerce_models.Order):\n address = models.TextField(blank=True, default='')\n payment_type = models.CharField(\n max_length=255,\n choices=settings.PAYMENT_OPTIONS,\n default=_default_payment()\n )\n comment = models.TextField(blank=True, default='')\n # total price - total purchase price\n revenue = models.FloatField(default=0, verbose_name=_('revenue'))\n\n @property\n def payment_type_name(self):\n \"\"\"Return name for an order's payment 
option.\"\"\"\n return next(\n name for option, name in settings.PAYMENT_OPTIONS\n if self.payment_type == option\n )\n\n def set_positions(self, cart):\n \"\"\"\n Save cart's state into Order instance.\n\n @todo #589:60m Create Cart model.\n See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672\n \"\"\"\n self.revenue = cart.total_revenue()\n self.save()\n for id_, position in cart:\n self.positions.create(\n order=self,\n product_id=id_,\n vendor_code=position['vendor_code'],\n name=position['name'],\n price=position['price'],\n quantity=position['quantity'],\n )\n return self\n\n\nclass CategoryPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = pages_models.ModelPage.create_model_page_managers(Category)\n\n\nclass ProductPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = (\n pages_models.ModelPage\n .create_model_page_managers(Product)\n )\n\n\nclass TagGroup(catalog_models.TagGroup):\n pass\n\n\nclass TagQuerySet(catalog_models.TagQuerySet):\n pass\n\n\nclass Tag(catalog_models.Tag):\n group = models.ForeignKey(\n TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',\n )\n\n\nclass ExcludedModelTPageQuerySet(pages_models.PageQuerySet):\n def exclude_type(self):\n return self.exclude(type=pages_models.Page.MODEL_TYPE)\n\n# @todo #rf169:30m Fix model.Manager bad inheritance\n# Now we have this problem:\n# ```\n# In [2]: type(ExcludedModelTPage.objects.all())\n# Out[2]: mptt.querysets.TreeQuerySet\n# ```\n# But should be `pages.models.PageQuerySet`.\n# Or just rm all excluded staff\n# in favor on direct excluded filter using.\nclass ExcludedModelTPageManager(\n models.Manager.from_queryset(ExcludedModelTPageQuerySet)\n):\n\n def get_queryset(self):\n return super().get_queryset().exclude(type=pages_models.Page.MODEL_TYPE)\n\n\nclass ExcludedModelTPage(pages_models.Page):\n\n class Meta(pages_models.Page.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n objects = ExcludedModelTPageManager()\n # pages.models.Page.objects_ field. 
It has the same problem.\n objects_ = ExcludedModelTPageManager()\n", "path": "shopelectro/models.py"}, {"content": "import multiprocessing\nimport os\n\ndefault_workers = multiprocessing.cpu_count() * 2 + 1\nworkers = os.environ.get('WEB_CONCURRENCY', default_workers)\nworker_class = 'sync'\nmax_requests = 300\nmax_requests_jitter = 300\nerrorlog = '-'\n", "path": "etc/gunicorn.py"}], "after_files": [{"content": "import random\nimport string\nimport typing\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom catalog import models as catalog_models\nfrom ecommerce import models as ecommerce_models\nfrom pages import models as pages_models\n\n\ndef randomize_slug(slug: str) -> str:\n slug_hash = ''.join(\n random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)\n )\n return f'{slug}_{slug_hash}'\n\n\nclass SECategoryQuerySet(catalog_models.CategoryQuerySet):\n def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':\n categories_with_pictures = (\n self\n .filter(products__page__images__isnull=False)\n .distinct()\n )\n\n return categories_with_pictures.get_ancestors(include_self=True)\n\n\nclass SECategoryManager(\n catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)\n):\n pass\n\n\nclass Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):\n\n objects = SECategoryManager()\n # pages.models.Page.objects_ field. It has the same problem.\n objects_ = SECategoryManager()\n uuid = models.UUIDField(default=uuid4, editable=False)\n\n @classmethod\n def get_default_parent(cls):\n return pages_models.CustomPage.objects.filter(slug='catalog').first()\n\n @property\n def image(self):\n products = self.products.all()\n return products[0].image if products else None\n\n def get_absolute_url(self):\n return reverse('category', args=(self.page.slug,))\n\n\nclass Product(catalog_models.AbstractProduct, pages_models.SyncPageMixin):\n\n # That's why we are needed to explicitly add objects manager here\n # because of Django special managers behaviour.\n # Se se#480 for details.\n objects = catalog_models.ProductManager()\n\n category = models.ForeignKey(\n Category,\n on_delete=models.CASCADE,\n null=True,\n related_name='products',\n verbose_name=_('category'),\n )\n\n tags = models.ManyToManyField(\n 'Tag',\n related_name='products',\n blank=True,\n verbose_name=_('tags'),\n )\n\n vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))\n uuid = models.UUIDField(default=uuid4, editable=False)\n purchase_price = models.FloatField(\n default=0, verbose_name=_('purchase_price'))\n wholesale_small = models.FloatField(\n default=0, verbose_name=_('wholesale_small'))\n wholesale_medium = models.FloatField(\n default=0, verbose_name=_('wholesale_medium'))\n wholesale_large = models.FloatField(\n default=0, verbose_name=_('wholesale_large'))\n\n def get_absolute_url(self):\n return reverse('product', args=(self.vendor_code,))\n\n @property\n def average_rate(self):\n \"\"\"Return rounded to first decimal averaged rating.\"\"\"\n rating = self.product_feedbacks.aggregate(\n avg=models.Avg('rating')).get('avg', 0)\n return round(rating, 1)\n\n @property\n def feedback_count(self):\n return self.product_feedbacks.count()\n\n @property\n def feedback(self):\n return self.product_feedbacks.all().order_by('-date')\n\n def get_params(self):\n return Tag.objects.filter_by_products([self]).get_group_tags_pairs()\n\n def 
get_brand_name(self) -> str:\n brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)\n return brand.name if brand else ''\n\n\nclass ProductFeedback(models.Model):\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, null=True,\n related_name='product_feedbacks'\n )\n\n date = models.DateTimeField(\n auto_now=True, db_index=True, verbose_name=_('date'))\n name = models.CharField(\n max_length=255, db_index=True, verbose_name=_('name'))\n rating = models.PositiveSmallIntegerField(\n default=1, db_index=True, verbose_name=_('rating'))\n dignities = models.TextField(\n default='', blank=True, verbose_name=_('dignities'))\n limitations = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n general = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n\n\ndef _default_payment():\n \"\"\"Default payment option is first element of first tuple in options.\"\"\"\n assert settings.PAYMENT_OPTIONS[0][0], 'No payment options!'\n return settings.PAYMENT_OPTIONS[0][0]\n\n\nclass Order(ecommerce_models.Order):\n address = models.TextField(blank=True, default='')\n payment_type = models.CharField(\n max_length=255,\n choices=settings.PAYMENT_OPTIONS,\n default=_default_payment()\n )\n comment = models.TextField(blank=True, default='')\n # total price - total purchase price\n revenue = models.FloatField(default=0, verbose_name=_('revenue'))\n\n @property\n def payment_type_name(self):\n \"\"\"Return name for an order's payment option.\"\"\"\n return next(\n name for option, name in settings.PAYMENT_OPTIONS\n if self.payment_type == option\n )\n\n def set_positions(self, cart):\n \"\"\"\n Save cart's state into Order instance.\n\n @todo #589:60m Create Cart model.\n See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672\n \"\"\"\n self.revenue = cart.total_revenue()\n self.save()\n for id_, position in cart:\n self.positions.create(\n order=self,\n product_id=id_,\n vendor_code=position['vendor_code'],\n name=position['name'],\n price=position['price'],\n quantity=position['quantity'],\n )\n return self\n\n\nclass CategoryPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = pages_models.ModelPage.create_model_page_managers(Category)\n\n\nclass ProductPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = (\n pages_models.ModelPage\n .create_model_page_managers(Product)\n )\n\n\nclass TagGroup(catalog_models.TagGroup):\n pass\n\n\nclass TagQuerySet(catalog_models.TagQuerySet):\n pass\n\n\nclass Tag(catalog_models.Tag):\n group = models.ForeignKey(\n TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',\n )\n\n\nclass ExcludedModelTPageQuerySet(pages_models.PageQuerySet):\n def exclude_type(self):\n return self.exclude(type=pages_models.Page.MODEL_TYPE)\n\n\n# @todo #rf169:30m Fix model.Manager bad inheritance\n# Now we have this problem:\n# ```\n# In [2]: type(ExcludedModelTPage.objects.all())\n# Out[2]: mptt.querysets.TreeQuerySet\n# ```\n# But should be `pages.models.PageQuerySet`.\n# Or just rm all excluded staff\n# in favor on direct excluded filter using.\nclass ExcludedModelTPageManager(\n models.Manager.from_queryset(ExcludedModelTPageQuerySet)\n):\n\n def 
get_queryset(self):\n return super().get_queryset().exclude(type=pages_models.Page.MODEL_TYPE)\n\n\nclass ExcludedModelTPage(pages_models.Page):\n\n class Meta(pages_models.Page.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n objects = ExcludedModelTPageManager()\n # pages.models.Page.objects_ field. It has the same problem.\n objects_ = ExcludedModelTPageManager()\n", "path": "shopelectro/models.py"}, {"content": "import multiprocessing\nimport os\n\ndefault_workers = multiprocessing.cpu_count() * 2 + 1\nworkers = os.environ.get('WEB_CONCURRENCY', default_workers)\nworker_class = 'sync'\nmax_requests = 300\nmax_requests_jitter = 300\ntimeout = 120\nerrorlog = '-'\n", "path": "etc/gunicorn.py"}]} | 2,734 | 174 |
gh_patches_debug_15910 | rasdani/github-patches | git_diff | freedomofpress__securedrop-3652 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Journalist API returns error 500 when JSON is malformed or absent
## Description
## Steps to Reproduce
1. `make dev`
2. `curl -X POST 127.0.0.1:8081/api/v1/token`
## Expected Behavior
The server should return an error 400 stating that the request is invalid.
## Actual Behavior
The server returns an error 500 stating that there is a server error.
--- END ISSUE ---
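The 500 comes from the token endpoint calling `json.loads(request.data)` before any validation (see `get_token` in the file below): an empty or malformed body raises `ValueError`, and an unhandled exception in a Flask view is reported as an internal server error. A minimal, self-contained sketch of the failure mode and of the 400 behaviour the issue expects (illustrative only, not SecureDrop code; the app and route names are placeholders):

```python
# Stand-alone illustration of why an absent/malformed JSON body becomes a 500
# unless it is caught and turned into a 400.
import json

from flask import Flask, abort, jsonify, request

app = Flask(__name__)

@app.route('/token', methods=['POST'])
def token():
    try:
        creds = json.loads(request.data)  # raises ValueError on b'' or on bad JSON
    except ValueError:
        abort(400, 'malformed request')   # what the issue expects instead of a 500
    return jsonify(creds), 200

if __name__ == '__main__':
    with app.test_client() as client:
        print(client.post('/token').status_code)                          # 400 (absent body)
        print(client.post('/token', data='{not json').status_code)        # 400 (malformed body)
        print(client.post('/token', json={'username': 'x'}).status_code)  # 200
```

Without the `try`/`except`, the first two requests would instead be answered with a 500, matching the reported behaviour.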
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/journalist_app/api.py`
Content:
```
1 from datetime import datetime, timedelta
2 from functools import wraps
3 import hashlib
4 import json
5 from werkzeug.exceptions import default_exceptions # type: ignore
6
7 from flask import abort, Blueprint, current_app, jsonify, request, send_file
8
9 from db import db
10 from journalist_app import utils
11 from models import (Journalist, Reply, Source, Submission,
12 LoginThrottledException, InvalidUsernameException,
13 BadTokenException, WrongPasswordException)
14 from store import NotEncrypted
15
16
17 TOKEN_EXPIRATION_MINS = 60 * 8
18
19
20 def get_user_object(request):
21 """Helper function to use in token_required views that need a user
22 object
23 """
24 auth_token = request.headers.get('Authorization').split(" ")[1]
25 user = Journalist.validate_api_token_and_get_user(auth_token)
26 return user
27
28
29 def token_required(f):
30 @wraps(f)
31 def decorated_function(*args, **kwargs):
32 try:
33 auth_header = request.headers['Authorization']
34 except KeyError:
35 return abort(403, 'API token not found in Authorization header.')
36
37 if auth_header:
38 auth_token = auth_header.split(" ")[1]
39 else:
40 auth_token = ''
41 if not Journalist.validate_api_token_and_get_user(auth_token):
42 return abort(403, 'API token is invalid or expired.')
43 return f(*args, **kwargs)
44 return decorated_function
45
46
47 def get_or_404(model, object_id, column=''):
48 if column:
49 result = model.query.filter(column == object_id).one_or_none()
50 else:
51 result = model.query.get(object_id)
52 if result is None:
53 abort(404)
54 return result
55
56
57 def make_blueprint(config):
58 api = Blueprint('api', __name__)
59
60 @api.route('/')
61 def get_endpoints():
62 endpoints = {'sources_url': '/api/v1/sources',
63 'current_user_url': '/api/v1/user',
64 'submissions_url': '/api/v1/submissions',
65 'auth_token_url': '/api/v1/token'}
66 return jsonify(endpoints), 200
67
68 @api.route('/token', methods=['POST'])
69 def get_token():
70 creds = json.loads(request.data)
71
72 username = creds.get('username', None)
73 passphrase = creds.get('passphrase', None)
74 one_time_code = creds.get('one_time_code', None)
75
76 if username is None:
77 return abort(400, 'username field is missing')
78 if passphrase is None:
79 return abort(400, 'passphrase field is missing')
80 if one_time_code is None:
81 return abort(400, 'one_time_code field is missing')
82
83 try:
84 journalist = Journalist.login(username, passphrase, one_time_code)
85 token_expiry = datetime.utcnow() + timedelta(
86 seconds=TOKEN_EXPIRATION_MINS * 60)
87 response = jsonify({'token': journalist.generate_api_token(
88 expiration=TOKEN_EXPIRATION_MINS * 60),
89 'expiration': token_expiry.isoformat() + 'Z'})
90
91 # Update access metadata
92 journalist.last_access = datetime.utcnow()
93 db.session.add(journalist)
94 db.session.commit()
95
96 return response, 200
97 except (LoginThrottledException, InvalidUsernameException,
98 BadTokenException, WrongPasswordException):
99 return abort(403, 'Token authentication failed.')
100
101 @api.route('/sources', methods=['GET'])
102 @token_required
103 def get_all_sources():
104 sources = Source.query.filter_by(pending=False).all()
105 return jsonify(
106 {'sources': [source.to_json() for source in sources]}), 200
107
108 @api.route('/sources/<source_uuid>', methods=['GET', 'DELETE'])
109 @token_required
110 def single_source(source_uuid):
111 if request.method == 'GET':
112 source = get_or_404(Source, source_uuid, column=Source.uuid)
113 return jsonify(source.to_json()), 200
114 elif request.method == 'DELETE':
115 source = get_or_404(Source, source_uuid, column=Source.uuid)
116 utils.delete_collection(source.filesystem_id)
117 return jsonify({'message': 'Source and submissions deleted'}), 200
118
119 @api.route('/sources/<source_uuid>/add_star', methods=['POST'])
120 @token_required
121 def add_star(source_uuid):
122 source = get_or_404(Source, source_uuid, column=Source.uuid)
123 utils.make_star_true(source.filesystem_id)
124 db.session.commit()
125 return jsonify({'message': 'Star added'}), 201
126
127 @api.route('/sources/<source_uuid>/remove_star', methods=['DELETE'])
128 @token_required
129 def remove_star(source_uuid):
130 source = get_or_404(Source, source_uuid, column=Source.uuid)
131 utils.make_star_false(source.filesystem_id)
132 db.session.commit()
133 return jsonify({'message': 'Star removed'}), 200
134
135 @api.route('/sources/<source_uuid>/flag', methods=['POST'])
136 @token_required
137 def flag(source_uuid):
138 source = get_or_404(Source, source_uuid,
139 column=Source.uuid)
140 source.flagged = True
141 db.session.commit()
142 return jsonify({'message': 'Source flagged for reply'}), 200
143
144 @api.route('/sources/<source_uuid>/submissions', methods=['GET'])
145 @token_required
146 def all_source_submissions(source_uuid):
147 source = get_or_404(Source, source_uuid, column=Source.uuid)
148 return jsonify(
149 {'submissions': [submission.to_json() for
150 submission in source.submissions]}), 200
151
152 @api.route('/sources/<source_uuid>/submissions/<submission_uuid>/download', # noqa
153 methods=['GET'])
154 @token_required
155 def download_submission(source_uuid, submission_uuid):
156 source = get_or_404(Source, source_uuid, column=Source.uuid)
157 submission = get_or_404(Submission, submission_uuid,
158 column=Submission.uuid)
159
160 # Mark as downloaded
161 submission.downloaded = True
162 db.session.commit()
163
164 response = send_file(current_app.storage.path(source.filesystem_id,
165 submission.filename),
166 mimetype="application/pgp-encrypted",
167 as_attachment=True,
168 add_etags=False) # Disable Flask default ETag
169
170 response.direct_passthrough = False
171 response.headers['Etag'] = '"sha256:{}"'.format(
172 hashlib.sha256(response.get_data()).hexdigest())
173 return response
174
175 @api.route('/sources/<source_uuid>/submissions/<submission_uuid>',
176 methods=['GET', 'DELETE'])
177 @token_required
178 def single_submission(source_uuid, submission_uuid):
179 if request.method == 'GET':
180 submission = get_or_404(Submission, submission_uuid,
181 column=Submission.uuid)
182 return jsonify(submission.to_json()), 200
183 elif request.method == 'DELETE':
184 submission = get_or_404(Submission, submission_uuid,
185 column=Submission.uuid)
186 source = get_or_404(Source, source_uuid, column=Source.uuid)
187 utils.delete_file(source.filesystem_id, submission.filename,
188 submission)
189 return jsonify({'message': 'Submission deleted'}), 200
190
191 @api.route('/sources/<source_uuid>/reply', methods=['POST'])
192 @token_required
193 def post_reply(source_uuid):
194 source = get_or_404(Source, source_uuid,
195 column=Source.uuid)
196 if request.json is None:
197 abort(400, 'please send requests in valid JSON')
198
199 if 'reply' not in request.json:
200 abort(400, 'reply not found in request body')
201
202 user = get_user_object(request)
203
204 data = json.loads(request.data)
205 if not data['reply']:
206 abort(400, 'reply should not be empty')
207
208 source.interaction_count += 1
209 try:
210 filename = current_app.storage.save_pre_encrypted_reply(
211 source.filesystem_id,
212 source.interaction_count,
213 source.journalist_filename,
214 data['reply'])
215 except NotEncrypted:
216 return jsonify(
217 {'message': 'You must encrypt replies client side'}), 400
218
219 reply = Reply(user, source,
220 current_app.storage.path(source.filesystem_id, filename))
221 db.session.add(reply)
222 db.session.add(source)
223 db.session.commit()
224 return jsonify({'message': 'Your reply has been stored'}), 201
225
226 @api.route('/submissions', methods=['GET'])
227 @token_required
228 def get_all_submissions():
229 submissions = Submission.query.all()
230 return jsonify({'submissions': [submission.to_json() for
231 submission in submissions]}), 200
232
233 @api.route('/user', methods=['GET'])
234 @token_required
235 def get_current_user():
236 user = get_user_object(request)
237 return jsonify(user.to_json()), 200
238
239 def _handle_http_exception(error):
240 # Workaround for no blueprint-level 404/5 error handlers, see:
241 # https://github.com/pallets/flask/issues/503#issuecomment-71383286
242 response = jsonify({'error': error.name,
243 'message': error.description})
244
245 return response, error.code
246
247 for code in default_exceptions:
248 api.errorhandler(code)(_handle_http_exception)
249
250 return api
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/journalist_app/api.py b/securedrop/journalist_app/api.py
--- a/securedrop/journalist_app/api.py
+++ b/securedrop/journalist_app/api.py
@@ -65,6 +65,21 @@
'auth_token_url': '/api/v1/token'}
return jsonify(endpoints), 200
+ # Before every post, we validate the payload before processing the request
+ @api.before_request
+ def validate_data():
+ if request.method == 'POST':
+ # flag and star can have empty payloads
+ if not request.data:
+ if ('flag' not in request.path and 'star' not in request.path):
+ return abort(400, 'malformed request')
+ # other requests must have valid JSON payload
+ else:
+ try:
+ json.loads(request.data)
+ except (ValueError):
+ return abort(400, 'malformed request')
+
@api.route('/token', methods=['POST'])
def get_token():
creds = json.loads(request.data)
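One detail of the patch worth spelling out: a `before_request` hook that returns a value short-circuits the request, so every POST to the blueprint is rejected up front with a 400 unless it carries valid JSON, while the flag/star routes are exempted because they are POSTs that legitimately send no body. A rough stand-alone sketch of that pattern (route names below are placeholders, not SecureDrop's):

```python
import json

from flask import Blueprint, Flask, abort, jsonify, request

api = Blueprint('api', __name__)

@api.before_request
def validate_data():
    # Returning a response (or aborting) here skips the view entirely.
    if request.method == 'POST' and 'star' not in request.path:
        try:
            json.loads(request.data)
        except ValueError:
            return abort(400, 'malformed request')

@api.route('/echo', methods=['POST'])
def echo():
    return jsonify(json.loads(request.data)), 200

@api.route('/add_star', methods=['POST'])
def add_star():
    return jsonify({'message': 'Star added'}), 201  # empty body is fine here

app = Flask(__name__)
app.register_blueprint(api)

with app.test_client() as client:
    print(client.post('/echo').status_code)                 # 400, blocked by the hook
    print(client.post('/add_star').status_code)             # 201, exempted path
    print(client.post('/echo', json={'a': 1}).status_code)  # 200
```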
| {"golden_diff": "diff --git a/securedrop/journalist_app/api.py b/securedrop/journalist_app/api.py\n--- a/securedrop/journalist_app/api.py\n+++ b/securedrop/journalist_app/api.py\n@@ -65,6 +65,21 @@\n 'auth_token_url': '/api/v1/token'}\n return jsonify(endpoints), 200\n \n+ # Before every post, we validate the payload before processing the request\n+ @api.before_request\n+ def validate_data():\n+ if request.method == 'POST':\n+ # flag and star can have empty payloads\n+ if not request.data:\n+ if ('flag' not in request.path and 'star' not in request.path):\n+ return abort(400, 'malformed request')\n+ # other requests must have valid JSON payload\n+ else:\n+ try:\n+ json.loads(request.data)\n+ except (ValueError):\n+ return abort(400, 'malformed request')\n+\n @api.route('/token', methods=['POST'])\n def get_token():\n creds = json.loads(request.data)\n", "issue": "Journalist API returns error 500 when JSON in malformed or absent\n## Description\r\n\r\n## Steps to Reproduce\r\n\r\n1. `make dev`\r\n2. `curl -X POST 127.0.0.1:8081/api/v1/token`\r\n\r\n## Expected Behavior\r\n\r\nThe server should return an error 400 stating that the request is invalid.\r\n\r\n## Actual Behavior\r\n\r\nThe server returns an error 500 stating that there is a server error.\r\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom functools import wraps\nimport hashlib\nimport json\nfrom werkzeug.exceptions import default_exceptions # type: ignore\n\nfrom flask import abort, Blueprint, current_app, jsonify, request, send_file\n\nfrom db import db\nfrom journalist_app import utils\nfrom models import (Journalist, Reply, Source, Submission,\n LoginThrottledException, InvalidUsernameException,\n BadTokenException, WrongPasswordException)\nfrom store import NotEncrypted\n\n\nTOKEN_EXPIRATION_MINS = 60 * 8\n\n\ndef get_user_object(request):\n \"\"\"Helper function to use in token_required views that need a user\n object\n \"\"\"\n auth_token = request.headers.get('Authorization').split(\" \")[1]\n user = Journalist.validate_api_token_and_get_user(auth_token)\n return user\n\n\ndef token_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n auth_header = request.headers['Authorization']\n except KeyError:\n return abort(403, 'API token not found in Authorization header.')\n\n if auth_header:\n auth_token = auth_header.split(\" \")[1]\n else:\n auth_token = ''\n if not Journalist.validate_api_token_and_get_user(auth_token):\n return abort(403, 'API token is invalid or expired.')\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef get_or_404(model, object_id, column=''):\n if column:\n result = model.query.filter(column == object_id).one_or_none()\n else:\n result = model.query.get(object_id)\n if result is None:\n abort(404)\n return result\n\n\ndef make_blueprint(config):\n api = Blueprint('api', __name__)\n\n @api.route('/')\n def get_endpoints():\n endpoints = {'sources_url': '/api/v1/sources',\n 'current_user_url': '/api/v1/user',\n 'submissions_url': '/api/v1/submissions',\n 'auth_token_url': '/api/v1/token'}\n return jsonify(endpoints), 200\n\n @api.route('/token', methods=['POST'])\n def get_token():\n creds = json.loads(request.data)\n\n username = creds.get('username', None)\n passphrase = creds.get('passphrase', None)\n one_time_code = creds.get('one_time_code', None)\n\n if username is None:\n return abort(400, 'username field is missing')\n if passphrase is None:\n return abort(400, 'passphrase field is missing')\n if one_time_code is None:\n 
return abort(400, 'one_time_code field is missing')\n\n try:\n journalist = Journalist.login(username, passphrase, one_time_code)\n token_expiry = datetime.utcnow() + timedelta(\n seconds=TOKEN_EXPIRATION_MINS * 60)\n response = jsonify({'token': journalist.generate_api_token(\n expiration=TOKEN_EXPIRATION_MINS * 60),\n 'expiration': token_expiry.isoformat() + 'Z'})\n\n # Update access metadata\n journalist.last_access = datetime.utcnow()\n db.session.add(journalist)\n db.session.commit()\n\n return response, 200\n except (LoginThrottledException, InvalidUsernameException,\n BadTokenException, WrongPasswordException):\n return abort(403, 'Token authentication failed.')\n\n @api.route('/sources', methods=['GET'])\n @token_required\n def get_all_sources():\n sources = Source.query.filter_by(pending=False).all()\n return jsonify(\n {'sources': [source.to_json() for source in sources]}), 200\n\n @api.route('/sources/<source_uuid>', methods=['GET', 'DELETE'])\n @token_required\n def single_source(source_uuid):\n if request.method == 'GET':\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n return jsonify(source.to_json()), 200\n elif request.method == 'DELETE':\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n utils.delete_collection(source.filesystem_id)\n return jsonify({'message': 'Source and submissions deleted'}), 200\n\n @api.route('/sources/<source_uuid>/add_star', methods=['POST'])\n @token_required\n def add_star(source_uuid):\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n utils.make_star_true(source.filesystem_id)\n db.session.commit()\n return jsonify({'message': 'Star added'}), 201\n\n @api.route('/sources/<source_uuid>/remove_star', methods=['DELETE'])\n @token_required\n def remove_star(source_uuid):\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n utils.make_star_false(source.filesystem_id)\n db.session.commit()\n return jsonify({'message': 'Star removed'}), 200\n\n @api.route('/sources/<source_uuid>/flag', methods=['POST'])\n @token_required\n def flag(source_uuid):\n source = get_or_404(Source, source_uuid,\n column=Source.uuid)\n source.flagged = True\n db.session.commit()\n return jsonify({'message': 'Source flagged for reply'}), 200\n\n @api.route('/sources/<source_uuid>/submissions', methods=['GET'])\n @token_required\n def all_source_submissions(source_uuid):\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n return jsonify(\n {'submissions': [submission.to_json() for\n submission in source.submissions]}), 200\n\n @api.route('/sources/<source_uuid>/submissions/<submission_uuid>/download', # noqa\n methods=['GET'])\n @token_required\n def download_submission(source_uuid, submission_uuid):\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n submission = get_or_404(Submission, submission_uuid,\n column=Submission.uuid)\n\n # Mark as downloaded\n submission.downloaded = True\n db.session.commit()\n\n response = send_file(current_app.storage.path(source.filesystem_id,\n submission.filename),\n mimetype=\"application/pgp-encrypted\",\n as_attachment=True,\n add_etags=False) # Disable Flask default ETag\n\n response.direct_passthrough = False\n response.headers['Etag'] = '\"sha256:{}\"'.format(\n hashlib.sha256(response.get_data()).hexdigest())\n return response\n\n @api.route('/sources/<source_uuid>/submissions/<submission_uuid>',\n methods=['GET', 'DELETE'])\n @token_required\n def single_submission(source_uuid, submission_uuid):\n if request.method == 'GET':\n submission = 
get_or_404(Submission, submission_uuid,\n column=Submission.uuid)\n return jsonify(submission.to_json()), 200\n elif request.method == 'DELETE':\n submission = get_or_404(Submission, submission_uuid,\n column=Submission.uuid)\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n utils.delete_file(source.filesystem_id, submission.filename,\n submission)\n return jsonify({'message': 'Submission deleted'}), 200\n\n @api.route('/sources/<source_uuid>/reply', methods=['POST'])\n @token_required\n def post_reply(source_uuid):\n source = get_or_404(Source, source_uuid,\n column=Source.uuid)\n if request.json is None:\n abort(400, 'please send requests in valid JSON')\n\n if 'reply' not in request.json:\n abort(400, 'reply not found in request body')\n\n user = get_user_object(request)\n\n data = json.loads(request.data)\n if not data['reply']:\n abort(400, 'reply should not be empty')\n\n source.interaction_count += 1\n try:\n filename = current_app.storage.save_pre_encrypted_reply(\n source.filesystem_id,\n source.interaction_count,\n source.journalist_filename,\n data['reply'])\n except NotEncrypted:\n return jsonify(\n {'message': 'You must encrypt replies client side'}), 400\n\n reply = Reply(user, source,\n current_app.storage.path(source.filesystem_id, filename))\n db.session.add(reply)\n db.session.add(source)\n db.session.commit()\n return jsonify({'message': 'Your reply has been stored'}), 201\n\n @api.route('/submissions', methods=['GET'])\n @token_required\n def get_all_submissions():\n submissions = Submission.query.all()\n return jsonify({'submissions': [submission.to_json() for\n submission in submissions]}), 200\n\n @api.route('/user', methods=['GET'])\n @token_required\n def get_current_user():\n user = get_user_object(request)\n return jsonify(user.to_json()), 200\n\n def _handle_http_exception(error):\n # Workaround for no blueprint-level 404/5 error handlers, see:\n # https://github.com/pallets/flask/issues/503#issuecomment-71383286\n response = jsonify({'error': error.name,\n 'message': error.description})\n\n return response, error.code\n\n for code in default_exceptions:\n api.errorhandler(code)(_handle_http_exception)\n\n return api\n", "path": "securedrop/journalist_app/api.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\nfrom functools import wraps\nimport hashlib\nimport json\nfrom werkzeug.exceptions import default_exceptions # type: ignore\n\nfrom flask import abort, Blueprint, current_app, jsonify, request, send_file\n\nfrom db import db\nfrom journalist_app import utils\nfrom models import (Journalist, Reply, Source, Submission,\n LoginThrottledException, InvalidUsernameException,\n BadTokenException, WrongPasswordException)\nfrom store import NotEncrypted\n\n\nTOKEN_EXPIRATION_MINS = 60 * 8\n\n\ndef get_user_object(request):\n \"\"\"Helper function to use in token_required views that need a user\n object\n \"\"\"\n auth_token = request.headers.get('Authorization').split(\" \")[1]\n user = Journalist.validate_api_token_and_get_user(auth_token)\n return user\n\n\ndef token_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n auth_header = request.headers['Authorization']\n except KeyError:\n return abort(403, 'API token not found in Authorization header.')\n\n if auth_header:\n auth_token = auth_header.split(\" \")[1]\n else:\n auth_token = ''\n if not Journalist.validate_api_token_and_get_user(auth_token):\n return abort(403, 'API token is invalid or expired.')\n return f(*args, **kwargs)\n return 
decorated_function\n\n\ndef get_or_404(model, object_id, column=''):\n if column:\n result = model.query.filter(column == object_id).one_or_none()\n else:\n result = model.query.get(object_id)\n if result is None:\n abort(404)\n return result\n\n\ndef make_blueprint(config):\n api = Blueprint('api', __name__)\n\n @api.route('/')\n def get_endpoints():\n endpoints = {'sources_url': '/api/v1/sources',\n 'current_user_url': '/api/v1/user',\n 'submissions_url': '/api/v1/submissions',\n 'auth_token_url': '/api/v1/token'}\n return jsonify(endpoints), 200\n\n # Before every post, we validate the payload before processing the request\n @api.before_request\n def validate_data():\n if request.method == 'POST':\n # flag and star can have empty payloads\n if not request.data:\n if ('flag' not in request.path and 'star' not in request.path):\n return abort(400, 'malformed request')\n # other requests must have valid JSON payload\n else:\n try:\n json.loads(request.data)\n except (ValueError):\n return abort(400, 'malformed request')\n\n @api.route('/token', methods=['POST'])\n def get_token():\n creds = json.loads(request.data)\n\n username = creds.get('username', None)\n passphrase = creds.get('passphrase', None)\n one_time_code = creds.get('one_time_code', None)\n\n if username is None:\n return abort(400, 'username field is missing')\n if passphrase is None:\n return abort(400, 'passphrase field is missing')\n if one_time_code is None:\n return abort(400, 'one_time_code field is missing')\n\n try:\n journalist = Journalist.login(username, passphrase, one_time_code)\n token_expiry = datetime.utcnow() + timedelta(\n seconds=TOKEN_EXPIRATION_MINS * 60)\n response = jsonify({'token': journalist.generate_api_token(\n expiration=TOKEN_EXPIRATION_MINS * 60),\n 'expiration': token_expiry.isoformat() + 'Z'})\n\n # Update access metadata\n journalist.last_access = datetime.utcnow()\n db.session.add(journalist)\n db.session.commit()\n\n return response, 200\n except (LoginThrottledException, InvalidUsernameException,\n BadTokenException, WrongPasswordException):\n return abort(403, 'Token authentication failed.')\n\n @api.route('/sources', methods=['GET'])\n @token_required\n def get_all_sources():\n sources = Source.query.filter_by(pending=False).all()\n return jsonify(\n {'sources': [source.to_json() for source in sources]}), 200\n\n @api.route('/sources/<source_uuid>', methods=['GET', 'DELETE'])\n @token_required\n def single_source(source_uuid):\n if request.method == 'GET':\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n return jsonify(source.to_json()), 200\n elif request.method == 'DELETE':\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n utils.delete_collection(source.filesystem_id)\n return jsonify({'message': 'Source and submissions deleted'}), 200\n\n @api.route('/sources/<source_uuid>/add_star', methods=['POST'])\n @token_required\n def add_star(source_uuid):\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n utils.make_star_true(source.filesystem_id)\n db.session.commit()\n return jsonify({'message': 'Star added'}), 201\n\n @api.route('/sources/<source_uuid>/remove_star', methods=['DELETE'])\n @token_required\n def remove_star(source_uuid):\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n utils.make_star_false(source.filesystem_id)\n db.session.commit()\n return jsonify({'message': 'Star removed'}), 200\n\n @api.route('/sources/<source_uuid>/flag', methods=['POST'])\n @token_required\n def flag(source_uuid):\n source = 
get_or_404(Source, source_uuid,\n column=Source.uuid)\n source.flagged = True\n db.session.commit()\n return jsonify({'message': 'Source flagged for reply'}), 200\n\n @api.route('/sources/<source_uuid>/submissions', methods=['GET'])\n @token_required\n def all_source_submissions(source_uuid):\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n return jsonify(\n {'submissions': [submission.to_json() for\n submission in source.submissions]}), 200\n\n @api.route('/sources/<source_uuid>/submissions/<submission_uuid>/download', # noqa\n methods=['GET'])\n @token_required\n def download_submission(source_uuid, submission_uuid):\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n submission = get_or_404(Submission, submission_uuid,\n column=Submission.uuid)\n\n # Mark as downloaded\n submission.downloaded = True\n db.session.commit()\n\n response = send_file(current_app.storage.path(source.filesystem_id,\n submission.filename),\n mimetype=\"application/pgp-encrypted\",\n as_attachment=True,\n add_etags=False) # Disable Flask default ETag\n\n response.direct_passthrough = False\n response.headers['Etag'] = '\"sha256:{}\"'.format(\n hashlib.sha256(response.get_data()).hexdigest())\n return response\n\n @api.route('/sources/<source_uuid>/submissions/<submission_uuid>',\n methods=['GET', 'DELETE'])\n @token_required\n def single_submission(source_uuid, submission_uuid):\n if request.method == 'GET':\n submission = get_or_404(Submission, submission_uuid,\n column=Submission.uuid)\n return jsonify(submission.to_json()), 200\n elif request.method == 'DELETE':\n submission = get_or_404(Submission, submission_uuid,\n column=Submission.uuid)\n source = get_or_404(Source, source_uuid, column=Source.uuid)\n utils.delete_file(source.filesystem_id, submission.filename,\n submission)\n return jsonify({'message': 'Submission deleted'}), 200\n\n @api.route('/sources/<source_uuid>/reply', methods=['POST'])\n @token_required\n def post_reply(source_uuid):\n source = get_or_404(Source, source_uuid,\n column=Source.uuid)\n if request.json is None:\n abort(400, 'please send requests in valid JSON')\n\n if 'reply' not in request.json:\n abort(400, 'reply not found in request body')\n\n user = get_user_object(request)\n\n data = json.loads(request.data)\n if not data['reply']:\n abort(400, 'reply should not be empty')\n\n source.interaction_count += 1\n try:\n filename = current_app.storage.save_pre_encrypted_reply(\n source.filesystem_id,\n source.interaction_count,\n source.journalist_filename,\n data['reply'])\n except NotEncrypted:\n return jsonify(\n {'message': 'You must encrypt replies client side'}), 400\n\n reply = Reply(user, source,\n current_app.storage.path(source.filesystem_id, filename))\n db.session.add(reply)\n db.session.add(source)\n db.session.commit()\n return jsonify({'message': 'Your reply has been stored'}), 201\n\n @api.route('/submissions', methods=['GET'])\n @token_required\n def get_all_submissions():\n submissions = Submission.query.all()\n return jsonify({'submissions': [submission.to_json() for\n submission in submissions]}), 200\n\n @api.route('/user', methods=['GET'])\n @token_required\n def get_current_user():\n user = get_user_object(request)\n return jsonify(user.to_json()), 200\n\n def _handle_http_exception(error):\n # Workaround for no blueprint-level 404/5 error handlers, see:\n # https://github.com/pallets/flask/issues/503#issuecomment-71383286\n response = jsonify({'error': error.name,\n 'message': error.description})\n\n return response, error.code\n\n 
for code in default_exceptions:\n api.errorhandler(code)(_handle_http_exception)\n\n return api\n", "path": "securedrop/journalist_app/api.py"}]} | 3,074 | 245 |
gh_patches_debug_42135 | rasdani/github-patches | git_diff | ultralytics__yolov5-8993 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImportError: cannot import name 'draw_bounding_boxes' from 'torchvision.utils'
### Search before asking
- [X] I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report.
### YOLOv5 Component
Training
### Bug

### Environment
_No response_
### Minimal Reproducible Example
_No response_
### Additional
_No response_
### Are you willing to submit a PR?
- [ ] Yes I'd like to help by submitting a PR!
--- END ISSUE ---
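`draw_bounding_boxes` only exists in torchvision 0.9.0 and newer; the `clearml_utils.py` listing below even marks that import with a `WARNING: requires torchvision>=0.9.0` comment, so on older torchvision installs the name cannot be imported, which matches the error in the report. The project's actual fix (see the diff at the end of this record) removes the torchvision dependency and draws boxes with the repo's own `Annotator`; if one merely wanted to tolerate an old torchvision, a defensive import could look like the sketch below (illustrative only, not the shipped fix):

```python
# Guarded import; draw_bounding_boxes was added in torchvision 0.9.0.
try:
    from torchvision.utils import draw_bounding_boxes
except ImportError:
    draw_bounding_boxes = None  # fall back to another drawing path when missing

def torchvision_can_draw_boxes() -> bool:
    """True when the installed torchvision provides draw_bounding_boxes."""
    return draw_bounding_boxes is not None
```

In practice the Annotator-based approach taken in the diff is the cleaner design choice, since it keeps the ClearML logger working regardless of the installed torchvision version.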
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/loggers/clearml/clearml_utils.py`
Content:
```
1 """Main Logger class for ClearML experiment tracking."""
2 import glob
3 import re
4 from pathlib import Path
5
6 import yaml
7 from torchvision.transforms import ToPILImage
8
9 try:
10 import clearml
11 from clearml import Dataset, Task
12 from torchvision.utils import draw_bounding_boxes # WARNING: requires torchvision>=0.9.0
13
14 assert hasattr(clearml, '__version__') # verify package import not local dir
15 except (ImportError, AssertionError):
16 clearml = None
17
18
19 def construct_dataset(clearml_info_string):
20 dataset_id = clearml_info_string.replace('clearml://', '')
21 dataset = Dataset.get(dataset_id=dataset_id)
22 dataset_root_path = Path(dataset.get_local_copy())
23
24 # We'll search for the yaml file definition in the dataset
25 yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml")))
26 if len(yaml_filenames) > 1:
27 raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains '
28 'the dataset definition this way.')
29 elif len(yaml_filenames) == 0:
30 raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file '
31 'inside the dataset root path.')
32 with open(yaml_filenames[0]) as f:
33 dataset_definition = yaml.safe_load(f)
34
35 assert set(dataset_definition.keys()).issuperset(
36 {'train', 'test', 'val', 'nc', 'names'}
37 ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')"
38
39 data_dict = dict()
40 data_dict['train'] = str(
41 (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None
42 data_dict['test'] = str(
43 (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None
44 data_dict['val'] = str(
45 (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None
46 data_dict['nc'] = dataset_definition['nc']
47 data_dict['names'] = dataset_definition['names']
48
49 return data_dict
50
51
52 class ClearmlLogger:
53 """Log training runs, datasets, models, and predictions to ClearML.
54
55 This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default,
56 this information includes hyperparameters, system configuration and metrics, model metrics, code information and
57 basic data metrics and analyses.
58
59 By providing additional command line arguments to train.py, datasets,
60 models and predictions can also be logged.
61 """
62
63 def __init__(self, opt, hyp):
64 """
65 - Initialize ClearML Task, this object will capture the experiment
66 - Upload dataset version to ClearML Data if opt.upload_dataset is True
67
68 arguments:
69 opt (namespace) -- Commandline arguments for this run
70 hyp (dict) -- Hyperparameters for this run
71
72 """
73 self.current_epoch = 0
74 # Keep tracked of amount of logged images to enforce a limit
75 self.current_epoch_logged_images = set()
76 # Maximum number of images to log to clearML per epoch
77 self.max_imgs_to_log_per_epoch = 16
78 # Get the interval of epochs when bounding box images should be logged
79 self.bbox_interval = opt.bbox_interval
80 self.clearml = clearml
81 self.task = None
82 self.data_dict = None
83 if self.clearml:
84 self.task = Task.init(
85 project_name='YOLOv5',
86 task_name='training',
87 tags=['YOLOv5'],
88 output_uri=True,
89 auto_connect_frameworks={'pytorch': False}
90 # We disconnect pytorch auto-detection, because we added manual model save points in the code
91 )
92 # ClearML's hooks will already grab all general parameters
93 # Only the hyperparameters coming from the yaml config file
94 # will have to be added manually!
95 self.task.connect(hyp, name='Hyperparameters')
96
97 # Get ClearML Dataset Version if requested
98 if opt.data.startswith('clearml://'):
99 # data_dict should have the following keys:
100 # names, nc (number of classes), test, train, val (all three relative paths to ../datasets)
101 self.data_dict = construct_dataset(opt.data)
102 # Set data to data_dict because wandb will crash without this information and opt is the best way
103 # to give it to them
104 opt.data = self.data_dict
105
106 def log_debug_samples(self, files, title='Debug Samples'):
107 """
108 Log files (images) as debug samples in the ClearML task.
109
110 arguments:
111 files (List(PosixPath)) a list of file paths in PosixPath format
112 title (str) A title that groups together images with the same values
113 """
114 for f in files:
115 if f.exists():
116 it = re.search(r'_batch(\d+)', f.name)
117 iteration = int(it.groups()[0]) if it else 0
118 self.task.get_logger().report_image(title=title,
119 series=f.name.replace(it.group(), ''),
120 local_path=str(f),
121 iteration=iteration)
122
123 def log_image_with_boxes(self, image_path, boxes, class_names, image):
124 """
125 Draw the bounding boxes on a single image and report the result as a ClearML debug sample
126
127 arguments:
128 image_path (PosixPath) the path the original image file
129 boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
130 class_names (dict): dict containing mapping of class int to class name
131 image (Tensor): A torch tensor containing the actual image data
132 """
133 if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:
134 # Log every bbox_interval times and deduplicate for any intermittend extra eval runs
135 if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:
136 converter = ToPILImage()
137 labels = []
138 for conf, class_nr in zip(boxes[:, 4], boxes[:, 5]):
139 class_name = class_names[int(class_nr)]
140 confidence = round(float(conf) * 100, 2)
141 labels.append(f"{class_name}: {confidence}%")
142 annotated_image = converter(
143 draw_bounding_boxes(image=image.mul(255).clamp(0, 255).byte().cpu(),
144 boxes=boxes[:, :4],
145 labels=labels))
146 self.task.get_logger().report_image(title='Bounding Boxes',
147 series=image_path.name,
148 iteration=self.current_epoch,
149 image=annotated_image)
150 self.current_epoch_logged_images.add(image_path)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py
--- a/utils/loggers/clearml/clearml_utils.py
+++ b/utils/loggers/clearml/clearml_utils.py
@@ -3,20 +3,22 @@
import re
from pathlib import Path
+import numpy as np
import yaml
-from torchvision.transforms import ToPILImage
+
+from utils.plots import Annotator, colors
try:
import clearml
from clearml import Dataset, Task
- from torchvision.utils import draw_bounding_boxes # WARNING: requires torchvision>=0.9.0
-
assert hasattr(clearml, '__version__') # verify package import not local dir
except (ImportError, AssertionError):
clearml = None
def construct_dataset(clearml_info_string):
+ """Load in a clearml dataset and fill the internal data_dict with its contents.
+ """
dataset_id = clearml_info_string.replace('clearml://', '')
dataset = Dataset.get(dataset_id=dataset_id)
dataset_root_path = Path(dataset.get_local_copy())
@@ -120,9 +122,9 @@
local_path=str(f),
iteration=iteration)
- def log_image_with_boxes(self, image_path, boxes, class_names, image):
+ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):
"""
- Draw the bounding boxes on a single image and report the result as a ClearML debug sample
+ Draw the bounding boxes on a single image and report the result as a ClearML debug sample.
arguments:
image_path (PosixPath) the path the original image file
@@ -133,16 +135,20 @@
if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:
# Log every bbox_interval times and deduplicate for any intermittend extra eval runs
if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:
- converter = ToPILImage()
- labels = []
- for conf, class_nr in zip(boxes[:, 4], boxes[:, 5]):
+ im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))
+ annotator = Annotator(im=im, pil=True)
+ for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):
+ color = colors(i)
+
class_name = class_names[int(class_nr)]
confidence = round(float(conf) * 100, 2)
- labels.append(f"{class_name}: {confidence}%")
- annotated_image = converter(
- draw_bounding_boxes(image=image.mul(255).clamp(0, 255).byte().cpu(),
- boxes=boxes[:, :4],
- labels=labels))
+ label = f"{class_name}: {confidence}%"
+
+ if confidence > conf_threshold:
+ annotator.rectangle(box.cpu().numpy(), outline=color)
+ annotator.box_label(box.cpu().numpy(), label=label, color=color)
+
+ annotated_image = annotator.result()
self.task.get_logger().report_image(title='Bounding Boxes',
series=image_path.name,
iteration=self.current_epoch,
| {"golden_diff": "diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py\n--- a/utils/loggers/clearml/clearml_utils.py\n+++ b/utils/loggers/clearml/clearml_utils.py\n@@ -3,20 +3,22 @@\n import re\n from pathlib import Path\n \n+import numpy as np\n import yaml\n-from torchvision.transforms import ToPILImage\n+\n+from utils.plots import Annotator, colors\n \n try:\n import clearml\n from clearml import Dataset, Task\n- from torchvision.utils import draw_bounding_boxes # WARNING: requires torchvision>=0.9.0\n-\n assert hasattr(clearml, '__version__') # verify package import not local dir\n except (ImportError, AssertionError):\n clearml = None\n \n \n def construct_dataset(clearml_info_string):\n+ \"\"\"Load in a clearml dataset and fill the internal data_dict with its contents.\n+ \"\"\"\n dataset_id = clearml_info_string.replace('clearml://', '')\n dataset = Dataset.get(dataset_id=dataset_id)\n dataset_root_path = Path(dataset.get_local_copy())\n@@ -120,9 +122,9 @@\n local_path=str(f),\n iteration=iteration)\n \n- def log_image_with_boxes(self, image_path, boxes, class_names, image):\n+ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):\n \"\"\"\n- Draw the bounding boxes on a single image and report the result as a ClearML debug sample\n+ Draw the bounding boxes on a single image and report the result as a ClearML debug sample.\n \n arguments:\n image_path (PosixPath) the path the original image file\n@@ -133,16 +135,20 @@\n if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:\n # Log every bbox_interval times and deduplicate for any intermittend extra eval runs\n if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:\n- converter = ToPILImage()\n- labels = []\n- for conf, class_nr in zip(boxes[:, 4], boxes[:, 5]):\n+ im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))\n+ annotator = Annotator(im=im, pil=True)\n+ for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):\n+ color = colors(i)\n+\n class_name = class_names[int(class_nr)]\n confidence = round(float(conf) * 100, 2)\n- labels.append(f\"{class_name}: {confidence}%\")\n- annotated_image = converter(\n- draw_bounding_boxes(image=image.mul(255).clamp(0, 255).byte().cpu(),\n- boxes=boxes[:, :4],\n- labels=labels))\n+ label = f\"{class_name}: {confidence}%\"\n+\n+ if confidence > conf_threshold:\n+ annotator.rectangle(box.cpu().numpy(), outline=color)\n+ annotator.box_label(box.cpu().numpy(), label=label, color=color)\n+\n+ annotated_image = annotator.result()\n self.task.get_logger().report_image(title='Bounding Boxes',\n series=image_path.name,\n iteration=self.current_epoch,\n", "issue": "ImportError: cannot import name 'draw_bounding_boxes' from 'torchvision.utils'\n### Search before asking\n\n- [X] I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report.\n\n\n### YOLOv5 Component\n\nTraining\n\n### Bug\n\n\r\n\n\n### Environment\n\n_No response_\n\n### Minimal Reproducible Example\n\n_No response_\n\n### Additional\n\n_No response_\n\n### Are you willing to submit a PR?\n\n- [ ] Yes I'd like to help by submitting a PR!\n", "before_files": [{"content": "\"\"\"Main Logger class for ClearML experiment tracking.\"\"\"\nimport glob\nimport re\nfrom pathlib import Path\n\nimport yaml\nfrom torchvision.transforms import 
ToPILImage\n\ntry:\n import clearml\n from clearml import Dataset, Task\n from torchvision.utils import draw_bounding_boxes # WARNING: requires torchvision>=0.9.0\n\n assert hasattr(clearml, '__version__') # verify package import not local dir\nexcept (ImportError, AssertionError):\n clearml = None\n\n\ndef construct_dataset(clearml_info_string):\n dataset_id = clearml_info_string.replace('clearml://', '')\n dataset = Dataset.get(dataset_id=dataset_id)\n dataset_root_path = Path(dataset.get_local_copy())\n\n # We'll search for the yaml file definition in the dataset\n yaml_filenames = list(glob.glob(str(dataset_root_path / \"*.yaml\")) + glob.glob(str(dataset_root_path / \"*.yml\")))\n if len(yaml_filenames) > 1:\n raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains '\n 'the dataset definition this way.')\n elif len(yaml_filenames) == 0:\n raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file '\n 'inside the dataset root path.')\n with open(yaml_filenames[0]) as f:\n dataset_definition = yaml.safe_load(f)\n\n assert set(dataset_definition.keys()).issuperset(\n {'train', 'test', 'val', 'nc', 'names'}\n ), \"The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')\"\n\n data_dict = dict()\n data_dict['train'] = str(\n (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None\n data_dict['test'] = str(\n (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None\n data_dict['val'] = str(\n (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None\n data_dict['nc'] = dataset_definition['nc']\n data_dict['names'] = dataset_definition['names']\n\n return data_dict\n\n\nclass ClearmlLogger:\n \"\"\"Log training runs, datasets, models, and predictions to ClearML.\n\n This logger sends information to ClearML at app.clear.ml or to your own hosted server. 
By default,\n this information includes hyperparameters, system configuration and metrics, model metrics, code information and\n basic data metrics and analyses.\n\n By providing additional command line arguments to train.py, datasets,\n models and predictions can also be logged.\n \"\"\"\n\n def __init__(self, opt, hyp):\n \"\"\"\n - Initialize ClearML Task, this object will capture the experiment\n - Upload dataset version to ClearML Data if opt.upload_dataset is True\n\n arguments:\n opt (namespace) -- Commandline arguments for this run\n hyp (dict) -- Hyperparameters for this run\n\n \"\"\"\n self.current_epoch = 0\n # Keep tracked of amount of logged images to enforce a limit\n self.current_epoch_logged_images = set()\n # Maximum number of images to log to clearML per epoch\n self.max_imgs_to_log_per_epoch = 16\n # Get the interval of epochs when bounding box images should be logged\n self.bbox_interval = opt.bbox_interval\n self.clearml = clearml\n self.task = None\n self.data_dict = None\n if self.clearml:\n self.task = Task.init(\n project_name='YOLOv5',\n task_name='training',\n tags=['YOLOv5'],\n output_uri=True,\n auto_connect_frameworks={'pytorch': False}\n # We disconnect pytorch auto-detection, because we added manual model save points in the code\n )\n # ClearML's hooks will already grab all general parameters\n # Only the hyperparameters coming from the yaml config file\n # will have to be added manually!\n self.task.connect(hyp, name='Hyperparameters')\n\n # Get ClearML Dataset Version if requested\n if opt.data.startswith('clearml://'):\n # data_dict should have the following keys:\n # names, nc (number of classes), test, train, val (all three relative paths to ../datasets)\n self.data_dict = construct_dataset(opt.data)\n # Set data to data_dict because wandb will crash without this information and opt is the best way\n # to give it to them\n opt.data = self.data_dict\n\n def log_debug_samples(self, files, title='Debug Samples'):\n \"\"\"\n Log files (images) as debug samples in the ClearML task.\n\n arguments:\n files (List(PosixPath)) a list of file paths in PosixPath format\n title (str) A title that groups together images with the same values\n \"\"\"\n for f in files:\n if f.exists():\n it = re.search(r'_batch(\\d+)', f.name)\n iteration = int(it.groups()[0]) if it else 0\n self.task.get_logger().report_image(title=title,\n series=f.name.replace(it.group(), ''),\n local_path=str(f),\n iteration=iteration)\n\n def log_image_with_boxes(self, image_path, boxes, class_names, image):\n \"\"\"\n Draw the bounding boxes on a single image and report the result as a ClearML debug sample\n\n arguments:\n image_path (PosixPath) the path the original image file\n boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]\n class_names (dict): dict containing mapping of class int to class name\n image (Tensor): A torch tensor containing the actual image data\n \"\"\"\n if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:\n # Log every bbox_interval times and deduplicate for any intermittend extra eval runs\n if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:\n converter = ToPILImage()\n labels = []\n for conf, class_nr in zip(boxes[:, 4], boxes[:, 5]):\n class_name = class_names[int(class_nr)]\n confidence = round(float(conf) * 100, 2)\n labels.append(f\"{class_name}: {confidence}%\")\n annotated_image = converter(\n 
draw_bounding_boxes(image=image.mul(255).clamp(0, 255).byte().cpu(),\n boxes=boxes[:, :4],\n labels=labels))\n self.task.get_logger().report_image(title='Bounding Boxes',\n series=image_path.name,\n iteration=self.current_epoch,\n image=annotated_image)\n self.current_epoch_logged_images.add(image_path)\n", "path": "utils/loggers/clearml/clearml_utils.py"}], "after_files": [{"content": "\"\"\"Main Logger class for ClearML experiment tracking.\"\"\"\nimport glob\nimport re\nfrom pathlib import Path\n\nimport numpy as np\nimport yaml\n\nfrom utils.plots import Annotator, colors\n\ntry:\n import clearml\n from clearml import Dataset, Task\n assert hasattr(clearml, '__version__') # verify package import not local dir\nexcept (ImportError, AssertionError):\n clearml = None\n\n\ndef construct_dataset(clearml_info_string):\n \"\"\"Load in a clearml dataset and fill the internal data_dict with its contents.\n \"\"\"\n dataset_id = clearml_info_string.replace('clearml://', '')\n dataset = Dataset.get(dataset_id=dataset_id)\n dataset_root_path = Path(dataset.get_local_copy())\n\n # We'll search for the yaml file definition in the dataset\n yaml_filenames = list(glob.glob(str(dataset_root_path / \"*.yaml\")) + glob.glob(str(dataset_root_path / \"*.yml\")))\n if len(yaml_filenames) > 1:\n raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains '\n 'the dataset definition this way.')\n elif len(yaml_filenames) == 0:\n raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file '\n 'inside the dataset root path.')\n with open(yaml_filenames[0]) as f:\n dataset_definition = yaml.safe_load(f)\n\n assert set(dataset_definition.keys()).issuperset(\n {'train', 'test', 'val', 'nc', 'names'}\n ), \"The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')\"\n\n data_dict = dict()\n data_dict['train'] = str(\n (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None\n data_dict['test'] = str(\n (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None\n data_dict['val'] = str(\n (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None\n data_dict['nc'] = dataset_definition['nc']\n data_dict['names'] = dataset_definition['names']\n\n return data_dict\n\n\nclass ClearmlLogger:\n \"\"\"Log training runs, datasets, models, and predictions to ClearML.\n\n This logger sends information to ClearML at app.clear.ml or to your own hosted server. 
By default,\n this information includes hyperparameters, system configuration and metrics, model metrics, code information and\n basic data metrics and analyses.\n\n By providing additional command line arguments to train.py, datasets,\n models and predictions can also be logged.\n \"\"\"\n\n def __init__(self, opt, hyp):\n \"\"\"\n - Initialize ClearML Task, this object will capture the experiment\n - Upload dataset version to ClearML Data if opt.upload_dataset is True\n\n arguments:\n opt (namespace) -- Commandline arguments for this run\n hyp (dict) -- Hyperparameters for this run\n\n \"\"\"\n self.current_epoch = 0\n # Keep tracked of amount of logged images to enforce a limit\n self.current_epoch_logged_images = set()\n # Maximum number of images to log to clearML per epoch\n self.max_imgs_to_log_per_epoch = 16\n # Get the interval of epochs when bounding box images should be logged\n self.bbox_interval = opt.bbox_interval\n self.clearml = clearml\n self.task = None\n self.data_dict = None\n if self.clearml:\n self.task = Task.init(\n project_name='YOLOv5',\n task_name='training',\n tags=['YOLOv5'],\n output_uri=True,\n auto_connect_frameworks={'pytorch': False}\n # We disconnect pytorch auto-detection, because we added manual model save points in the code\n )\n # ClearML's hooks will already grab all general parameters\n # Only the hyperparameters coming from the yaml config file\n # will have to be added manually!\n self.task.connect(hyp, name='Hyperparameters')\n\n # Get ClearML Dataset Version if requested\n if opt.data.startswith('clearml://'):\n # data_dict should have the following keys:\n # names, nc (number of classes), test, train, val (all three relative paths to ../datasets)\n self.data_dict = construct_dataset(opt.data)\n # Set data to data_dict because wandb will crash without this information and opt is the best way\n # to give it to them\n opt.data = self.data_dict\n\n def log_debug_samples(self, files, title='Debug Samples'):\n \"\"\"\n Log files (images) as debug samples in the ClearML task.\n\n arguments:\n files (List(PosixPath)) a list of file paths in PosixPath format\n title (str) A title that groups together images with the same values\n \"\"\"\n for f in files:\n if f.exists():\n it = re.search(r'_batch(\\d+)', f.name)\n iteration = int(it.groups()[0]) if it else 0\n self.task.get_logger().report_image(title=title,\n series=f.name.replace(it.group(), ''),\n local_path=str(f),\n iteration=iteration)\n\n def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):\n \"\"\"\n Draw the bounding boxes on a single image and report the result as a ClearML debug sample.\n\n arguments:\n image_path (PosixPath) the path the original image file\n boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]\n class_names (dict): dict containing mapping of class int to class name\n image (Tensor): A torch tensor containing the actual image data\n \"\"\"\n if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:\n # Log every bbox_interval times and deduplicate for any intermittend extra eval runs\n if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:\n im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))\n annotator = Annotator(im=im, pil=True)\n for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):\n color = colors(i)\n\n 
class_name = class_names[int(class_nr)]\n confidence = round(float(conf) * 100, 2)\n label = f\"{class_name}: {confidence}%\"\n\n if confidence > conf_threshold:\n annotator.rectangle(box.cpu().numpy(), outline=color)\n annotator.box_label(box.cpu().numpy(), label=label, color=color)\n\n annotated_image = annotator.result()\n self.task.get_logger().report_image(title='Bounding Boxes',\n series=image_path.name,\n iteration=self.current_epoch,\n image=annotated_image)\n self.current_epoch_logged_images.add(image_path)\n", "path": "utils/loggers/clearml/clearml_utils.py"}]} | 2,277 | 771 |
gh_patches_debug_32677 | rasdani/github-patches | git_diff | arviz-devs__arviz-2085 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove deprecated arguments from `plot_pair()`
`plot_kwargs` is an argument of `plot_pair()` yet it is not described in the [documentation](https://python.arviz.org/en/latest/api/generated/arviz.plot_pair.html#arviz.plot_pair) for this function.
**To Reproduce**
Go to the link and search for the description of `plot_kwargs`
**Expected behavior**
A complete docstring where `plot_kwargs` usage and behaviour are described.
--- END ISSUE ---
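
As a quick illustration of the mismatch (assuming an ArviZ release that still accepts the deprecated keyword), the signature can be compared against the docstring directly; the snippet below is an editor's sketch, not part of the original report:

```python
import inspect
import arviz as az

params = inspect.signature(az.plot_pair).parameters
print('plot_kwargs' in params)                        # True: the argument is accepted
print('plot_kwargs' in (az.plot_pair.__doc__ or ''))  # False: it is never documented
```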
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/plots/pairplot.py`
Content:
```
1 """Plot a scatter, kde and/or hexbin of sampled parameters."""
2 import warnings
3 from typing import List, Optional, Union
4
5 import numpy as np
6
7 from ..data import convert_to_dataset
8 from ..labels import BaseLabeller
9 from ..sel_utils import xarray_to_ndarray, xarray_var_iter
10 from .plot_utils import get_plotting_function
11 from ..rcparams import rcParams
12 from ..utils import _var_names, get_coords
13
14
15 def plot_pair(
16 data,
17 group="posterior",
18 var_names: Optional[List[str]] = None,
19 filter_vars: Optional[str] = None,
20 combine_dims=None,
21 coords=None,
22 marginals=False,
23 figsize=None,
24 textsize=None,
25 kind: Union[str, List[str]] = "scatter",
26 gridsize="auto",
27 contour: Optional[bool] = None,
28 plot_kwargs=None,
29 fill_last=False,
30 divergences=False,
31 colorbar=False,
32 labeller=None,
33 ax=None,
34 divergences_kwargs=None,
35 scatter_kwargs=None,
36 kde_kwargs=None,
37 hexbin_kwargs=None,
38 backend=None,
39 backend_kwargs=None,
40 marginal_kwargs=None,
41 point_estimate=None,
42 point_estimate_kwargs=None,
43 point_estimate_marker_kwargs=None,
44 reference_values=None,
45 reference_values_kwargs=None,
46 show=None,
47 ):
48 """
49 Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
50
51 Parameters
52 ----------
53 data: obj
54 Any object that can be converted to an :class:`arviz.InferenceData` object.
55 Refer to documentation of :func:`arviz.convert_to_dataset` for details
56 group: str, optional
57 Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
58 var_names: list of variable names, optional
59 Variables to be plotted, if None all variable are plotted. Prefix the
60 variables by ``~`` when you want to exclude them from the plot.
61 filter_vars: {None, "like", "regex"}, optional, default=None
62 If `None` (default), interpret var_names as the real variables names. If "like",
63 interpret var_names as substrings of the real variables names. If "regex",
64 interpret var_names as regular expressions on the real variables names. A la
65 ``pandas.filter``.
66 combine_dims : set_like of str, optional
67 List of dimensions to reduce. Defaults to reducing only the "chain" and "draw" dimensions.
68 See the :ref:`this section <common_combine_dims>` for usage examples.
69 coords: mapping, optional
70 Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel`.
71 marginals: bool, optional
72 If True pairplot will include marginal distributions for every variable
73 figsize: figure size tuple
74 If None, size is (8 + numvars, 8 + numvars)
75 textsize: int
76 Text size for labels. If None it will be autoscaled based on ``figsize``.
77 kind : str or List[str]
78 Type of plot to display (scatter, kde and/or hexbin)
79 gridsize: int or (int, int), optional
80 Only works for ``kind=hexbin``. The number of hexagons in the x-direction.
81 The corresponding number of hexagons in the y-direction is chosen
82 such that the hexagons are approximately regular. Alternatively, gridsize
83 can be a tuple with two elements specifying the number of hexagons
84 in the x-direction and the y-direction.
85 contour : bool, optional, deprecated, Defaults to True.
86 If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
87 **Note:** this default is implemented in the body of the code, not in argument processing.
88 fill_last : bool
89 If True fill the last contour of the 2D KDE plot. Defaults to True.
90 divergences: Boolean
91 If True divergences will be plotted in a different color, only if group is either 'prior'
92 or 'posterior'.
93 colorbar: bool
94 If True a colorbar will be included as part of the plot (Defaults to False).
95 Only works when ``kind=hexbin``
96 labeller : labeller instance, optional
97 Class providing the method ``make_label_vert`` to generate the labels in the plot.
98 Read the :ref:`label_guide` for more details and usage examples.
99 ax: axes, optional
100 Matplotlib axes or bokeh figures.
101 divergences_kwargs: dicts, optional
102 Additional keywords passed to :meth:`matplotlib.axes.Axes.scatter` for divergences
103 scatter_kwargs:
104 Additional keywords passed to :meth:`matplotlib.axes.Axes.scatter` when using scatter kind
105 kde_kwargs: dict, optional
106 Additional keywords passed to :func:`arviz.plot_kde` when using kde kind
107 hexbin_kwargs: dict, optional
108 Additional keywords passed to :meth:`matplotlib.axes.Axes.hexbin` when
109 using hexbin kind
110 backend: str, optional
111 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
112 backend_kwargs: bool, optional
113 These are kwargs specific to the backend being used, passed to
114 :func:`matplotlib.pyplot.subplots` or
115 :func:`bokeh.plotting.figure`.
116 marginal_kwargs: dict, optional
117 Additional keywords passed to :func:`arviz.plot_dist`, modifying the
118 marginal distributions plotted in the diagonal.
119 point_estimate: str, optional
120 Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be
121 plotted using a scatter marker and vertical/horizontal lines.
122 point_estimate_kwargs: dict, optional
123 Additional keywords passed to :meth:`matplotlib.axes.Axes.axvline`,
124 :meth:`matplotlib.axes.Axes.axhline` (matplotlib) or
125 :class:`bokeh:bokeh.models.Span` (bokeh)
126 point_estimate_marker_kwargs: dict, optional
127 Additional keywords passed to :meth:`matplotlib.axes.Axes.scatter`
128 or :meth:`bokeh:bokeh.plotting.Figure.square` in point
129 estimate plot. Not available in bokeh
130 reference_values: dict, optional
131 Reference values for the plotted variables. The Reference values will be plotted
132 using a scatter marker
133 reference_values_kwargs: dict, optional
134 Additional keywords passed to :meth:`matplotlib.axes.Axes.plot` or
135 :meth:`bokeh:bokeh.plotting.Figure.circle` in reference values plot
136 show: bool, optional
137 Call backend show function.
138
139 Returns
140 -------
141 axes: matplotlib axes or bokeh figures
142
143 Examples
144 --------
145 KDE Pair Plot
146
147 .. plot::
148 :context: close-figs
149
150 >>> import arviz as az
151 >>> centered = az.load_arviz_data('centered_eight')
152 >>> coords = {'school': ['Choate', 'Deerfield']}
153 >>> az.plot_pair(centered,
154 >>> var_names=['theta', 'mu', 'tau'],
155 >>> kind='kde',
156 >>> coords=coords,
157 >>> divergences=True,
158 >>> textsize=18)
159
160 Hexbin pair plot
161
162 .. plot::
163 :context: close-figs
164
165 >>> az.plot_pair(centered,
166 >>> var_names=['theta', 'mu'],
167 >>> coords=coords,
168 >>> textsize=18,
169 >>> kind='hexbin')
170
171 Pair plot showing divergences and select variables with regular expressions
172
173 .. plot::
174 :context: close-figs
175
176 >>> az.plot_pair(centered,
177 ... var_names=['^t', 'mu'],
178 ... filter_vars="regex",
179 ... coords=coords,
180 ... divergences=True,
181 ... textsize=18)
182 """
183 valid_kinds = ["scatter", "kde", "hexbin"]
184 kind_boolean: Union[bool, List[bool]]
185 if isinstance(kind, str):
186 kind_boolean = kind in valid_kinds
187 else:
188 kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]
189 if not np.all(kind_boolean):
190 raise ValueError(f"Plot type {kind} not recognized. Plot type must be in {valid_kinds}")
191 if fill_last or contour:
192 warnings.warn(
193 "fill_last and contour will be deprecated. Please use kde_kwargs",
194 UserWarning,
195 )
196 if plot_kwargs:
197 warnings.warn(
198 "plot_kwargs will be deprecated."
199 " Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs",
200 UserWarning,
201 )
202
203 if coords is None:
204 coords = {}
205
206 if labeller is None:
207 labeller = BaseLabeller()
208
209 # Get posterior draws and combine chains
210 dataset = convert_to_dataset(data, group=group)
211 var_names = _var_names(var_names, dataset, filter_vars)
212 plotters = list(
213 xarray_var_iter(
214 get_coords(dataset, coords), var_names=var_names, skip_dims=combine_dims, combined=True
215 )
216 )
217 flat_var_names = [
218 labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters
219 ]
220
221 divergent_data = None
222 diverging_mask = None
223
224 # Assigning divergence group based on group param
225 if group == "posterior":
226 divergent_group = "sample_stats"
227 elif group == "prior":
228 divergent_group = "sample_stats_prior"
229 else:
230 divergences = False
231
232 # Get diverging draws and combine chains
233 if divergences:
234 if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
235 divergent_data = convert_to_dataset(data, group=divergent_group)
236 _, diverging_mask = xarray_to_ndarray(
237 divergent_data, var_names=("diverging",), combined=True
238 )
239 diverging_mask = np.squeeze(diverging_mask)
240 else:
241 divergences = False
242 warnings.warn(
243 "Divergences data not found, plotting without divergences. "
244 "Make sure the sample method provides divergences data and "
245 "that it is present in the `diverging` field of `sample_stats` "
246 "or `sample_stats_prior` or set divergences=False",
247 UserWarning,
248 )
249
250 if gridsize == "auto":
251 gridsize = int(dataset.dims["draw"] ** 0.35)
252
253 numvars = len(flat_var_names)
254
255 if numvars < 2:
256 raise ValueError("Number of variables to be plotted must be 2 or greater.")
257
258 pairplot_kwargs = dict(
259 ax=ax,
260 plotters=plotters,
261 numvars=numvars,
262 figsize=figsize,
263 textsize=textsize,
264 kind=kind,
265 scatter_kwargs=scatter_kwargs,
266 kde_kwargs=kde_kwargs,
267 hexbin_kwargs=hexbin_kwargs,
268 gridsize=gridsize,
269 colorbar=colorbar,
270 divergences=divergences,
271 diverging_mask=diverging_mask,
272 divergences_kwargs=divergences_kwargs,
273 flat_var_names=flat_var_names,
274 backend_kwargs=backend_kwargs,
275 marginal_kwargs=marginal_kwargs,
276 show=show,
277 marginals=marginals,
278 point_estimate=point_estimate,
279 point_estimate_kwargs=point_estimate_kwargs,
280 point_estimate_marker_kwargs=point_estimate_marker_kwargs,
281 reference_values=reference_values,
282 reference_values_kwargs=reference_values_kwargs,
283 )
284
285 if backend is None:
286 backend = rcParams["plot.backend"]
287 backend = backend.lower()
288
289 # TODO: Add backend kwargs
290 plot = get_plotting_function("plot_pair", "pairplot", backend)
291 ax = plot(**pairplot_kwargs)
292 return ax
293
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/plots/pairplot.py b/arviz/plots/pairplot.py
--- a/arviz/plots/pairplot.py
+++ b/arviz/plots/pairplot.py
@@ -24,9 +24,6 @@
textsize=None,
kind: Union[str, List[str]] = "scatter",
gridsize="auto",
- contour: Optional[bool] = None,
- plot_kwargs=None,
- fill_last=False,
divergences=False,
colorbar=False,
labeller=None,
@@ -82,11 +79,6 @@
such that the hexagons are approximately regular. Alternatively, gridsize
can be a tuple with two elements specifying the number of hexagons
in the x-direction and the y-direction.
- contour : bool, optional, deprecated, Defaults to True.
- If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
- **Note:** this default is implemented in the body of the code, not in argument processing.
- fill_last : bool
- If True fill the last contour of the 2D KDE plot. Defaults to True.
divergences: Boolean
If True divergences will be plotted in a different color, only if group is either 'prior'
or 'posterior'.
@@ -188,17 +180,6 @@
kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]
if not np.all(kind_boolean):
raise ValueError(f"Plot type {kind} not recognized. Plot type must be in {valid_kinds}")
- if fill_last or contour:
- warnings.warn(
- "fill_last and contour will be deprecated. Please use kde_kwargs",
- UserWarning,
- )
- if plot_kwargs:
- warnings.warn(
- "plot_kwargs will be deprecated."
- " Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs",
- UserWarning,
- )
if coords is None:
coords = {}
| {"golden_diff": "diff --git a/arviz/plots/pairplot.py b/arviz/plots/pairplot.py\n--- a/arviz/plots/pairplot.py\n+++ b/arviz/plots/pairplot.py\n@@ -24,9 +24,6 @@\n textsize=None,\n kind: Union[str, List[str]] = \"scatter\",\n gridsize=\"auto\",\n- contour: Optional[bool] = None,\n- plot_kwargs=None,\n- fill_last=False,\n divergences=False,\n colorbar=False,\n labeller=None,\n@@ -82,11 +79,6 @@\n such that the hexagons are approximately regular. Alternatively, gridsize\n can be a tuple with two elements specifying the number of hexagons\n in the x-direction and the y-direction.\n- contour : bool, optional, deprecated, Defaults to True.\n- If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.\n- **Note:** this default is implemented in the body of the code, not in argument processing.\n- fill_last : bool\n- If True fill the last contour of the 2D KDE plot. Defaults to True.\n divergences: Boolean\n If True divergences will be plotted in a different color, only if group is either 'prior'\n or 'posterior'.\n@@ -188,17 +180,6 @@\n kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]\n if not np.all(kind_boolean):\n raise ValueError(f\"Plot type {kind} not recognized. Plot type must be in {valid_kinds}\")\n- if fill_last or contour:\n- warnings.warn(\n- \"fill_last and contour will be deprecated. Please use kde_kwargs\",\n- UserWarning,\n- )\n- if plot_kwargs:\n- warnings.warn(\n- \"plot_kwargs will be deprecated.\"\n- \" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs\",\n- UserWarning,\n- )\n \n if coords is None:\n coords = {}\n", "issue": "Remove deprecated arguments from `plot_pair()`\n`plot_kwargs` is an argument of `plot_pair()` yet it is not described in the [documentation](https://python.arviz.org/en/latest/api/generated/arviz.plot_pair.html#arviz.plot_pair) for this function.\r\n\r\n**To Reproduce**\r\nGo to the link and search for the description of `plot_kwargs`\r\n\r\n**Expected behavior**\r\nA complete docstring were `plot_kwargs` usage and behaviour is described.\r\n\r\n\n", "before_files": [{"content": "\"\"\"Plot a scatter, kde and/or hexbin of sampled parameters.\"\"\"\nimport warnings\nfrom typing import List, Optional, Union\n\nimport numpy as np\n\nfrom ..data import convert_to_dataset\nfrom ..labels import BaseLabeller\nfrom ..sel_utils import xarray_to_ndarray, xarray_var_iter\nfrom .plot_utils import get_plotting_function\nfrom ..rcparams import rcParams\nfrom ..utils import _var_names, get_coords\n\n\ndef plot_pair(\n data,\n group=\"posterior\",\n var_names: Optional[List[str]] = None,\n filter_vars: Optional[str] = None,\n combine_dims=None,\n coords=None,\n marginals=False,\n figsize=None,\n textsize=None,\n kind: Union[str, List[str]] = \"scatter\",\n gridsize=\"auto\",\n contour: Optional[bool] = None,\n plot_kwargs=None,\n fill_last=False,\n divergences=False,\n colorbar=False,\n labeller=None,\n ax=None,\n divergences_kwargs=None,\n scatter_kwargs=None,\n kde_kwargs=None,\n hexbin_kwargs=None,\n backend=None,\n backend_kwargs=None,\n marginal_kwargs=None,\n point_estimate=None,\n point_estimate_kwargs=None,\n point_estimate_marker_kwargs=None,\n reference_values=None,\n reference_values_kwargs=None,\n show=None,\n):\n \"\"\"\n Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.\n\n Parameters\n ----------\n data: obj\n Any object that can be converted to an :class:`arviz.InferenceData` object.\n Refer to documentation of :func:`arviz.convert_to_dataset` for details\n 
group: str, optional\n Specifies which InferenceData group should be plotted. Defaults to 'posterior'.\n var_names: list of variable names, optional\n Variables to be plotted, if None all variable are plotted. Prefix the\n variables by ``~`` when you want to exclude them from the plot.\n filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret var_names as the real variables names. If \"like\",\n interpret var_names as substrings of the real variables names. If \"regex\",\n interpret var_names as regular expressions on the real variables names. A la\n ``pandas.filter``.\n combine_dims : set_like of str, optional\n List of dimensions to reduce. Defaults to reducing only the \"chain\" and \"draw\" dimensions.\n See the :ref:`this section <common_combine_dims>` for usage examples.\n coords: mapping, optional\n Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel`.\n marginals: bool, optional\n If True pairplot will include marginal distributions for every variable\n figsize: figure size tuple\n If None, size is (8 + numvars, 8 + numvars)\n textsize: int\n Text size for labels. If None it will be autoscaled based on ``figsize``.\n kind : str or List[str]\n Type of plot to display (scatter, kde and/or hexbin)\n gridsize: int or (int, int), optional\n Only works for ``kind=hexbin``. The number of hexagons in the x-direction.\n The corresponding number of hexagons in the y-direction is chosen\n such that the hexagons are approximately regular. Alternatively, gridsize\n can be a tuple with two elements specifying the number of hexagons\n in the x-direction and the y-direction.\n contour : bool, optional, deprecated, Defaults to True.\n If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.\n **Note:** this default is implemented in the body of the code, not in argument processing.\n fill_last : bool\n If True fill the last contour of the 2D KDE plot. Defaults to True.\n divergences: Boolean\n If True divergences will be plotted in a different color, only if group is either 'prior'\n or 'posterior'.\n colorbar: bool\n If True a colorbar will be included as part of the plot (Defaults to False).\n Only works when ``kind=hexbin``\n labeller : labeller instance, optional\n Class providing the method ``make_label_vert`` to generate the labels in the plot.\n Read the :ref:`label_guide` for more details and usage examples.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n divergences_kwargs: dicts, optional\n Additional keywords passed to :meth:`matplotlib.axes.Axes.scatter` for divergences\n scatter_kwargs:\n Additional keywords passed to :meth:`matplotlib.axes.Axes.scatter` when using scatter kind\n kde_kwargs: dict, optional\n Additional keywords passed to :func:`arviz.plot_kde` when using kde kind\n hexbin_kwargs: dict, optional\n Additional keywords passed to :meth:`matplotlib.axes.Axes.hexbin` when\n using hexbin kind\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used, passed to\n :func:`matplotlib.pyplot.subplots` or\n :func:`bokeh.plotting.figure`.\n marginal_kwargs: dict, optional\n Additional keywords passed to :func:`arviz.plot_dist`, modifying the\n marginal distributions plotted in the diagonal.\n point_estimate: str, optional\n Select point estimate from 'mean', 'mode' or 'median'. 
The point estimate will be\n plotted using a scatter marker and vertical/horizontal lines.\n point_estimate_kwargs: dict, optional\n Additional keywords passed to :meth:`matplotlib.axes.Axes.axvline`,\n :meth:`matplotlib.axes.Axes.axhline` (matplotlib) or\n :class:`bokeh:bokeh.models.Span` (bokeh)\n point_estimate_marker_kwargs: dict, optional\n Additional keywords passed to :meth:`matplotlib.axes.Axes.scatter`\n or :meth:`bokeh:bokeh.plotting.Figure.square` in point\n estimate plot. Not available in bokeh\n reference_values: dict, optional\n Reference values for the plotted variables. The Reference values will be plotted\n using a scatter marker\n reference_values_kwargs: dict, optional\n Additional keywords passed to :meth:`matplotlib.axes.Axes.plot` or\n :meth:`bokeh:bokeh.plotting.Figure.circle` in reference values plot\n show: bool, optional\n Call backend show function.\n\n Returns\n -------\n axes: matplotlib axes or bokeh figures\n\n Examples\n --------\n KDE Pair Plot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> centered = az.load_arviz_data('centered_eight')\n >>> coords = {'school': ['Choate', 'Deerfield']}\n >>> az.plot_pair(centered,\n >>> var_names=['theta', 'mu', 'tau'],\n >>> kind='kde',\n >>> coords=coords,\n >>> divergences=True,\n >>> textsize=18)\n\n Hexbin pair plot\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_pair(centered,\n >>> var_names=['theta', 'mu'],\n >>> coords=coords,\n >>> textsize=18,\n >>> kind='hexbin')\n\n Pair plot showing divergences and select variables with regular expressions\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_pair(centered,\n ... var_names=['^t', 'mu'],\n ... filter_vars=\"regex\",\n ... coords=coords,\n ... divergences=True,\n ... textsize=18)\n \"\"\"\n valid_kinds = [\"scatter\", \"kde\", \"hexbin\"]\n kind_boolean: Union[bool, List[bool]]\n if isinstance(kind, str):\n kind_boolean = kind in valid_kinds\n else:\n kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]\n if not np.all(kind_boolean):\n raise ValueError(f\"Plot type {kind} not recognized. Plot type must be in {valid_kinds}\")\n if fill_last or contour:\n warnings.warn(\n \"fill_last and contour will be deprecated. 
Please use kde_kwargs\",\n UserWarning,\n )\n if plot_kwargs:\n warnings.warn(\n \"plot_kwargs will be deprecated.\"\n \" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs\",\n UserWarning,\n )\n\n if coords is None:\n coords = {}\n\n if labeller is None:\n labeller = BaseLabeller()\n\n # Get posterior draws and combine chains\n dataset = convert_to_dataset(data, group=group)\n var_names = _var_names(var_names, dataset, filter_vars)\n plotters = list(\n xarray_var_iter(\n get_coords(dataset, coords), var_names=var_names, skip_dims=combine_dims, combined=True\n )\n )\n flat_var_names = [\n labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters\n ]\n\n divergent_data = None\n diverging_mask = None\n\n # Assigning divergence group based on group param\n if group == \"posterior\":\n divergent_group = \"sample_stats\"\n elif group == \"prior\":\n divergent_group = \"sample_stats_prior\"\n else:\n divergences = False\n\n # Get diverging draws and combine chains\n if divergences:\n if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), \"diverging\"):\n divergent_data = convert_to_dataset(data, group=divergent_group)\n _, diverging_mask = xarray_to_ndarray(\n divergent_data, var_names=(\"diverging\",), combined=True\n )\n diverging_mask = np.squeeze(diverging_mask)\n else:\n divergences = False\n warnings.warn(\n \"Divergences data not found, plotting without divergences. \"\n \"Make sure the sample method provides divergences data and \"\n \"that it is present in the `diverging` field of `sample_stats` \"\n \"or `sample_stats_prior` or set divergences=False\",\n UserWarning,\n )\n\n if gridsize == \"auto\":\n gridsize = int(dataset.dims[\"draw\"] ** 0.35)\n\n numvars = len(flat_var_names)\n\n if numvars < 2:\n raise ValueError(\"Number of variables to be plotted must be 2 or greater.\")\n\n pairplot_kwargs = dict(\n ax=ax,\n plotters=plotters,\n numvars=numvars,\n figsize=figsize,\n textsize=textsize,\n kind=kind,\n scatter_kwargs=scatter_kwargs,\n kde_kwargs=kde_kwargs,\n hexbin_kwargs=hexbin_kwargs,\n gridsize=gridsize,\n colorbar=colorbar,\n divergences=divergences,\n diverging_mask=diverging_mask,\n divergences_kwargs=divergences_kwargs,\n flat_var_names=flat_var_names,\n backend_kwargs=backend_kwargs,\n marginal_kwargs=marginal_kwargs,\n show=show,\n marginals=marginals,\n point_estimate=point_estimate,\n point_estimate_kwargs=point_estimate_kwargs,\n point_estimate_marker_kwargs=point_estimate_marker_kwargs,\n reference_values=reference_values,\n reference_values_kwargs=reference_values_kwargs,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_pair\", \"pairplot\", backend)\n ax = plot(**pairplot_kwargs)\n return ax\n", "path": "arviz/plots/pairplot.py"}], "after_files": [{"content": "\"\"\"Plot a scatter, kde and/or hexbin of sampled parameters.\"\"\"\nimport warnings\nfrom typing import List, Optional, Union\n\nimport numpy as np\n\nfrom ..data import convert_to_dataset\nfrom ..labels import BaseLabeller\nfrom ..sel_utils import xarray_to_ndarray, xarray_var_iter\nfrom .plot_utils import get_plotting_function\nfrom ..rcparams import rcParams\nfrom ..utils import _var_names, get_coords\n\n\ndef plot_pair(\n data,\n group=\"posterior\",\n var_names: Optional[List[str]] = None,\n filter_vars: Optional[str] = None,\n combine_dims=None,\n coords=None,\n marginals=False,\n figsize=None,\n textsize=None,\n kind: 
Union[str, List[str]] = \"scatter\",\n gridsize=\"auto\",\n divergences=False,\n colorbar=False,\n labeller=None,\n ax=None,\n divergences_kwargs=None,\n scatter_kwargs=None,\n kde_kwargs=None,\n hexbin_kwargs=None,\n backend=None,\n backend_kwargs=None,\n marginal_kwargs=None,\n point_estimate=None,\n point_estimate_kwargs=None,\n point_estimate_marker_kwargs=None,\n reference_values=None,\n reference_values_kwargs=None,\n show=None,\n):\n \"\"\"\n Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.\n\n Parameters\n ----------\n data: obj\n Any object that can be converted to an :class:`arviz.InferenceData` object.\n Refer to documentation of :func:`arviz.convert_to_dataset` for details\n group: str, optional\n Specifies which InferenceData group should be plotted. Defaults to 'posterior'.\n var_names: list of variable names, optional\n Variables to be plotted, if None all variable are plotted. Prefix the\n variables by ``~`` when you want to exclude them from the plot.\n filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret var_names as the real variables names. If \"like\",\n interpret var_names as substrings of the real variables names. If \"regex\",\n interpret var_names as regular expressions on the real variables names. A la\n ``pandas.filter``.\n combine_dims : set_like of str, optional\n List of dimensions to reduce. Defaults to reducing only the \"chain\" and \"draw\" dimensions.\n See the :ref:`this section <common_combine_dims>` for usage examples.\n coords: mapping, optional\n Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel`.\n marginals: bool, optional\n If True pairplot will include marginal distributions for every variable\n figsize: figure size tuple\n If None, size is (8 + numvars, 8 + numvars)\n textsize: int\n Text size for labels. If None it will be autoscaled based on ``figsize``.\n kind : str or List[str]\n Type of plot to display (scatter, kde and/or hexbin)\n gridsize: int or (int, int), optional\n Only works for ``kind=hexbin``. The number of hexagons in the x-direction.\n The corresponding number of hexagons in the y-direction is chosen\n such that the hexagons are approximately regular. Alternatively, gridsize\n can be a tuple with two elements specifying the number of hexagons\n in the x-direction and the y-direction.\n divergences: Boolean\n If True divergences will be plotted in a different color, only if group is either 'prior'\n or 'posterior'.\n colorbar: bool\n If True a colorbar will be included as part of the plot (Defaults to False).\n Only works when ``kind=hexbin``\n labeller : labeller instance, optional\n Class providing the method ``make_label_vert`` to generate the labels in the plot.\n Read the :ref:`label_guide` for more details and usage examples.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n divergences_kwargs: dicts, optional\n Additional keywords passed to :meth:`matplotlib.axes.Axes.scatter` for divergences\n scatter_kwargs:\n Additional keywords passed to :meth:`matplotlib.axes.Axes.scatter` when using scatter kind\n kde_kwargs: dict, optional\n Additional keywords passed to :func:`arviz.plot_kde` when using kde kind\n hexbin_kwargs: dict, optional\n Additional keywords passed to :meth:`matplotlib.axes.Axes.hexbin` when\n using hexbin kind\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. 
Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used, passed to\n :func:`matplotlib.pyplot.subplots` or\n :func:`bokeh.plotting.figure`.\n marginal_kwargs: dict, optional\n Additional keywords passed to :func:`arviz.plot_dist`, modifying the\n marginal distributions plotted in the diagonal.\n point_estimate: str, optional\n Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be\n plotted using a scatter marker and vertical/horizontal lines.\n point_estimate_kwargs: dict, optional\n Additional keywords passed to :meth:`matplotlib.axes.Axes.axvline`,\n :meth:`matplotlib.axes.Axes.axhline` (matplotlib) or\n :class:`bokeh:bokeh.models.Span` (bokeh)\n point_estimate_marker_kwargs: dict, optional\n Additional keywords passed to :meth:`matplotlib.axes.Axes.scatter`\n or :meth:`bokeh:bokeh.plotting.Figure.square` in point\n estimate plot. Not available in bokeh\n reference_values: dict, optional\n Reference values for the plotted variables. The Reference values will be plotted\n using a scatter marker\n reference_values_kwargs: dict, optional\n Additional keywords passed to :meth:`matplotlib.axes.Axes.plot` or\n :meth:`bokeh:bokeh.plotting.Figure.circle` in reference values plot\n show: bool, optional\n Call backend show function.\n\n Returns\n -------\n axes: matplotlib axes or bokeh figures\n\n Examples\n --------\n KDE Pair Plot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> centered = az.load_arviz_data('centered_eight')\n >>> coords = {'school': ['Choate', 'Deerfield']}\n >>> az.plot_pair(centered,\n >>> var_names=['theta', 'mu', 'tau'],\n >>> kind='kde',\n >>> coords=coords,\n >>> divergences=True,\n >>> textsize=18)\n\n Hexbin pair plot\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_pair(centered,\n >>> var_names=['theta', 'mu'],\n >>> coords=coords,\n >>> textsize=18,\n >>> kind='hexbin')\n\n Pair plot showing divergences and select variables with regular expressions\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_pair(centered,\n ... var_names=['^t', 'mu'],\n ... filter_vars=\"regex\",\n ... coords=coords,\n ... divergences=True,\n ... textsize=18)\n \"\"\"\n valid_kinds = [\"scatter\", \"kde\", \"hexbin\"]\n kind_boolean: Union[bool, List[bool]]\n if isinstance(kind, str):\n kind_boolean = kind in valid_kinds\n else:\n kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]\n if not np.all(kind_boolean):\n raise ValueError(f\"Plot type {kind} not recognized. 
Plot type must be in {valid_kinds}\")\n\n if coords is None:\n coords = {}\n\n if labeller is None:\n labeller = BaseLabeller()\n\n # Get posterior draws and combine chains\n dataset = convert_to_dataset(data, group=group)\n var_names = _var_names(var_names, dataset, filter_vars)\n plotters = list(\n xarray_var_iter(\n get_coords(dataset, coords), var_names=var_names, skip_dims=combine_dims, combined=True\n )\n )\n flat_var_names = [\n labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters\n ]\n\n divergent_data = None\n diverging_mask = None\n\n # Assigning divergence group based on group param\n if group == \"posterior\":\n divergent_group = \"sample_stats\"\n elif group == \"prior\":\n divergent_group = \"sample_stats_prior\"\n else:\n divergences = False\n\n # Get diverging draws and combine chains\n if divergences:\n if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), \"diverging\"):\n divergent_data = convert_to_dataset(data, group=divergent_group)\n _, diverging_mask = xarray_to_ndarray(\n divergent_data, var_names=(\"diverging\",), combined=True\n )\n diverging_mask = np.squeeze(diverging_mask)\n else:\n divergences = False\n warnings.warn(\n \"Divergences data not found, plotting without divergences. \"\n \"Make sure the sample method provides divergences data and \"\n \"that it is present in the `diverging` field of `sample_stats` \"\n \"or `sample_stats_prior` or set divergences=False\",\n UserWarning,\n )\n\n if gridsize == \"auto\":\n gridsize = int(dataset.dims[\"draw\"] ** 0.35)\n\n numvars = len(flat_var_names)\n\n if numvars < 2:\n raise ValueError(\"Number of variables to be plotted must be 2 or greater.\")\n\n pairplot_kwargs = dict(\n ax=ax,\n plotters=plotters,\n numvars=numvars,\n figsize=figsize,\n textsize=textsize,\n kind=kind,\n scatter_kwargs=scatter_kwargs,\n kde_kwargs=kde_kwargs,\n hexbin_kwargs=hexbin_kwargs,\n gridsize=gridsize,\n colorbar=colorbar,\n divergences=divergences,\n diverging_mask=diverging_mask,\n divergences_kwargs=divergences_kwargs,\n flat_var_names=flat_var_names,\n backend_kwargs=backend_kwargs,\n marginal_kwargs=marginal_kwargs,\n show=show,\n marginals=marginals,\n point_estimate=point_estimate,\n point_estimate_kwargs=point_estimate_kwargs,\n point_estimate_marker_kwargs=point_estimate_marker_kwargs,\n reference_values=reference_values,\n reference_values_kwargs=reference_values_kwargs,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_pair\", \"pairplot\", backend)\n ax = plot(**pairplot_kwargs)\n return ax\n", "path": "arviz/plots/pairplot.py"}]} | 3,721 | 456 |
gh_patches_debug_16625 | rasdani/github-patches | git_diff | vispy__vispy-1380 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Vispy Linux Error (division by zero)
When I try to run vispy on Linux I get the following error:
```
from vispy.plot import Fig
f = Fig()
/lib/python2.7/site-packages/vispy/util/dpi/_linux.pyc in <lambda>(x_px, y_px, x_mm, y_mm)
49 from_xrandr = _get_dpi_from(
50 'xrandr', r'(\d+)x(\d+).*?(\d+)mm x (\d+)mm',
---> 51 lambda x_px, y_px, x_mm, y_mm: 25.4 * (x_px / x_mm + y_px / y_mm) / 2)
52 if from_xrandr is not None:
53 return from_xrandr
```
ZeroDivisionError: float division by zero
--- END ISSUE ---
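
The failing lambda divides the pixel counts by the physical screen size reported by `xrandr`; some outputs (virtual displays, projectors, certain VMs) report `0mm x 0mm`, which is what produces the ZeroDivisionError above. A minimal sketch of a guarded version is shown below — the helper name and the 96 DPI fallback are illustrative assumptions, not necessarily the project's actual fix:

```python
def _xrandr_dpi(x_px, y_px, x_mm, y_mm, fallback=96.0):
    # xrandr can report a physical size of 0mm; fall back to a nominal DPI
    # instead of dividing by zero
    if x_mm == 0 or y_mm == 0:
        return fallback
    return 25.4 * (x_px / x_mm + y_px / y_mm) / 2
```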
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/util/dpi/_linux.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # -----------------------------------------------------------------------------
3 # Copyright (c) Vispy Development Team. All Rights Reserved.
4 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
5 # -----------------------------------------------------------------------------
6
7 import os
8 import re
9 from subprocess import CalledProcessError
10
11 from ..logs import logger
12 from ..wrappers import run_subprocess
13
14
15 def _get_dpi_from(cmd, pattern, func):
16 """Match pattern against the output of func, passing the results as
17 floats to func. If anything fails, return None.
18 """
19 try:
20 out, _ = run_subprocess([cmd])
21 except (OSError, CalledProcessError):
22 pass
23 else:
24 match = re.search(pattern, out)
25 if match:
26 return func(*map(float, match.groups()))
27
28
29 def get_dpi(raise_error=True):
30 """Get screen DPI from the OS
31
32 Parameters
33 ----------
34 raise_error : bool
35 If True, raise an error if DPI could not be determined.
36
37 Returns
38 -------
39 dpi : float
40 Dots per inch of the primary screen.
41 """
42 # If we are running without an X server (e.g. OSMesa), use a fixed DPI
43 if 'DISPLAY' not in os.environ:
44 return 96.
45
46 from_xdpyinfo = _get_dpi_from(
47 'xdpyinfo', r'(\d+)x(\d+) dots per inch',
48 lambda x_dpi, y_dpi: (x_dpi + y_dpi) / 2)
49 if from_xdpyinfo is not None:
50 return from_xdpyinfo
51
52 from_xrandr = _get_dpi_from(
53 'xrandr', r'(\d+)x(\d+).*?(\d+)mm x (\d+)mm',
54 lambda x_px, y_px, x_mm, y_mm: 25.4 * (x_px / x_mm + y_px / y_mm) / 2)
55 if from_xrandr is not None:
56 return from_xrandr
57 if raise_error:
58 raise RuntimeError('could not determine DPI')
59 else:
60 logger.warning('could not determine DPI')
61 return 96
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vispy/util/dpi/_linux.py b/vispy/util/dpi/_linux.py
--- a/vispy/util/dpi/_linux.py
+++ b/vispy/util/dpi/_linux.py
@@ -26,6 +26,14 @@
return func(*map(float, match.groups()))
+def _xrandr_calc(x_px, y_px, x_mm, y_mm):
+ if x_mm == 0 or y_mm == 0:
+ logger.warning("'xrandr' output has screen dimension of 0mm, " +
+ "can't compute proper DPI")
+ return 96.
+ return 25.4 * (x_px / x_mm + y_px / y_mm) / 2
+
+
def get_dpi(raise_error=True):
"""Get screen DPI from the OS
@@ -51,7 +59,7 @@
from_xrandr = _get_dpi_from(
'xrandr', r'(\d+)x(\d+).*?(\d+)mm x (\d+)mm',
- lambda x_px, y_px, x_mm, y_mm: 25.4 * (x_px / x_mm + y_px / y_mm) / 2)
+ _xrandr_calc)
if from_xrandr is not None:
return from_xrandr
if raise_error:
| {"golden_diff": "diff --git a/vispy/util/dpi/_linux.py b/vispy/util/dpi/_linux.py\n--- a/vispy/util/dpi/_linux.py\n+++ b/vispy/util/dpi/_linux.py\n@@ -26,6 +26,14 @@\n return func(*map(float, match.groups()))\n \n \n+def _xrandr_calc(x_px, y_px, x_mm, y_mm):\n+ if x_mm == 0 or y_mm == 0:\n+ logger.warning(\"'xrandr' output has screen dimension of 0mm, \" +\n+ \"can't compute proper DPI\")\n+ return 96.\n+ return 25.4 * (x_px / x_mm + y_px / y_mm) / 2\n+\n+\n def get_dpi(raise_error=True):\n \"\"\"Get screen DPI from the OS\n \n@@ -51,7 +59,7 @@\n \n from_xrandr = _get_dpi_from(\n 'xrandr', r'(\\d+)x(\\d+).*?(\\d+)mm x (\\d+)mm',\n- lambda x_px, y_px, x_mm, y_mm: 25.4 * (x_px / x_mm + y_px / y_mm) / 2)\n+ _xrandr_calc)\n if from_xrandr is not None:\n return from_xrandr\n if raise_error:\n", "issue": "Vispy Linux Error ( division by zero)\nWhen I try to tun the vispy on linux I get the following error:\n\n```\nfrom vispy.plot import Fig\nf = Fig()\n\n/lib/python2.7/site-packages/vispy/util/dpi/_linux.pyc in <lambda>(x_px, y_px, x_mm, y_mm)\n 49 from_xrandr = _get_dpi_from(\n 50 'xrandr', r'(\\d+)x(\\d+).*?(\\d+)mm x (\\d+)mm',\n---> 51 lambda x_px, y_px, x_mm, y_mm: 25.4 * (x_px / x_mm + y_px / y_mm) / 2)\n 52 if from_xrandr is not None:\n 53 return from_xrandr\n```\n\nZeroDivisionError: float division by zero\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\nimport os\nimport re\nfrom subprocess import CalledProcessError\n\nfrom ..logs import logger\nfrom ..wrappers import run_subprocess\n\n\ndef _get_dpi_from(cmd, pattern, func):\n \"\"\"Match pattern against the output of func, passing the results as\n floats to func. If anything fails, return None.\n \"\"\"\n try:\n out, _ = run_subprocess([cmd])\n except (OSError, CalledProcessError):\n pass\n else:\n match = re.search(pattern, out)\n if match:\n return func(*map(float, match.groups()))\n\n\ndef get_dpi(raise_error=True):\n \"\"\"Get screen DPI from the OS\n\n Parameters\n ----------\n raise_error : bool\n If True, raise an error if DPI could not be determined.\n\n Returns\n -------\n dpi : float\n Dots per inch of the primary screen.\n \"\"\"\n # If we are running without an X server (e.g. OSMesa), use a fixed DPI\n if 'DISPLAY' not in os.environ:\n return 96.\n\n from_xdpyinfo = _get_dpi_from(\n 'xdpyinfo', r'(\\d+)x(\\d+) dots per inch',\n lambda x_dpi, y_dpi: (x_dpi + y_dpi) / 2)\n if from_xdpyinfo is not None:\n return from_xdpyinfo\n\n from_xrandr = _get_dpi_from(\n 'xrandr', r'(\\d+)x(\\d+).*?(\\d+)mm x (\\d+)mm',\n lambda x_px, y_px, x_mm, y_mm: 25.4 * (x_px / x_mm + y_px / y_mm) / 2)\n if from_xrandr is not None:\n return from_xrandr\n if raise_error:\n raise RuntimeError('could not determine DPI')\n else:\n logger.warning('could not determine DPI')\n return 96\n", "path": "vispy/util/dpi/_linux.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. 
See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\nimport os\nimport re\nfrom subprocess import CalledProcessError\n\nfrom ..logs import logger\nfrom ..wrappers import run_subprocess\n\n\ndef _get_dpi_from(cmd, pattern, func):\n \"\"\"Match pattern against the output of func, passing the results as\n floats to func. If anything fails, return None.\n \"\"\"\n try:\n out, _ = run_subprocess([cmd])\n except (OSError, CalledProcessError):\n pass\n else:\n match = re.search(pattern, out)\n if match:\n return func(*map(float, match.groups()))\n\n\ndef _xrandr_calc(x_px, y_px, x_mm, y_mm):\n if x_mm == 0 or y_mm == 0:\n logger.warning(\"'xrandr' output has screen dimension of 0mm, \" +\n \"can't compute proper DPI\")\n return 96.\n return 25.4 * (x_px / x_mm + y_px / y_mm) / 2\n\n\ndef get_dpi(raise_error=True):\n \"\"\"Get screen DPI from the OS\n\n Parameters\n ----------\n raise_error : bool\n If True, raise an error if DPI could not be determined.\n\n Returns\n -------\n dpi : float\n Dots per inch of the primary screen.\n \"\"\"\n # If we are running without an X server (e.g. OSMesa), use a fixed DPI\n if 'DISPLAY' not in os.environ:\n return 96.\n\n from_xdpyinfo = _get_dpi_from(\n 'xdpyinfo', r'(\\d+)x(\\d+) dots per inch',\n lambda x_dpi, y_dpi: (x_dpi + y_dpi) / 2)\n if from_xdpyinfo is not None:\n return from_xdpyinfo\n\n from_xrandr = _get_dpi_from(\n 'xrandr', r'(\\d+)x(\\d+).*?(\\d+)mm x (\\d+)mm',\n _xrandr_calc)\n if from_xrandr is not None:\n return from_xrandr\n if raise_error:\n raise RuntimeError('could not determine DPI')\n else:\n logger.warning('could not determine DPI')\n return 96\n", "path": "vispy/util/dpi/_linux.py"}]} | 1,052 | 308 |
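For the vispy patch above, the essence is guarding the millimetre dimensions before dividing. A minimal, self-contained sketch of that guarded DPI computation — the `xrandr_dpi` helper name and the sample xrandr lines are illustrative assumptions, not vispy's actual API:

```python
import re


def xrandr_dpi(line, fallback=96.0):
    # Match e.g. "1920x1080 ... 510mm x 290mm": pixel size, then physical size.
    match = re.search(r'(\d+)x(\d+).*?(\d+)mm x (\d+)mm', line)
    if not match:
        return fallback
    x_px, y_px, x_mm, y_mm = map(float, match.groups())
    # Some virtual or misreported displays advertise 0mm dimensions;
    # dividing by them is exactly the ZeroDivisionError from the issue.
    if x_mm == 0 or y_mm == 0:
        return fallback
    return 25.4 * (x_px / x_mm + y_px / y_mm) / 2


print(xrandr_dpi("HDMI-1 connected 1920x1080+0+0 510mm x 290mm"))  # ≈ 95.1
print(xrandr_dpi("VIRTUAL1 connected 1920x1080+0+0 0mm x 0mm"))    # falls back to 96.0
```

On a display that misreports its physical size as 0 mm, the guarded version simply falls back to 96 DPI instead of raising.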
gh_patches_debug_6023 | rasdani/github-patches | git_diff | rasterio__rasterio-2062 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't access requester pays buckets with `rio --aws-requester-pays info <url>` cli tool
## Expected behavior and actual behavior.
Should be able to run something like this if correct AWS credentials are available
```bash
rio --aws-requester-pays \
info \
s3://usgs-landsat/collection02/level-2/standard/oli-tirs/2014/168/061/LC08_L2SP_168061_20140102_20200912_02_T1/LC08_L2SP_168061_20140102_20200912_02_T1_SR_B4.TIF
{"blockxsize": 256, "blockysize": 256, "bounds": [172185.0, -276315.0, 399915.0, -44085.0], "colorinterp": ["gray"], "compress": "deflate", "count": 1, "crs": "EPSG:32637", "descriptions": [null], "driver": "GTiff", "dtype": "uint16", "height": 7741, "indexes": [1], "interleave": "band", "lnglat": [37.07703791017367, -1.4485551824205734], "mask_flags": [["nodata"]], "nodata": 0.0, "res": [30.0, 30.0], "shape": [7741, 7591], "tiled": true, "transform": [30.0, 0.0, 172185.0, 0.0, -30.0, -44085.0, 0.0, 0.0, 1.0], "units": [null], "width": 7591}
```
## Steps to reproduce the problem.
1. make sure you have correct credentials, command below should work
```bash
gdalinfo /vsis3/usgs-landsat/collection02/level-2/standard/oli-tirs/2014/168/061/LC08_L2SP_168061_20140102_20200912_02_T1/LC08_L2SP_168061_20140102_20200912_02_T1_SR_B4.TIF \
--config AWS_REQUEST_PAYER requester
```
2. Now access same file with `rio`
```bash
rio --aws-requester-pays \
info \
s3://usgs-landsat/collection02/level-2/standard/oli-tirs/2014/168/061/LC08_L2SP_168061_20140102_20200912_02_T1/LC08_L2SP_168061_20140102_20200912_02_T1_SR_B4.TIF
```
and get output similar to
```
Traceback (most recent call last):
File "rasterio/_base.pyx", line 216, in rasterio._base.DatasetBase.__init__
File "rasterio/_shim.pyx", line 78, in rasterio._shim.open_dataset
File "rasterio/_err.pyx", line 213, in rasterio._err.exc_wrap_pointer
rasterio._err.CPLE_AWSAccessDeniedError: Access Denied
```
## Diagnosis
Problem is here:
https://github.com/mapbox/rasterio/blob/2e8ec36a52b6d57e5364f1b415e3573f6e3084a3/rasterio/rio/main.py#L95-L101
basically AWSSession is not being constructed even if aws option was requested.
Should be this instead:
```diff
- if aws_profile or aws_no_sign_requests:
+ if aws_profile or aws_no_sign_requests or aws_requester_pays:
```
## Operating system
Ubuntu 20.04
## Rasterio version and provenance
1.1.8 compiled against local gdal 3.0.4
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/rio/main.py`
Content:
```
1 """
2 Main command group for Rasterio's CLI.
3
4 Subcommands developed as a part of the Rasterio package have their own
5 modules under ``rasterio.rio`` (like ``rasterio/rio/info.py``) and are
6 registered in the 'rasterio.rio_commands' entry point group in
7 Rasterio's ``setup.py``:
8
9 entry_points='''
10 [console_scripts]
11 rio=rasterio.rio.main:main_group
12
13 [rasterio.rio_commands]
14 bounds=rasterio.rio.bounds:bounds
15 calc=rasterio.rio.calc:calc
16 ...
17
18 Users may create their own ``rio`` subcommands by writing modules that
19 register entry points in Rasterio's 'rasterio.rio_plugins' group. See
20 for example https://github.com/sgillies/rio-plugin-example, which has
21 been published to PyPI as ``rio-metasay``.
22
23 There's no advantage to making a ``rio`` subcommand which doesn't
24 import rasterio. But if you are using rasterio, you may profit from
25 Rasterio's CLI infrastructure and the network of existing commands.
26 Please add yours to the registry
27
28 https://github.com/mapbox/rasterio/wiki/Rio-plugin-registry
29
30 so that other ``rio`` users may find it.
31 """
32
33
34 import logging
35 from pkg_resources import iter_entry_points
36 import sys
37
38 from click_plugins import with_plugins
39 import click
40 import cligj
41
42 from . import options
43 import rasterio
44 from rasterio.session import AWSSession
45
46
47 def configure_logging(verbosity):
48 log_level = max(10, 30 - 10 * verbosity)
49 logging.basicConfig(stream=sys.stderr, level=log_level)
50
51
52 def gdal_version_cb(ctx, param, value):
53 if not value or ctx.resilient_parsing:
54 return
55
56 click.echo("{0}".format(rasterio.__gdal_version__), color=ctx.color)
57 ctx.exit()
58
59
60 @with_plugins(
61 ep
62 for ep in list(iter_entry_points("rasterio.rio_commands"))
63 + list(iter_entry_points("rasterio.rio_plugins"))
64 )
65 @click.group()
66 @cligj.verbose_opt
67 @cligj.quiet_opt
68 @click.option(
69 "--aws-profile", help="Select a profile from the AWS credentials file"
70 )
71 @click.option("--aws-no-sign-requests", is_flag=True, help="Make requests anonymously")
72 @click.option(
73 "--aws-requester-pays", is_flag=True, help="Requester pays data transfer costs"
74 )
75 @click.version_option(version=rasterio.__version__, message="%(version)s")
76 @click.option("--gdal-version", is_eager=True, is_flag=True, callback=gdal_version_cb)
77 @click.pass_context
78 def main_group(
79 ctx,
80 verbose,
81 quiet,
82 aws_profile,
83 aws_no_sign_requests,
84 aws_requester_pays,
85 gdal_version,
86 ):
87 """Rasterio command line interface.
88 """
89 verbosity = verbose - quiet
90 configure_logging(verbosity)
91 ctx.obj = {}
92 ctx.obj["verbosity"] = verbosity
93 ctx.obj["aws_profile"] = aws_profile
94 envopts = {"CPL_DEBUG": (verbosity > 2)}
95 if aws_profile or aws_no_sign_requests:
96 ctx.obj["env"] = rasterio.Env(
97 session=AWSSession(
98 profile_name=aws_profile,
99 aws_unsigned=aws_no_sign_requests,
100 requester_pays=aws_requester_pays,
101 ), **envopts)
102 else:
103 ctx.obj["env"] = rasterio.Env(**envopts)
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/rio/main.py b/rasterio/rio/main.py
--- a/rasterio/rio/main.py
+++ b/rasterio/rio/main.py
@@ -92,7 +92,7 @@
ctx.obj["verbosity"] = verbosity
ctx.obj["aws_profile"] = aws_profile
envopts = {"CPL_DEBUG": (verbosity > 2)}
- if aws_profile or aws_no_sign_requests:
+ if aws_profile or aws_no_sign_requests or aws_requester_pays:
ctx.obj["env"] = rasterio.Env(
session=AWSSession(
profile_name=aws_profile,
| {"golden_diff": "diff --git a/rasterio/rio/main.py b/rasterio/rio/main.py\n--- a/rasterio/rio/main.py\n+++ b/rasterio/rio/main.py\n@@ -92,7 +92,7 @@\n ctx.obj[\"verbosity\"] = verbosity\n ctx.obj[\"aws_profile\"] = aws_profile\n envopts = {\"CPL_DEBUG\": (verbosity > 2)}\n- if aws_profile or aws_no_sign_requests:\n+ if aws_profile or aws_no_sign_requests or aws_requester_pays:\n ctx.obj[\"env\"] = rasterio.Env(\n session=AWSSession(\n profile_name=aws_profile,\n", "issue": "Can't access requester pays buckets with `rio --aws-requester-pays info <url>` cli tool\n## Expected behavior and actual behavior.\r\n\r\nShould be able to run something like this if correct AWS credentials are available\r\n\r\n```bash\r\nrio --aws-requester-pays \\\r\n info \\\r\n s3://usgs-landsat/collection02/level-2/standard/oli-tirs/2014/168/061/LC08_L2SP_168061_20140102_20200912_02_T1/LC08_L2SP_168061_20140102_20200912_02_T1_SR_B4.TIF\r\n\r\n{\"blockxsize\": 256, \"blockysize\": 256, \"bounds\": [172185.0, -276315.0, 399915.0, -44085.0], \"colorinterp\": [\"gray\"], \"compress\": \"deflate\", \"count\": 1, \"crs\": \"EPSG:32637\", \"descriptions\": [null], \"driver\": \"GTiff\", \"dtype\": \"uint16\", \"height\": 7741, \"indexes\": [1], \"interleave\": \"band\", \"lnglat\": [37.07703791017367, -1.4485551824205734], \"mask_flags\": [[\"nodata\"]], \"nodata\": 0.0, \"res\": [30.0, 30.0], \"shape\": [7741, 7591], \"tiled\": true, \"transform\": [30.0, 0.0, 172185.0, 0.0, -30.0, -44085.0, 0.0, 0.0, 1.0], \"units\": [null], \"width\": 7591}\r\n```\r\n\r\n## Steps to reproduce the problem.\r\n\r\n1. make sure you have correct credentials, command below should work\r\n\r\n```bash\r\ngdalinfo /vsis3/usgs-landsat/collection02/level-2/standard/oli-tirs/2014/168/061/LC08_L2SP_168061_20140102_20200912_02_T1/LC08_L2SP_168061_20140102_20200912_02_T1_SR_B4.TIF \\\r\n--config AWS_REQUEST_PAYER requester\r\n```\r\n2. 
Now access same file with `rio`\r\n\r\n```bash\r\nrio --aws-requester-pays \\\r\n info \\\r\n s3://usgs-landsat/collection02/level-2/standard/oli-tirs/2014/168/061/LC08_L2SP_168061_20140102_20200912_02_T1/LC08_L2SP_168061_20140102_20200912_02_T1_SR_B4.TIF\r\n```\r\nand get output similar to\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"rasterio/_base.pyx\", line 216, in rasterio._base.DatasetBase.__init__\r\n File \"rasterio/_shim.pyx\", line 78, in rasterio._shim.open_dataset\r\n File \"rasterio/_err.pyx\", line 213, in rasterio._err.exc_wrap_pointer\r\nrasterio._err.CPLE_AWSAccessDeniedError: Access Denied\r\n```\r\n\r\n\r\n## Diagnosis\r\n\r\nProblem is here:\r\n\r\nhttps://github.com/mapbox/rasterio/blob/2e8ec36a52b6d57e5364f1b415e3573f6e3084a3/rasterio/rio/main.py#L95-L101\r\n\r\nbasically AWSSession is not being constructed even if aws option was requested.\r\n\r\nShould be this instead:\r\n\r\n```diff\r\n- if aws_profile or aws_no_sign_requests:\r\n+ if aws_profile or aws_no_sign_requests or aws_requester_pays:\r\n```\r\n\r\n## Operating system\r\n\r\nUbuntu 20.04\r\n\r\n## Rasterio version and provenance\r\n\r\n1.1.8 compiled against local gdal 3.0.4\r\n\n", "before_files": [{"content": "\"\"\"\nMain command group for Rasterio's CLI.\n\nSubcommands developed as a part of the Rasterio package have their own\nmodules under ``rasterio.rio`` (like ``rasterio/rio/info.py``) and are\nregistered in the 'rasterio.rio_commands' entry point group in\nRasterio's ``setup.py``:\n\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:main_group\n\n [rasterio.rio_commands]\n bounds=rasterio.rio.bounds:bounds\n calc=rasterio.rio.calc:calc\n ...\n\nUsers may create their own ``rio`` subcommands by writing modules that\nregister entry points in Rasterio's 'rasterio.rio_plugins' group. See\nfor example https://github.com/sgillies/rio-plugin-example, which has\nbeen published to PyPI as ``rio-metasay``.\n\nThere's no advantage to making a ``rio`` subcommand which doesn't\nimport rasterio. But if you are using rasterio, you may profit from\nRasterio's CLI infrastructure and the network of existing commands.\nPlease add yours to the registry\n\n https://github.com/mapbox/rasterio/wiki/Rio-plugin-registry\n\nso that other ``rio`` users may find it.\n\"\"\"\n\n\nimport logging\nfrom pkg_resources import iter_entry_points\nimport sys\n\nfrom click_plugins import with_plugins\nimport click\nimport cligj\n\nfrom . 
import options\nimport rasterio\nfrom rasterio.session import AWSSession\n\n\ndef configure_logging(verbosity):\n log_level = max(10, 30 - 10 * verbosity)\n logging.basicConfig(stream=sys.stderr, level=log_level)\n\n\ndef gdal_version_cb(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(\"{0}\".format(rasterio.__gdal_version__), color=ctx.color)\n ctx.exit()\n\n\n@with_plugins(\n ep\n for ep in list(iter_entry_points(\"rasterio.rio_commands\"))\n + list(iter_entry_points(\"rasterio.rio_plugins\"))\n)\[email protected]()\[email protected]_opt\[email protected]_opt\[email protected](\n \"--aws-profile\", help=\"Select a profile from the AWS credentials file\"\n)\[email protected](\"--aws-no-sign-requests\", is_flag=True, help=\"Make requests anonymously\")\[email protected](\n \"--aws-requester-pays\", is_flag=True, help=\"Requester pays data transfer costs\"\n)\[email protected]_option(version=rasterio.__version__, message=\"%(version)s\")\[email protected](\"--gdal-version\", is_eager=True, is_flag=True, callback=gdal_version_cb)\[email protected]_context\ndef main_group(\n ctx,\n verbose,\n quiet,\n aws_profile,\n aws_no_sign_requests,\n aws_requester_pays,\n gdal_version,\n):\n \"\"\"Rasterio command line interface.\n \"\"\"\n verbosity = verbose - quiet\n configure_logging(verbosity)\n ctx.obj = {}\n ctx.obj[\"verbosity\"] = verbosity\n ctx.obj[\"aws_profile\"] = aws_profile\n envopts = {\"CPL_DEBUG\": (verbosity > 2)}\n if aws_profile or aws_no_sign_requests:\n ctx.obj[\"env\"] = rasterio.Env(\n session=AWSSession(\n profile_name=aws_profile,\n aws_unsigned=aws_no_sign_requests,\n requester_pays=aws_requester_pays,\n ), **envopts)\n else:\n ctx.obj[\"env\"] = rasterio.Env(**envopts)\n", "path": "rasterio/rio/main.py"}], "after_files": [{"content": "\"\"\"\nMain command group for Rasterio's CLI.\n\nSubcommands developed as a part of the Rasterio package have their own\nmodules under ``rasterio.rio`` (like ``rasterio/rio/info.py``) and are\nregistered in the 'rasterio.rio_commands' entry point group in\nRasterio's ``setup.py``:\n\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:main_group\n\n [rasterio.rio_commands]\n bounds=rasterio.rio.bounds:bounds\n calc=rasterio.rio.calc:calc\n ...\n\nUsers may create their own ``rio`` subcommands by writing modules that\nregister entry points in Rasterio's 'rasterio.rio_plugins' group. See\nfor example https://github.com/sgillies/rio-plugin-example, which has\nbeen published to PyPI as ``rio-metasay``.\n\nThere's no advantage to making a ``rio`` subcommand which doesn't\nimport rasterio. But if you are using rasterio, you may profit from\nRasterio's CLI infrastructure and the network of existing commands.\nPlease add yours to the registry\n\n https://github.com/mapbox/rasterio/wiki/Rio-plugin-registry\n\nso that other ``rio`` users may find it.\n\"\"\"\n\n\nimport logging\nfrom pkg_resources import iter_entry_points\nimport sys\n\nfrom click_plugins import with_plugins\nimport click\nimport cligj\n\nfrom . 
import options\nimport rasterio\nfrom rasterio.session import AWSSession\n\n\ndef configure_logging(verbosity):\n log_level = max(10, 30 - 10 * verbosity)\n logging.basicConfig(stream=sys.stderr, level=log_level)\n\n\ndef gdal_version_cb(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(\"{0}\".format(rasterio.__gdal_version__), color=ctx.color)\n ctx.exit()\n\n\n@with_plugins(\n ep\n for ep in list(iter_entry_points(\"rasterio.rio_commands\"))\n + list(iter_entry_points(\"rasterio.rio_plugins\"))\n)\[email protected]()\[email protected]_opt\[email protected]_opt\[email protected](\n \"--aws-profile\", help=\"Select a profile from the AWS credentials file\"\n)\[email protected](\"--aws-no-sign-requests\", is_flag=True, help=\"Make requests anonymously\")\[email protected](\n \"--aws-requester-pays\", is_flag=True, help=\"Requester pays data transfer costs\"\n)\[email protected]_option(version=rasterio.__version__, message=\"%(version)s\")\[email protected](\"--gdal-version\", is_eager=True, is_flag=True, callback=gdal_version_cb)\[email protected]_context\ndef main_group(\n ctx,\n verbose,\n quiet,\n aws_profile,\n aws_no_sign_requests,\n aws_requester_pays,\n gdal_version,\n):\n \"\"\"Rasterio command line interface.\n \"\"\"\n verbosity = verbose - quiet\n configure_logging(verbosity)\n ctx.obj = {}\n ctx.obj[\"verbosity\"] = verbosity\n ctx.obj[\"aws_profile\"] = aws_profile\n envopts = {\"CPL_DEBUG\": (verbosity > 2)}\n if aws_profile or aws_no_sign_requests or aws_requester_pays:\n ctx.obj[\"env\"] = rasterio.Env(\n session=AWSSession(\n profile_name=aws_profile,\n aws_unsigned=aws_no_sign_requests,\n requester_pays=aws_requester_pays,\n ), **envopts)\n else:\n ctx.obj[\"env\"] = rasterio.Env(**envopts)\n", "path": "rasterio/rio/main.py"}]} | 2,301 | 140 |
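The rasterio fix above is a one-word change to a condition, which is easier to see in isolation. A rough sketch of the decision logic, with a plain dict standing in for `AWSSession` so it stays runnable without rasterio installed (the helper name and return shape are assumptions for illustration only):

```python
def build_env_kwargs(aws_profile=None, aws_no_sign_requests=False,
                     aws_requester_pays=False):
    """Return the keyword arguments a CLI could pass to rasterio.Env()."""
    envopts = {"CPL_DEBUG": False}
    # The bug: '--aws-requester-pays' alone used to fall through to the
    # else-branch, so no session (and no requester-pays flag) was configured.
    if aws_profile or aws_no_sign_requests or aws_requester_pays:
        session = {
            "profile_name": aws_profile,
            "aws_unsigned": aws_no_sign_requests,
            "requester_pays": aws_requester_pays,
        }
        return {"session": session, **envopts}
    return envopts


assert "session" in build_env_kwargs(aws_requester_pays=True)
assert "session" not in build_env_kwargs()
```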
gh_patches_debug_8150 | rasdani/github-patches | git_diff | iterative__dvc-6845 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plots --open: not working on macOS
We use relpath to open files in the browser.
https://github.com/iterative/dvc/blob/59cfc2c4a43e9b9d2fdb583da80d24e82ae39c26/dvc/command/plots.py#L60
This is mostly done to support `wsl`, plus the fact that all other platforms seem to work whether the path is relative or absolute.
The reason it does not work is that `webbrowser.open` messes up. 🤦🏼
https://github.com/python/cpython/blob/39dd141a4ba68bbb38fd00a65cdcff711acdafb5/Lib/webbrowser.py#L638-L639
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/command/plots.py`
Content:
```
1 import argparse
2 import logging
3
4 from dvc.command import completion
5 from dvc.command.base import CmdBase, append_doc_link, fix_subparsers
6 from dvc.exceptions import DvcException
7 from dvc.render.utils import find_vega, render
8 from dvc.ui import ui
9 from dvc.utils import format_link
10
11 logger = logging.getLogger(__name__)
12
13
14 class CmdPlots(CmdBase):
15 def _func(self, *args, **kwargs):
16 raise NotImplementedError
17
18 def _props(self):
19 from dvc.schema import PLOT_PROPS
20
21 # Pass only props specified by user, to not shadow ones from plot def
22 props = {p: getattr(self.args, p) for p in PLOT_PROPS}
23 return {k: v for k, v in props.items() if v is not None}
24
25 def run(self):
26 from pathlib import Path
27
28 if self.args.show_vega:
29 if not self.args.targets:
30 logger.error("please specify a target for `--show-vega`")
31 return 1
32 if len(self.args.targets) > 1:
33 logger.error(
34 "you can only specify one target for `--show-vega`"
35 )
36 return 1
37
38 try:
39 plots_data = self._func(
40 targets=self.args.targets, props=self._props()
41 )
42
43 if not plots_data:
44 ui.error_write(
45 "No plots were loaded, "
46 "visualization file will not be created."
47 )
48
49 if self.args.show_vega:
50 target = self.args.targets[0]
51 plot_json = find_vega(self.repo, plots_data, target)
52 if plot_json:
53 ui.write(plot_json)
54 return 0
55
56 rel: str = self.args.out or "dvc_plots"
57 path: Path = (Path.cwd() / rel).resolve()
58 index_path = render(
59 self.repo,
60 plots_data,
61 path=path,
62 html_template_path=self.args.html_template,
63 )
64
65 assert index_path.is_absolute()
66 url = index_path.as_uri()
67 ui.write(url)
68
69 if self.args.open:
70 import webbrowser
71
72 opened = webbrowser.open(index_path)
73 if not opened:
74 ui.error_write(
75 "Failed to open. Please try opening it manually."
76 )
77 return 1
78 return 0
79
80 except DvcException:
81 logger.exception("")
82 return 1
83
84
85 class CmdPlotsShow(CmdPlots):
86 UNINITIALIZED = True
87
88 def _func(self, *args, **kwargs):
89 return self.repo.plots.show(*args, **kwargs)
90
91
92 class CmdPlotsDiff(CmdPlots):
93 UNINITIALIZED = True
94
95 def _func(self, *args, **kwargs):
96 return self.repo.plots.diff(
97 *args,
98 revs=self.args.revisions,
99 experiment=self.args.experiment,
100 **kwargs,
101 )
102
103
104 class CmdPlotsModify(CmdPlots):
105 def run(self):
106 self.repo.plots.modify(
107 self.args.target, props=self._props(), unset=self.args.unset
108 )
109 return 0
110
111
112 def add_parser(subparsers, parent_parser):
113 PLOTS_HELP = (
114 "Commands to visualize and compare plot metrics in structured files "
115 "(JSON, YAML, CSV, TSV)."
116 )
117
118 plots_parser = subparsers.add_parser(
119 "plots",
120 parents=[parent_parser],
121 description=append_doc_link(PLOTS_HELP, "plots"),
122 help=PLOTS_HELP,
123 formatter_class=argparse.RawDescriptionHelpFormatter,
124 )
125 plots_subparsers = plots_parser.add_subparsers(
126 dest="cmd",
127 help="Use `dvc plots CMD --help` to display command-specific help.",
128 )
129
130 fix_subparsers(plots_subparsers)
131
132 SHOW_HELP = "Generate plots from metrics files."
133 plots_show_parser = plots_subparsers.add_parser(
134 "show",
135 parents=[parent_parser],
136 description=append_doc_link(SHOW_HELP, "plots/show"),
137 help=SHOW_HELP,
138 formatter_class=argparse.RawDescriptionHelpFormatter,
139 )
140 plots_show_parser.add_argument(
141 "targets",
142 nargs="*",
143 help="Files to visualize (supports any file, "
144 "even when not found as `plots` in `dvc.yaml`). "
145 "Shows all plots by default.",
146 ).complete = completion.FILE
147 _add_props_arguments(plots_show_parser)
148 _add_output_arguments(plots_show_parser)
149 plots_show_parser.set_defaults(func=CmdPlotsShow)
150
151 PLOTS_DIFF_HELP = (
152 "Show multiple versions of plot metrics "
153 "by plotting them in a single image."
154 )
155 plots_diff_parser = plots_subparsers.add_parser(
156 "diff",
157 parents=[parent_parser],
158 description=append_doc_link(PLOTS_DIFF_HELP, "plots/diff"),
159 help=PLOTS_DIFF_HELP,
160 formatter_class=argparse.RawDescriptionHelpFormatter,
161 )
162 plots_diff_parser.add_argument(
163 "--targets",
164 nargs="*",
165 help=(
166 "Specific plots file(s) to visualize "
167 "(even if not found as `plots` in `dvc.yaml`). "
168 "Shows all tracked plots by default."
169 ),
170 metavar="<paths>",
171 ).complete = completion.FILE
172 plots_diff_parser.add_argument(
173 "-e",
174 "--experiment",
175 action="store_true",
176 default=False,
177 help=argparse.SUPPRESS,
178 )
179 plots_diff_parser.add_argument(
180 "revisions", nargs="*", default=None, help="Git commits to plot from"
181 )
182 _add_props_arguments(plots_diff_parser)
183 _add_output_arguments(plots_diff_parser)
184 plots_diff_parser.set_defaults(func=CmdPlotsDiff)
185
186 PLOTS_MODIFY_HELP = (
187 "Modify display properties of data-series plots "
188 "(has no effect on image-type plots)."
189 )
190 plots_modify_parser = plots_subparsers.add_parser(
191 "modify",
192 parents=[parent_parser],
193 description=append_doc_link(PLOTS_MODIFY_HELP, "plots/modify"),
194 help=PLOTS_MODIFY_HELP,
195 formatter_class=argparse.RawDescriptionHelpFormatter,
196 )
197 plots_modify_parser.add_argument(
198 "target", help="Metric file to set properties to"
199 ).complete = completion.FILE
200 _add_props_arguments(plots_modify_parser)
201 plots_modify_parser.add_argument(
202 "--unset",
203 nargs="*",
204 metavar="<property>",
205 help="Unset one or more display properties.",
206 )
207 plots_modify_parser.set_defaults(func=CmdPlotsModify)
208
209
210 def _add_props_arguments(parser):
211 parser.add_argument(
212 "-t",
213 "--template",
214 nargs="?",
215 default=None,
216 help=(
217 "Special JSON or HTML schema file to inject with the data. "
218 "See {}".format(
219 format_link("https://man.dvc.org/plots#plot-templates")
220 )
221 ),
222 metavar="<path>",
223 ).complete = completion.FILE
224 parser.add_argument(
225 "-x", default=None, help="Field name for X axis.", metavar="<field>"
226 )
227 parser.add_argument(
228 "-y", default=None, help="Field name for Y axis.", metavar="<field>"
229 )
230 parser.add_argument(
231 "--no-header",
232 action="store_false",
233 dest="header",
234 default=None, # Use default None to distinguish when it's not used
235 help="Provided CSV or TSV datafile does not have a header.",
236 )
237 parser.add_argument(
238 "--title", default=None, metavar="<text>", help="Plot title."
239 )
240 parser.add_argument(
241 "--x-label", default=None, help="X axis label", metavar="<text>"
242 )
243 parser.add_argument(
244 "--y-label", default=None, help="Y axis label", metavar="<text>"
245 )
246
247
248 def _add_output_arguments(parser):
249 parser.add_argument(
250 "-o",
251 "--out",
252 default=None,
253 help="Destination path to save plots to",
254 metavar="<path>",
255 ).complete = completion.DIR
256 parser.add_argument(
257 "--show-vega",
258 action="store_true",
259 default=False,
260 help="Show output in Vega format.",
261 )
262 parser.add_argument(
263 "--open",
264 action="store_true",
265 default=False,
266 help="Open plot file directly in the browser.",
267 )
268 parser.add_argument(
269 "--html-template",
270 default=None,
271 help="Custom HTML template for VEGA visualization.",
272 metavar="<path>",
273 )
274
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/command/plots.py b/dvc/command/plots.py
--- a/dvc/command/plots.py
+++ b/dvc/command/plots.py
@@ -68,8 +68,12 @@
if self.args.open:
import webbrowser
+ from platform import uname
- opened = webbrowser.open(index_path)
+ if "Microsoft" in uname().release:
+ url = Path(rel) / "index.html"
+
+ opened = webbrowser.open(url)
if not opened:
ui.error_write(
"Failed to open. Please try opening it manually."
| {"golden_diff": "diff --git a/dvc/command/plots.py b/dvc/command/plots.py\n--- a/dvc/command/plots.py\n+++ b/dvc/command/plots.py\n@@ -68,8 +68,12 @@\n \n if self.args.open:\n import webbrowser\n+ from platform import uname\n \n- opened = webbrowser.open(index_path)\n+ if \"Microsoft\" in uname().release:\n+ url = Path(rel) / \"index.html\"\n+\n+ opened = webbrowser.open(url)\n if not opened:\n ui.error_write(\n \"Failed to open. Please try opening it manually.\"\n", "issue": "plots --open: not working on macOS\nWe use relpath to open files in the browser.\r\n\r\nhttps://github.com/iterative/dvc/blob/59cfc2c4a43e9b9d2fdb583da80d24e82ae39c26/dvc/command/plots.py#L60\r\n\r\nThis is mostly done to support `wsl`, and the fact that all other Platform seem to work, if it's relative or absolute.\r\n\r\nThe reason why it does not work is because `web browser.open` messes up. \ud83e\udd26\ud83c\udffc \r\nhttps://github.com/python/cpython/blob/39dd141a4ba68bbb38fd00a65cdcff711acdafb5/Lib/webbrowser.py#L638-L639\nplots --open: not working on macOS\nWe use relpath to open files in the browser.\r\n\r\nhttps://github.com/iterative/dvc/blob/59cfc2c4a43e9b9d2fdb583da80d24e82ae39c26/dvc/command/plots.py#L60\r\n\r\nThis is mostly done to support `wsl`, and the fact that all other Platform seem to work, if it's relative or absolute.\r\n\r\nThe reason why it does not work is because `web browser.open` messes up. \ud83e\udd26\ud83c\udffc \r\nhttps://github.com/python/cpython/blob/39dd141a4ba68bbb38fd00a65cdcff711acdafb5/Lib/webbrowser.py#L638-L639\n", "before_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.command import completion\nfrom dvc.command.base import CmdBase, append_doc_link, fix_subparsers\nfrom dvc.exceptions import DvcException\nfrom dvc.render.utils import find_vega, render\nfrom dvc.ui import ui\nfrom dvc.utils import format_link\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdPlots(CmdBase):\n def _func(self, *args, **kwargs):\n raise NotImplementedError\n\n def _props(self):\n from dvc.schema import PLOT_PROPS\n\n # Pass only props specified by user, to not shadow ones from plot def\n props = {p: getattr(self.args, p) for p in PLOT_PROPS}\n return {k: v for k, v in props.items() if v is not None}\n\n def run(self):\n from pathlib import Path\n\n if self.args.show_vega:\n if not self.args.targets:\n logger.error(\"please specify a target for `--show-vega`\")\n return 1\n if len(self.args.targets) > 1:\n logger.error(\n \"you can only specify one target for `--show-vega`\"\n )\n return 1\n\n try:\n plots_data = self._func(\n targets=self.args.targets, props=self._props()\n )\n\n if not plots_data:\n ui.error_write(\n \"No plots were loaded, \"\n \"visualization file will not be created.\"\n )\n\n if self.args.show_vega:\n target = self.args.targets[0]\n plot_json = find_vega(self.repo, plots_data, target)\n if plot_json:\n ui.write(plot_json)\n return 0\n\n rel: str = self.args.out or \"dvc_plots\"\n path: Path = (Path.cwd() / rel).resolve()\n index_path = render(\n self.repo,\n plots_data,\n path=path,\n html_template_path=self.args.html_template,\n )\n\n assert index_path.is_absolute()\n url = index_path.as_uri()\n ui.write(url)\n\n if self.args.open:\n import webbrowser\n\n opened = webbrowser.open(index_path)\n if not opened:\n ui.error_write(\n \"Failed to open. 
Please try opening it manually.\"\n )\n return 1\n return 0\n\n except DvcException:\n logger.exception(\"\")\n return 1\n\n\nclass CmdPlotsShow(CmdPlots):\n UNINITIALIZED = True\n\n def _func(self, *args, **kwargs):\n return self.repo.plots.show(*args, **kwargs)\n\n\nclass CmdPlotsDiff(CmdPlots):\n UNINITIALIZED = True\n\n def _func(self, *args, **kwargs):\n return self.repo.plots.diff(\n *args,\n revs=self.args.revisions,\n experiment=self.args.experiment,\n **kwargs,\n )\n\n\nclass CmdPlotsModify(CmdPlots):\n def run(self):\n self.repo.plots.modify(\n self.args.target, props=self._props(), unset=self.args.unset\n )\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n PLOTS_HELP = (\n \"Commands to visualize and compare plot metrics in structured files \"\n \"(JSON, YAML, CSV, TSV).\"\n )\n\n plots_parser = subparsers.add_parser(\n \"plots\",\n parents=[parent_parser],\n description=append_doc_link(PLOTS_HELP, \"plots\"),\n help=PLOTS_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n plots_subparsers = plots_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use `dvc plots CMD --help` to display command-specific help.\",\n )\n\n fix_subparsers(plots_subparsers)\n\n SHOW_HELP = \"Generate plots from metrics files.\"\n plots_show_parser = plots_subparsers.add_parser(\n \"show\",\n parents=[parent_parser],\n description=append_doc_link(SHOW_HELP, \"plots/show\"),\n help=SHOW_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n plots_show_parser.add_argument(\n \"targets\",\n nargs=\"*\",\n help=\"Files to visualize (supports any file, \"\n \"even when not found as `plots` in `dvc.yaml`). \"\n \"Shows all plots by default.\",\n ).complete = completion.FILE\n _add_props_arguments(plots_show_parser)\n _add_output_arguments(plots_show_parser)\n plots_show_parser.set_defaults(func=CmdPlotsShow)\n\n PLOTS_DIFF_HELP = (\n \"Show multiple versions of plot metrics \"\n \"by plotting them in a single image.\"\n )\n plots_diff_parser = plots_subparsers.add_parser(\n \"diff\",\n parents=[parent_parser],\n description=append_doc_link(PLOTS_DIFF_HELP, \"plots/diff\"),\n help=PLOTS_DIFF_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n plots_diff_parser.add_argument(\n \"--targets\",\n nargs=\"*\",\n help=(\n \"Specific plots file(s) to visualize \"\n \"(even if not found as `plots` in `dvc.yaml`). 
\"\n \"Shows all tracked plots by default.\"\n ),\n metavar=\"<paths>\",\n ).complete = completion.FILE\n plots_diff_parser.add_argument(\n \"-e\",\n \"--experiment\",\n action=\"store_true\",\n default=False,\n help=argparse.SUPPRESS,\n )\n plots_diff_parser.add_argument(\n \"revisions\", nargs=\"*\", default=None, help=\"Git commits to plot from\"\n )\n _add_props_arguments(plots_diff_parser)\n _add_output_arguments(plots_diff_parser)\n plots_diff_parser.set_defaults(func=CmdPlotsDiff)\n\n PLOTS_MODIFY_HELP = (\n \"Modify display properties of data-series plots \"\n \"(has no effect on image-type plots).\"\n )\n plots_modify_parser = plots_subparsers.add_parser(\n \"modify\",\n parents=[parent_parser],\n description=append_doc_link(PLOTS_MODIFY_HELP, \"plots/modify\"),\n help=PLOTS_MODIFY_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n plots_modify_parser.add_argument(\n \"target\", help=\"Metric file to set properties to\"\n ).complete = completion.FILE\n _add_props_arguments(plots_modify_parser)\n plots_modify_parser.add_argument(\n \"--unset\",\n nargs=\"*\",\n metavar=\"<property>\",\n help=\"Unset one or more display properties.\",\n )\n plots_modify_parser.set_defaults(func=CmdPlotsModify)\n\n\ndef _add_props_arguments(parser):\n parser.add_argument(\n \"-t\",\n \"--template\",\n nargs=\"?\",\n default=None,\n help=(\n \"Special JSON or HTML schema file to inject with the data. \"\n \"See {}\".format(\n format_link(\"https://man.dvc.org/plots#plot-templates\")\n )\n ),\n metavar=\"<path>\",\n ).complete = completion.FILE\n parser.add_argument(\n \"-x\", default=None, help=\"Field name for X axis.\", metavar=\"<field>\"\n )\n parser.add_argument(\n \"-y\", default=None, help=\"Field name for Y axis.\", metavar=\"<field>\"\n )\n parser.add_argument(\n \"--no-header\",\n action=\"store_false\",\n dest=\"header\",\n default=None, # Use default None to distinguish when it's not used\n help=\"Provided CSV or TSV datafile does not have a header.\",\n )\n parser.add_argument(\n \"--title\", default=None, metavar=\"<text>\", help=\"Plot title.\"\n )\n parser.add_argument(\n \"--x-label\", default=None, help=\"X axis label\", metavar=\"<text>\"\n )\n parser.add_argument(\n \"--y-label\", default=None, help=\"Y axis label\", metavar=\"<text>\"\n )\n\n\ndef _add_output_arguments(parser):\n parser.add_argument(\n \"-o\",\n \"--out\",\n default=None,\n help=\"Destination path to save plots to\",\n metavar=\"<path>\",\n ).complete = completion.DIR\n parser.add_argument(\n \"--show-vega\",\n action=\"store_true\",\n default=False,\n help=\"Show output in Vega format.\",\n )\n parser.add_argument(\n \"--open\",\n action=\"store_true\",\n default=False,\n help=\"Open plot file directly in the browser.\",\n )\n parser.add_argument(\n \"--html-template\",\n default=None,\n help=\"Custom HTML template for VEGA visualization.\",\n metavar=\"<path>\",\n )\n", "path": "dvc/command/plots.py"}], "after_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.command import completion\nfrom dvc.command.base import CmdBase, append_doc_link, fix_subparsers\nfrom dvc.exceptions import DvcException\nfrom dvc.render.utils import find_vega, render\nfrom dvc.ui import ui\nfrom dvc.utils import format_link\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdPlots(CmdBase):\n def _func(self, *args, **kwargs):\n raise NotImplementedError\n\n def _props(self):\n from dvc.schema import PLOT_PROPS\n\n # Pass only props specified by user, to not shadow ones from plot def\n props = {p: 
getattr(self.args, p) for p in PLOT_PROPS}\n return {k: v for k, v in props.items() if v is not None}\n\n def run(self):\n from pathlib import Path\n\n if self.args.show_vega:\n if not self.args.targets:\n logger.error(\"please specify a target for `--show-vega`\")\n return 1\n if len(self.args.targets) > 1:\n logger.error(\n \"you can only specify one target for `--show-vega`\"\n )\n return 1\n\n try:\n plots_data = self._func(\n targets=self.args.targets, props=self._props()\n )\n\n if not plots_data:\n ui.error_write(\n \"No plots were loaded, \"\n \"visualization file will not be created.\"\n )\n\n if self.args.show_vega:\n target = self.args.targets[0]\n plot_json = find_vega(self.repo, plots_data, target)\n if plot_json:\n ui.write(plot_json)\n return 0\n\n rel: str = self.args.out or \"dvc_plots\"\n path: Path = (Path.cwd() / rel).resolve()\n index_path = render(\n self.repo,\n plots_data,\n path=path,\n html_template_path=self.args.html_template,\n )\n\n assert index_path.is_absolute()\n url = index_path.as_uri()\n ui.write(url)\n\n if self.args.open:\n import webbrowser\n from platform import uname\n\n if \"Microsoft\" in uname().release:\n url = Path(rel) / \"index.html\"\n\n opened = webbrowser.open(url)\n if not opened:\n ui.error_write(\n \"Failed to open. Please try opening it manually.\"\n )\n return 1\n return 0\n\n except DvcException:\n logger.exception(\"\")\n return 1\n\n\nclass CmdPlotsShow(CmdPlots):\n UNINITIALIZED = True\n\n def _func(self, *args, **kwargs):\n return self.repo.plots.show(*args, **kwargs)\n\n\nclass CmdPlotsDiff(CmdPlots):\n UNINITIALIZED = True\n\n def _func(self, *args, **kwargs):\n return self.repo.plots.diff(\n *args,\n revs=self.args.revisions,\n experiment=self.args.experiment,\n **kwargs,\n )\n\n\nclass CmdPlotsModify(CmdPlots):\n def run(self):\n self.repo.plots.modify(\n self.args.target, props=self._props(), unset=self.args.unset\n )\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n PLOTS_HELP = (\n \"Commands to visualize and compare plot metrics in structured files \"\n \"(JSON, YAML, CSV, TSV).\"\n )\n\n plots_parser = subparsers.add_parser(\n \"plots\",\n parents=[parent_parser],\n description=append_doc_link(PLOTS_HELP, \"plots\"),\n help=PLOTS_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n plots_subparsers = plots_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use `dvc plots CMD --help` to display command-specific help.\",\n )\n\n fix_subparsers(plots_subparsers)\n\n SHOW_HELP = \"Generate plots from metrics files.\"\n plots_show_parser = plots_subparsers.add_parser(\n \"show\",\n parents=[parent_parser],\n description=append_doc_link(SHOW_HELP, \"plots/show\"),\n help=SHOW_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n plots_show_parser.add_argument(\n \"targets\",\n nargs=\"*\",\n help=\"Files to visualize (supports any file, \"\n \"even when not found as `plots` in `dvc.yaml`). 
\"\n \"Shows all plots by default.\",\n ).complete = completion.FILE\n _add_props_arguments(plots_show_parser)\n _add_output_arguments(plots_show_parser)\n plots_show_parser.set_defaults(func=CmdPlotsShow)\n\n PLOTS_DIFF_HELP = (\n \"Show multiple versions of plot metrics \"\n \"by plotting them in a single image.\"\n )\n plots_diff_parser = plots_subparsers.add_parser(\n \"diff\",\n parents=[parent_parser],\n description=append_doc_link(PLOTS_DIFF_HELP, \"plots/diff\"),\n help=PLOTS_DIFF_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n plots_diff_parser.add_argument(\n \"--targets\",\n nargs=\"*\",\n help=(\n \"Specific plots file(s) to visualize \"\n \"(even if not found as `plots` in `dvc.yaml`). \"\n \"Shows all tracked plots by default.\"\n ),\n metavar=\"<paths>\",\n ).complete = completion.FILE\n plots_diff_parser.add_argument(\n \"-e\",\n \"--experiment\",\n action=\"store_true\",\n default=False,\n help=argparse.SUPPRESS,\n )\n plots_diff_parser.add_argument(\n \"revisions\", nargs=\"*\", default=None, help=\"Git commits to plot from\"\n )\n _add_props_arguments(plots_diff_parser)\n _add_output_arguments(plots_diff_parser)\n plots_diff_parser.set_defaults(func=CmdPlotsDiff)\n\n PLOTS_MODIFY_HELP = (\n \"Modify display properties of data-series plots \"\n \"(has no effect on image-type plots).\"\n )\n plots_modify_parser = plots_subparsers.add_parser(\n \"modify\",\n parents=[parent_parser],\n description=append_doc_link(PLOTS_MODIFY_HELP, \"plots/modify\"),\n help=PLOTS_MODIFY_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n plots_modify_parser.add_argument(\n \"target\", help=\"Metric file to set properties to\"\n ).complete = completion.FILE\n _add_props_arguments(plots_modify_parser)\n plots_modify_parser.add_argument(\n \"--unset\",\n nargs=\"*\",\n metavar=\"<property>\",\n help=\"Unset one or more display properties.\",\n )\n plots_modify_parser.set_defaults(func=CmdPlotsModify)\n\n\ndef _add_props_arguments(parser):\n parser.add_argument(\n \"-t\",\n \"--template\",\n nargs=\"?\",\n default=None,\n help=(\n \"Special JSON or HTML schema file to inject with the data. 
\"\n \"See {}\".format(\n format_link(\"https://man.dvc.org/plots#plot-templates\")\n )\n ),\n metavar=\"<path>\",\n ).complete = completion.FILE\n parser.add_argument(\n \"-x\", default=None, help=\"Field name for X axis.\", metavar=\"<field>\"\n )\n parser.add_argument(\n \"-y\", default=None, help=\"Field name for Y axis.\", metavar=\"<field>\"\n )\n parser.add_argument(\n \"--no-header\",\n action=\"store_false\",\n dest=\"header\",\n default=None, # Use default None to distinguish when it's not used\n help=\"Provided CSV or TSV datafile does not have a header.\",\n )\n parser.add_argument(\n \"--title\", default=None, metavar=\"<text>\", help=\"Plot title.\"\n )\n parser.add_argument(\n \"--x-label\", default=None, help=\"X axis label\", metavar=\"<text>\"\n )\n parser.add_argument(\n \"--y-label\", default=None, help=\"Y axis label\", metavar=\"<text>\"\n )\n\n\ndef _add_output_arguments(parser):\n parser.add_argument(\n \"-o\",\n \"--out\",\n default=None,\n help=\"Destination path to save plots to\",\n metavar=\"<path>\",\n ).complete = completion.DIR\n parser.add_argument(\n \"--show-vega\",\n action=\"store_true\",\n default=False,\n help=\"Show output in Vega format.\",\n )\n parser.add_argument(\n \"--open\",\n action=\"store_true\",\n default=False,\n help=\"Open plot file directly in the browser.\",\n )\n parser.add_argument(\n \"--html-template\",\n default=None,\n help=\"Custom HTML template for VEGA visualization.\",\n metavar=\"<path>\",\n )\n", "path": "dvc/command/plots.py"}]} | 3,158 | 133 |
gh_patches_debug_34111 | rasdani/github-patches | git_diff | crytic__slither-2431 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: EVM printer fails for Abstract Contracts
### Describe the issue:
If slither is run with `slither --print evm contract.sol`, where contract.sol is an abstract contract, it will fail with the following error:
```shell
➜ slither --print evm contract.sol
File "/Users/dm/Projects/slither/slither/__main__.py", line 143, in _process
printer_results = slither.run_printers()
^^^^^^^^^^^^^^^^^^^^^^
File "/Users/dm/Projects/slither/slither/slither.py", line 300, in run_printers
return [p.output(self._crytic_compile.target).data for p in self._printers]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/dm/Projects/slither/slither/printers/summary/evm.py", line 78, in output
evm_info = _extract_evm_info(self.slither)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/dm/Projects/slither/slither/printers/summary/evm.py", line 30, in _extract_evm_info
cfg = CFG(contract_bytecode_runtime)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/dm/Projects/slither/env/lib/python3.12/site-packages/evm_cfg_builder/cfg/__init__.py", line 96, in __init__
self.create_functions()
File "/Users/dm/Projects/slither/env/lib/python3.12/site-packages/evm_cfg_builder/cfg/__init__.py", line 183, in create_functions
self.compute_functions(self._basic_blocks[0], True)
~~~~~~~~~~~~~~~~~~^^^
```
### Code example to reproduce the issue:
`abstract contract Ownable {}`
### Version:
0.10.2
### Relevant log output:
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/printers/summary/evm.py`
Content:
```
1 """
2 Module printing evm mapping of the contract
3 """
4 from slither.printers.abstract_printer import AbstractPrinter
5 from slither.analyses.evm import (
6 generate_source_to_evm_ins_mapping,
7 load_evm_cfg_builder,
8 )
9 from slither.utils.colors import blue, green, magenta, red
10
11
12 def _extract_evm_info(slither):
13 """
14 Extract evm information for all derived contracts using evm_cfg_builder
15
16 Returns: evm CFG and Solidity source to Program Counter (pc) mapping
17 """
18
19 evm_info = {}
20
21 CFG = load_evm_cfg_builder()
22
23 for contract in slither.contracts_derived:
24 contract_bytecode_runtime = contract.file_scope.bytecode_runtime(
25 contract.compilation_unit.crytic_compile_compilation_unit, contract.name
26 )
27 contract_srcmap_runtime = contract.file_scope.srcmap_runtime(
28 contract.compilation_unit.crytic_compile_compilation_unit, contract.name
29 )
30 cfg = CFG(contract_bytecode_runtime)
31 evm_info["cfg", contract.name] = cfg
32 evm_info["mapping", contract.name] = generate_source_to_evm_ins_mapping(
33 cfg.instructions,
34 contract_srcmap_runtime,
35 slither,
36 contract.source_mapping.filename.absolute,
37 )
38
39 contract_bytecode_init = contract.file_scope.bytecode_init(
40 contract.compilation_unit.crytic_compile_compilation_unit, contract.name
41 )
42 contract_srcmap_init = contract.file_scope.srcmap_init(
43 contract.compilation_unit.crytic_compile_compilation_unit, contract.name
44 )
45 cfg_init = CFG(contract_bytecode_init)
46
47 evm_info["cfg_init", contract.name] = cfg_init
48 evm_info["mapping_init", contract.name] = generate_source_to_evm_ins_mapping(
49 cfg_init.instructions,
50 contract_srcmap_init,
51 slither,
52 contract.source_mapping.filename.absolute,
53 )
54
55 return evm_info
56
57
58 # pylint: disable=too-many-locals
59 class PrinterEVM(AbstractPrinter):
60 ARGUMENT = "evm"
61 HELP = "Print the evm instructions of nodes in functions"
62
63 WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#evm"
64
65 def output(self, _filename):
66 """
67 _filename is not used
68 Args:
69 _filename(string)
70 """
71
72 txt = ""
73 if not self.slither.crytic_compile:
74 txt = "The EVM printer requires to compile with crytic-compile"
75 self.info(red(txt))
76 res = self.generate_output(txt)
77 return res
78 evm_info = _extract_evm_info(self.slither)
79
80 for contract in self.slither.contracts_derived:
81 txt += blue(f"Contract {contract.name}\n")
82
83 contract_file = self.slither.source_code[
84 contract.source_mapping.filename.absolute
85 ].encode("utf-8")
86 with open(contract.source_mapping.filename.absolute, "r", encoding="utf8") as f:
87 contract_file_lines = f.readlines()
88
89 contract_pcs = {}
90 contract_cfg = {}
91
92 for function in contract.functions:
93 txt += blue(f"\tFunction {function.canonical_name}\n")
94
95 # CFG and source mapping depend on function being constructor or not
96 if function.is_constructor:
97 contract_cfg = evm_info["cfg_init", contract.name]
98 contract_pcs = evm_info["mapping_init", contract.name]
99 else:
100 contract_cfg = evm_info["cfg", contract.name]
101 contract_pcs = evm_info["mapping", contract.name]
102
103 for node in function.nodes:
104 txt += green("\t\tNode: " + str(node) + "\n")
105 node_source_line = (
106 contract_file[0 : node.source_mapping.start].count("\n".encode("utf-8")) + 1
107 )
108 txt += green(
109 f"\t\tSource line {node_source_line}: {contract_file_lines[node_source_line - 1].rstrip()}\n"
110 )
111 txt += magenta("\t\tEVM Instructions:\n")
112 node_pcs = contract_pcs.get(node_source_line, [])
113 for pc in node_pcs:
114 txt += magenta(f"\t\t\t{hex(pc)}: {contract_cfg.get_instruction_at(pc)}\n")
115
116 for modifier in contract.modifiers:
117 txt += blue(f"\tModifier {modifier.canonical_name}\n")
118 for node in modifier.nodes:
119 txt += green("\t\tNode: " + str(node) + "\n")
120 node_source_line = (
121 contract_file[0 : node.source_mapping.start].count("\n".encode("utf-8")) + 1
122 )
123 txt += green(
124 f"\t\tSource line {node_source_line}: {contract_file_lines[node_source_line - 1].rstrip()}\n"
125 )
126 txt += magenta("\t\tEVM Instructions:\n")
127 node_pcs = contract_pcs.get(node_source_line, [])
128 for pc in node_pcs:
129 txt += magenta(f"\t\t\t{hex(pc)}: {contract_cfg.get_instruction_at(pc)}\n")
130
131 self.info(txt)
132 res = self.generate_output(txt)
133 return res
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/slither/printers/summary/evm.py b/slither/printers/summary/evm.py
--- a/slither/printers/summary/evm.py
+++ b/slither/printers/summary/evm.py
@@ -1,6 +1,8 @@
"""
Module printing evm mapping of the contract
"""
+import logging
+
from slither.printers.abstract_printer import AbstractPrinter
from slither.analyses.evm import (
generate_source_to_evm_ins_mapping,
@@ -9,6 +11,9 @@
from slither.utils.colors import blue, green, magenta, red
+logger: logging.Logger = logging.getLogger("EVMPrinter")
+
+
def _extract_evm_info(slither):
"""
Extract evm information for all derived contracts using evm_cfg_builder
@@ -24,6 +29,16 @@
contract_bytecode_runtime = contract.file_scope.bytecode_runtime(
contract.compilation_unit.crytic_compile_compilation_unit, contract.name
)
+
+ if not contract_bytecode_runtime:
+ logger.info(
+ "Contract %s (abstract: %r) has no bytecode runtime, skipping. ",
+ contract.name,
+ contract.is_abstract,
+ )
+ evm_info["empty", contract.name] = True
+ continue
+
contract_srcmap_runtime = contract.file_scope.srcmap_runtime(
contract.compilation_unit.crytic_compile_compilation_unit, contract.name
)
@@ -80,6 +95,10 @@
for contract in self.slither.contracts_derived:
txt += blue(f"Contract {contract.name}\n")
+ if evm_info.get(("empty", contract.name), False):
+ txt += "\tempty contract\n"
+ continue
+
contract_file = self.slither.source_code[
contract.source_mapping.filename.absolute
].encode("utf-8")
| {"golden_diff": "diff --git a/slither/printers/summary/evm.py b/slither/printers/summary/evm.py\n--- a/slither/printers/summary/evm.py\n+++ b/slither/printers/summary/evm.py\n@@ -1,6 +1,8 @@\n \"\"\"\n Module printing evm mapping of the contract\n \"\"\"\n+import logging\n+\n from slither.printers.abstract_printer import AbstractPrinter\n from slither.analyses.evm import (\n generate_source_to_evm_ins_mapping,\n@@ -9,6 +11,9 @@\n from slither.utils.colors import blue, green, magenta, red\n \n \n+logger: logging.Logger = logging.getLogger(\"EVMPrinter\")\n+\n+\n def _extract_evm_info(slither):\n \"\"\"\n Extract evm information for all derived contracts using evm_cfg_builder\n@@ -24,6 +29,16 @@\n contract_bytecode_runtime = contract.file_scope.bytecode_runtime(\n contract.compilation_unit.crytic_compile_compilation_unit, contract.name\n )\n+\n+ if not contract_bytecode_runtime:\n+ logger.info(\n+ \"Contract %s (abstract: %r) has no bytecode runtime, skipping. \",\n+ contract.name,\n+ contract.is_abstract,\n+ )\n+ evm_info[\"empty\", contract.name] = True\n+ continue\n+\n contract_srcmap_runtime = contract.file_scope.srcmap_runtime(\n contract.compilation_unit.crytic_compile_compilation_unit, contract.name\n )\n@@ -80,6 +95,10 @@\n for contract in self.slither.contracts_derived:\n txt += blue(f\"Contract {contract.name}\\n\")\n \n+ if evm_info.get((\"empty\", contract.name), False):\n+ txt += \"\\tempty contract\\n\"\n+ continue\n+\n contract_file = self.slither.source_code[\n contract.source_mapping.filename.absolute\n ].encode(\"utf-8\")\n", "issue": "[Bug]: EVM printer fails for Abstract Contracts\n### Describe the issue:\n\nIf slither is runned with `slither --print evm contract.sol` where contract.sol is an abstract contract, it will fail with the following error:\r\n\r\n```shell\r\n\u279c slither --print evm contract.sol\r\n\r\n\r\n File \"/Users/dm/Projects/slither/slither/__main__.py\", line 143, in _process\r\n printer_results = slither.run_printers()\r\n ^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/Users/dm/Projects/slither/slither/slither.py\", line 300, in run_printers\r\n return [p.output(self._crytic_compile.target).data for p in self._printers]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/Users/dm/Projects/slither/slither/printers/summary/evm.py\", line 78, in output\r\n evm_info = _extract_evm_info(self.slither)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/Users/dm/Projects/slither/slither/printers/summary/evm.py\", line 30, in _extract_evm_info\r\n cfg = CFG(contract_bytecode_runtime)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/Users/dm/Projects/slither/env/lib/python3.12/site-packages/evm_cfg_builder/cfg/__init__.py\", line 96, in __init__\r\n self.create_functions()\r\n File \"/Users/dm/Projects/slither/env/lib/python3.12/site-packages/evm_cfg_builder/cfg/__init__.py\", line 183, in create_functions\r\n self.compute_functions(self._basic_blocks[0], True)\r\n ~~~~~~~~~~~~~~~~~~^^^\r\n\r\n\r\n```\r\n\n\n### Code example to reproduce the issue:\n\n`abstract contract Ownable {}`\n\n### Version:\n\n0.10.2\n\n### Relevant log output:\n\n_No response_\n", "before_files": [{"content": "\"\"\"\n Module printing evm mapping of the contract\n\"\"\"\nfrom slither.printers.abstract_printer import AbstractPrinter\nfrom slither.analyses.evm import (\n generate_source_to_evm_ins_mapping,\n load_evm_cfg_builder,\n)\nfrom slither.utils.colors import blue, green, magenta, red\n\n\ndef _extract_evm_info(slither):\n \"\"\"\n Extract evm information for all derived contracts using 
evm_cfg_builder\n\n Returns: evm CFG and Solidity source to Program Counter (pc) mapping\n \"\"\"\n\n evm_info = {}\n\n CFG = load_evm_cfg_builder()\n\n for contract in slither.contracts_derived:\n contract_bytecode_runtime = contract.file_scope.bytecode_runtime(\n contract.compilation_unit.crytic_compile_compilation_unit, contract.name\n )\n contract_srcmap_runtime = contract.file_scope.srcmap_runtime(\n contract.compilation_unit.crytic_compile_compilation_unit, contract.name\n )\n cfg = CFG(contract_bytecode_runtime)\n evm_info[\"cfg\", contract.name] = cfg\n evm_info[\"mapping\", contract.name] = generate_source_to_evm_ins_mapping(\n cfg.instructions,\n contract_srcmap_runtime,\n slither,\n contract.source_mapping.filename.absolute,\n )\n\n contract_bytecode_init = contract.file_scope.bytecode_init(\n contract.compilation_unit.crytic_compile_compilation_unit, contract.name\n )\n contract_srcmap_init = contract.file_scope.srcmap_init(\n contract.compilation_unit.crytic_compile_compilation_unit, contract.name\n )\n cfg_init = CFG(contract_bytecode_init)\n\n evm_info[\"cfg_init\", contract.name] = cfg_init\n evm_info[\"mapping_init\", contract.name] = generate_source_to_evm_ins_mapping(\n cfg_init.instructions,\n contract_srcmap_init,\n slither,\n contract.source_mapping.filename.absolute,\n )\n\n return evm_info\n\n\n# pylint: disable=too-many-locals\nclass PrinterEVM(AbstractPrinter):\n ARGUMENT = \"evm\"\n HELP = \"Print the evm instructions of nodes in functions\"\n\n WIKI = \"https://github.com/trailofbits/slither/wiki/Printer-documentation#evm\"\n\n def output(self, _filename):\n \"\"\"\n _filename is not used\n Args:\n _filename(string)\n \"\"\"\n\n txt = \"\"\n if not self.slither.crytic_compile:\n txt = \"The EVM printer requires to compile with crytic-compile\"\n self.info(red(txt))\n res = self.generate_output(txt)\n return res\n evm_info = _extract_evm_info(self.slither)\n\n for contract in self.slither.contracts_derived:\n txt += blue(f\"Contract {contract.name}\\n\")\n\n contract_file = self.slither.source_code[\n contract.source_mapping.filename.absolute\n ].encode(\"utf-8\")\n with open(contract.source_mapping.filename.absolute, \"r\", encoding=\"utf8\") as f:\n contract_file_lines = f.readlines()\n\n contract_pcs = {}\n contract_cfg = {}\n\n for function in contract.functions:\n txt += blue(f\"\\tFunction {function.canonical_name}\\n\")\n\n # CFG and source mapping depend on function being constructor or not\n if function.is_constructor:\n contract_cfg = evm_info[\"cfg_init\", contract.name]\n contract_pcs = evm_info[\"mapping_init\", contract.name]\n else:\n contract_cfg = evm_info[\"cfg\", contract.name]\n contract_pcs = evm_info[\"mapping\", contract.name]\n\n for node in function.nodes:\n txt += green(\"\\t\\tNode: \" + str(node) + \"\\n\")\n node_source_line = (\n contract_file[0 : node.source_mapping.start].count(\"\\n\".encode(\"utf-8\")) + 1\n )\n txt += green(\n f\"\\t\\tSource line {node_source_line}: {contract_file_lines[node_source_line - 1].rstrip()}\\n\"\n )\n txt += magenta(\"\\t\\tEVM Instructions:\\n\")\n node_pcs = contract_pcs.get(node_source_line, [])\n for pc in node_pcs:\n txt += magenta(f\"\\t\\t\\t{hex(pc)}: {contract_cfg.get_instruction_at(pc)}\\n\")\n\n for modifier in contract.modifiers:\n txt += blue(f\"\\tModifier {modifier.canonical_name}\\n\")\n for node in modifier.nodes:\n txt += green(\"\\t\\tNode: \" + str(node) + \"\\n\")\n node_source_line = (\n contract_file[0 : node.source_mapping.start].count(\"\\n\".encode(\"utf-8\")) + 1\n )\n txt 
+= green(\n f\"\\t\\tSource line {node_source_line}: {contract_file_lines[node_source_line - 1].rstrip()}\\n\"\n )\n txt += magenta(\"\\t\\tEVM Instructions:\\n\")\n node_pcs = contract_pcs.get(node_source_line, [])\n for pc in node_pcs:\n txt += magenta(f\"\\t\\t\\t{hex(pc)}: {contract_cfg.get_instruction_at(pc)}\\n\")\n\n self.info(txt)\n res = self.generate_output(txt)\n return res\n", "path": "slither/printers/summary/evm.py"}], "after_files": [{"content": "\"\"\"\n Module printing evm mapping of the contract\n\"\"\"\nimport logging\n\nfrom slither.printers.abstract_printer import AbstractPrinter\nfrom slither.analyses.evm import (\n generate_source_to_evm_ins_mapping,\n load_evm_cfg_builder,\n)\nfrom slither.utils.colors import blue, green, magenta, red\n\n\nlogger: logging.Logger = logging.getLogger(\"EVMPrinter\")\n\n\ndef _extract_evm_info(slither):\n \"\"\"\n Extract evm information for all derived contracts using evm_cfg_builder\n\n Returns: evm CFG and Solidity source to Program Counter (pc) mapping\n \"\"\"\n\n evm_info = {}\n\n CFG = load_evm_cfg_builder()\n\n for contract in slither.contracts_derived:\n contract_bytecode_runtime = contract.file_scope.bytecode_runtime(\n contract.compilation_unit.crytic_compile_compilation_unit, contract.name\n )\n\n if not contract_bytecode_runtime:\n logger.info(\n \"Contract %s (abstract: %r) has no bytecode runtime, skipping. \",\n contract.name,\n contract.is_abstract,\n )\n evm_info[\"empty\", contract.name] = True\n continue\n\n contract_srcmap_runtime = contract.file_scope.srcmap_runtime(\n contract.compilation_unit.crytic_compile_compilation_unit, contract.name\n )\n cfg = CFG(contract_bytecode_runtime)\n evm_info[\"cfg\", contract.name] = cfg\n evm_info[\"mapping\", contract.name] = generate_source_to_evm_ins_mapping(\n cfg.instructions,\n contract_srcmap_runtime,\n slither,\n contract.source_mapping.filename.absolute,\n )\n\n contract_bytecode_init = contract.file_scope.bytecode_init(\n contract.compilation_unit.crytic_compile_compilation_unit, contract.name\n )\n contract_srcmap_init = contract.file_scope.srcmap_init(\n contract.compilation_unit.crytic_compile_compilation_unit, contract.name\n )\n cfg_init = CFG(contract_bytecode_init)\n\n evm_info[\"cfg_init\", contract.name] = cfg_init\n evm_info[\"mapping_init\", contract.name] = generate_source_to_evm_ins_mapping(\n cfg_init.instructions,\n contract_srcmap_init,\n slither,\n contract.source_mapping.filename.absolute,\n )\n\n return evm_info\n\n\n# pylint: disable=too-many-locals\nclass PrinterEVM(AbstractPrinter):\n ARGUMENT = \"evm\"\n HELP = \"Print the evm instructions of nodes in functions\"\n\n WIKI = \"https://github.com/trailofbits/slither/wiki/Printer-documentation#evm\"\n\n def output(self, _filename):\n \"\"\"\n _filename is not used\n Args:\n _filename(string)\n \"\"\"\n\n txt = \"\"\n if not self.slither.crytic_compile:\n txt = \"The EVM printer requires to compile with crytic-compile\"\n self.info(red(txt))\n res = self.generate_output(txt)\n return res\n evm_info = _extract_evm_info(self.slither)\n\n for contract in self.slither.contracts_derived:\n txt += blue(f\"Contract {contract.name}\\n\")\n\n if evm_info.get((\"empty\", contract.name), False):\n txt += \"\\tempty contract\\n\"\n continue\n\n contract_file = self.slither.source_code[\n contract.source_mapping.filename.absolute\n ].encode(\"utf-8\")\n with open(contract.source_mapping.filename.absolute, \"r\", encoding=\"utf8\") as f:\n contract_file_lines = f.readlines()\n\n contract_pcs = {}\n contract_cfg = 
{}\n\n for function in contract.functions:\n txt += blue(f\"\\tFunction {function.canonical_name}\\n\")\n\n # CFG and source mapping depend on function being constructor or not\n if function.is_constructor:\n contract_cfg = evm_info[\"cfg_init\", contract.name]\n contract_pcs = evm_info[\"mapping_init\", contract.name]\n else:\n contract_cfg = evm_info[\"cfg\", contract.name]\n contract_pcs = evm_info[\"mapping\", contract.name]\n\n for node in function.nodes:\n txt += green(\"\\t\\tNode: \" + str(node) + \"\\n\")\n node_source_line = (\n contract_file[0 : node.source_mapping.start].count(\"\\n\".encode(\"utf-8\")) + 1\n )\n txt += green(\n f\"\\t\\tSource line {node_source_line}: {contract_file_lines[node_source_line - 1].rstrip()}\\n\"\n )\n txt += magenta(\"\\t\\tEVM Instructions:\\n\")\n node_pcs = contract_pcs.get(node_source_line, [])\n for pc in node_pcs:\n txt += magenta(f\"\\t\\t\\t{hex(pc)}: {contract_cfg.get_instruction_at(pc)}\\n\")\n\n for modifier in contract.modifiers:\n txt += blue(f\"\\tModifier {modifier.canonical_name}\\n\")\n for node in modifier.nodes:\n txt += green(\"\\t\\tNode: \" + str(node) + \"\\n\")\n node_source_line = (\n contract_file[0 : node.source_mapping.start].count(\"\\n\".encode(\"utf-8\")) + 1\n )\n txt += green(\n f\"\\t\\tSource line {node_source_line}: {contract_file_lines[node_source_line - 1].rstrip()}\\n\"\n )\n txt += magenta(\"\\t\\tEVM Instructions:\\n\")\n node_pcs = contract_pcs.get(node_source_line, [])\n for pc in node_pcs:\n txt += magenta(f\"\\t\\t\\t{hex(pc)}: {contract_cfg.get_instruction_at(pc)}\\n\")\n\n self.info(txt)\n res = self.generate_output(txt)\n return res\n", "path": "slither/printers/summary/evm.py"}]} | 2,094 | 415 |
gh_patches_debug_25292 | rasdani/github-patches | git_diff | OWASP__owasp-mastg-2253 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Broken links in MASTG Webpage
### MASTG Chapter
0x04g-Testing-Cryptography.md
### File Line Number
90
### Context
The links to "Data Storage on Android" and "Data Storage on iOS" lead to a 404 error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/scripts/transform_files.py`
Content:
```
1 import re, os
2 from pathlib import Path
3
4 EMOJIS_regex = r"🥇 |🎁 |📝 |❗ "
5
6 def transform_links(file_text):
7 print("[*] Regex Substitutions ../Document to MASTG/")
8 found = re.findall(r'(\((?:../)*Document/.*\.md/*)', file_text)
9 print(f" Found: {found}")
10 return re.sub(r"\(((?:../)*)Document/(.*)\.md/*", r"(\1MASTG/\2/", file_text)
11
12 def remove_emojis(file_text):
13 print("[*] Regex Substitutions for emojis")
14 found = re.findall(EMOJIS_regex, file_text)
15 print(f" Found: {found}")
16 return re.sub(EMOJIS_regex, r"", file_text)
17
18 def transform(folder, functions):
19 print(f"[*] Applying transforms to {folder}")
20 for root, dirname, filenames in os.walk(folder):
21 if len(filenames):
22 files = Path(root).glob('*.md')
23
24 for file in files:
25 file_obj = Path(file)
26 print(f" - File {file_obj.as_posix()}")
27 file_text = file_obj.read_text()
28
29 new_text = None
30 for function in functions:
31 if new_text is None:
32 new_text = function(file_text)
33 else:
34 new_text = function(new_text)
35
36 file_obj.write_text(new_text)
37
38 transform("docs", [transform_links])
39 transform("Document", [remove_emojis])
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/scripts/transform_files.py b/tools/scripts/transform_files.py
--- a/tools/scripts/transform_files.py
+++ b/tools/scripts/transform_files.py
@@ -4,10 +4,22 @@
EMOJIS_regex = r"🥇 |🎁 |📝 |❗ "
def transform_links(file_text):
- print("[*] Regex Substitutions ../Document to MASTG/")
- found = re.findall(r'(\((?:../)*Document/.*\.md/*)', file_text)
- print(f" Found: {found}")
- return re.sub(r"\(((?:../)*)Document/(.*)\.md/*", r"(\1MASTG/\2/", file_text)
+ # print("[*] Regex Substitutions ../Document to MASTG/")
+ found = re.findall(r'(\(0x.*\.md/*)', file_text)
+
+ # TODO FIX we must find a better solution to this
+ while len(found) > 0:
+ print(f" Found: {found}")
+ file_text = re.sub(r"\((0x0[1-3].*\.md)", r"(../Intro/\1", file_text)
+ file_text = re.sub(r"\((0x04.*\.md)", r"(../General/\1", file_text)
+ file_text = re.sub(r"\((0x05.*\.md)", r"(../Android/\1", file_text)
+ file_text = re.sub(r"\((0x06.*\.md)", r"(../iOS/\1", file_text)
+ file_text = re.sub(r"\((0x08.*\.md)", r"(../Tools/\1", file_text)
+ file_text = re.sub(r"\((0x09.*\.md)", r"(../References/\1", file_text)
+
+ found = re.findall(r'(\(0x.*\.md/*)', file_text)
+
+ return file_text
def remove_emojis(file_text):
print("[*] Regex Substitutions for emojis")
@@ -35,5 +47,5 @@
file_obj.write_text(new_text)
-transform("docs", [transform_links])
-transform("Document", [remove_emojis])
\ No newline at end of file
+transform("docs/MASTG", [transform_links])
+# transform("docs/MASTG", [remove_emojis])
\ No newline at end of file
| {"golden_diff": "diff --git a/tools/scripts/transform_files.py b/tools/scripts/transform_files.py\n--- a/tools/scripts/transform_files.py\n+++ b/tools/scripts/transform_files.py\n@@ -4,10 +4,22 @@\n EMOJIS_regex = r\"\ud83e\udd47 |\ud83c\udf81 |\ud83d\udcdd |\u2757 \"\n \n def transform_links(file_text):\n- print(\"[*] Regex Substitutions ../Document to MASTG/\")\n- found = re.findall(r'(\\((?:../)*Document/.*\\.md/*)', file_text)\n- print(f\" Found: {found}\")\n- return re.sub(r\"\\(((?:../)*)Document/(.*)\\.md/*\", r\"(\\1MASTG/\\2/\", file_text)\n+ # print(\"[*] Regex Substitutions ../Document to MASTG/\")\n+ found = re.findall(r'(\\(0x.*\\.md/*)', file_text)\n+\n+ # TODO FIX we must find a better solution to this\n+ while len(found) > 0:\n+ print(f\" Found: {found}\")\n+ file_text = re.sub(r\"\\((0x0[1-3].*\\.md)\", r\"(../Intro/\\1\", file_text)\n+ file_text = re.sub(r\"\\((0x04.*\\.md)\", r\"(../General/\\1\", file_text)\n+ file_text = re.sub(r\"\\((0x05.*\\.md)\", r\"(../Android/\\1\", file_text)\n+ file_text = re.sub(r\"\\((0x06.*\\.md)\", r\"(../iOS/\\1\", file_text)\n+ file_text = re.sub(r\"\\((0x08.*\\.md)\", r\"(../Tools/\\1\", file_text)\n+ file_text = re.sub(r\"\\((0x09.*\\.md)\", r\"(../References/\\1\", file_text)\n+\n+ found = re.findall(r'(\\(0x.*\\.md/*)', file_text)\n+\n+ return file_text\n \n def remove_emojis(file_text):\n print(\"[*] Regex Substitutions for emojis\")\n@@ -35,5 +47,5 @@\n \n file_obj.write_text(new_text)\n \n-transform(\"docs\", [transform_links])\n-transform(\"Document\", [remove_emojis])\n\\ No newline at end of file\n+transform(\"docs/MASTG\", [transform_links])\n+# transform(\"docs/MASTG\", [remove_emojis])\n\\ No newline at end of file\n", "issue": "[Bug] Broken links in MASTG Webpage\n### MASTG Chapter\n\n0x04g-Testing-Cryptography.md\n\n### File Line Number\n\n90\n\n### Context\n\nThe links to \"Data Storage on Android\" and \"Data Storage on iOS\" lead to 404\n", "before_files": [{"content": "import re, os\nfrom pathlib import Path\n\nEMOJIS_regex = r\"\ud83e\udd47 |\ud83c\udf81 |\ud83d\udcdd |\u2757 \"\n\ndef transform_links(file_text):\n print(\"[*] Regex Substitutions ../Document to MASTG/\")\n found = re.findall(r'(\\((?:../)*Document/.*\\.md/*)', file_text)\n print(f\" Found: {found}\")\n return re.sub(r\"\\(((?:../)*)Document/(.*)\\.md/*\", r\"(\\1MASTG/\\2/\", file_text)\n\ndef remove_emojis(file_text):\n print(\"[*] Regex Substitutions for emojis\")\n found = re.findall(EMOJIS_regex, file_text)\n print(f\" Found: {found}\")\n return re.sub(EMOJIS_regex, r\"\", file_text)\n\ndef transform(folder, functions):\n print(f\"[*] Applying transforms to {folder}\")\n for root, dirname, filenames in os.walk(folder):\n if len(filenames):\n files = Path(root).glob('*.md')\n\n for file in files:\n file_obj = Path(file)\n print(f\" - File {file_obj.as_posix()}\")\n file_text = file_obj.read_text()\n \n new_text = None\n for function in functions:\n if new_text is None:\n new_text = function(file_text)\n else:\n new_text = function(new_text)\n\n file_obj.write_text(new_text)\n\ntransform(\"docs\", [transform_links])\ntransform(\"Document\", [remove_emojis])", "path": "tools/scripts/transform_files.py"}], "after_files": [{"content": "import re, os\nfrom pathlib import Path\n\nEMOJIS_regex = r\"\ud83e\udd47 |\ud83c\udf81 |\ud83d\udcdd |\u2757 \"\n\ndef transform_links(file_text):\n # print(\"[*] Regex Substitutions ../Document to MASTG/\")\n found = re.findall(r'(\\(0x.*\\.md/*)', file_text)\n\n # TODO FIX we must find a better solution to this\n 
while len(found) > 0:\n print(f\" Found: {found}\")\n file_text = re.sub(r\"\\((0x0[1-3].*\\.md)\", r\"(../Intro/\\1\", file_text)\n file_text = re.sub(r\"\\((0x04.*\\.md)\", r\"(../General/\\1\", file_text)\n file_text = re.sub(r\"\\((0x05.*\\.md)\", r\"(../Android/\\1\", file_text)\n file_text = re.sub(r\"\\((0x06.*\\.md)\", r\"(../iOS/\\1\", file_text)\n file_text = re.sub(r\"\\((0x08.*\\.md)\", r\"(../Tools/\\1\", file_text)\n file_text = re.sub(r\"\\((0x09.*\\.md)\", r\"(../References/\\1\", file_text)\n\n found = re.findall(r'(\\(0x.*\\.md/*)', file_text)\n\n return file_text\n\ndef remove_emojis(file_text):\n print(\"[*] Regex Substitutions for emojis\")\n found = re.findall(EMOJIS_regex, file_text)\n print(f\" Found: {found}\")\n return re.sub(EMOJIS_regex, r\"\", file_text)\n\ndef transform(folder, functions):\n print(f\"[*] Applying transforms to {folder}\")\n for root, dirname, filenames in os.walk(folder):\n if len(filenames):\n files = Path(root).glob('*.md')\n\n for file in files:\n file_obj = Path(file)\n print(f\" - File {file_obj.as_posix()}\")\n file_text = file_obj.read_text()\n \n new_text = None\n for function in functions:\n if new_text is None:\n new_text = function(file_text)\n else:\n new_text = function(new_text)\n\n file_obj.write_text(new_text)\n\ntransform(\"docs/MASTG\", [transform_links])\n# transform(\"docs/MASTG\", [remove_emojis])", "path": "tools/scripts/transform_files.py"}]} | 718 | 529 |
gh_patches_debug_40681 | rasdani/github-patches | git_diff | WeblateOrg__weblate-10571 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dynamic dictionary for Azure AI Translator
### Describe the problem
Azure AI Translator can [translate with dynamic dictionary](https://learn.microsoft.com/en-us/azure/ai-services/translator/reference/v3-0-translate#translate-with-dynamic-dictionary). This would be a great way to expose Weblate glossaries there.
### Describe the solution you'd like
Utilize `get_glossary_terms` before sending the string to translate, and annotate glossary terms with `<mstrans:dictionary translation="translation of phrase">phrase</mstrans:dictionary>` markup.
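
A minimal sketch of that annotation step (the `annotate_glossary` helper and the `(source, target)` pair format are illustrative assumptions, not Weblate's actual API):

```python
def annotate_glossary(text: str, terms: list[tuple[str, str]]) -> str:
    """Wrap glossary phrases in Azure dynamic-dictionary markup.

    `terms` is assumed to hold (source_phrase, target_phrase) pairs,
    e.g. collected from get_glossary_terms(unit).
    """
    for source_phrase, target_phrase in terms:
        markup = (
            f'<mstrans:dictionary translation="{target_phrase}">'
            f"{source_phrase}</mstrans:dictionary>"
        )
        # Naive replacement; a real implementation would respect word
        # boundaries, escaping, and overlapping matches.
        text = text.replace(source_phrase, markup)
    return text


print(annotate_glossary("Open the wordlist", [("wordlist", "slovník")]))
# Open the <mstrans:dictionary translation="slovník">wordlist</mstrans:dictionary>
```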
### Describe alternatives you've considered
_No response_
### Screenshots
_No response_
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/machinery/microsoft.py`
Content:
```
1 # Copyright © Michal Čihař <[email protected]>
2 #
3 # SPDX-License-Identifier: GPL-3.0-or-later
4
5 from __future__ import annotations
6
7 from datetime import timedelta
8
9 from django.utils import timezone
10
11 from .base import MachineTranslation, MachineTranslationError
12 from .forms import MicrosoftMachineryForm
13
14 TOKEN_URL = "https://{0}{1}/sts/v1.0/issueToken?Subscription-Key={2}" # noqa: S105
15 TOKEN_EXPIRY = timedelta(minutes=9)
16
17
18 class MicrosoftCognitiveTranslation(MachineTranslation):
19 """Microsoft Cognitive Services Translator API support."""
20
21 name = "Azure AI Translator"
22 max_score = 90
23 settings_form = MicrosoftMachineryForm
24
25 language_map = {
26 "zh-hant": "zh-Hant",
27 "zh-hans": "zh-Hans",
28 "zh-tw": "zh-Hant",
29 "zh-cn": "zh-Hans",
30 "tlh": "tlh-Latn",
31 "tlh-qaak": "tlh-Piqd",
32 "nb": "no",
33 "bs-latn": "bs-Latn",
34 "sr": "sr-Latn",
35 "sr-latn": "sr-Latn",
36 "sr-cyrl": "sr-Cyrl",
37 "mn": "mn-Mong",
38 }
39
40 @classmethod
41 def get_identifier(cls):
42 return "microsoft-translator"
43
44 def __init__(self, settings: dict[str, str]):
45 """Check configuration."""
46 super().__init__(settings)
47 self._access_token = None
48 self._token_expiry = None
49
50 # check settings for Microsoft region prefix
51 region = "" if not self.settings["region"] else f"{self.settings['region']}."
52
53 self._cognitive_token_url = TOKEN_URL.format(
54 region,
55 self.settings["endpoint_url"],
56 self.settings["key"],
57 )
58
59 def get_url(self, suffix):
60 return f"https://{self.settings['base_url']}/{suffix}"
61
62 def is_token_expired(self):
63 """Check whether token is about to expire."""
64 return self._token_expiry <= timezone.now()
65
66 def get_authentication(self):
67 """Hook for backends to allow add authentication headers to request."""
68 return {"Authorization": f"Bearer {self.access_token}"}
69
70 @property
71 def access_token(self):
72 """Obtain and caches access token."""
73 if self._access_token is None or self.is_token_expired():
74 self._access_token = self.request(
75 "post", self._cognitive_token_url, skip_auth=True
76 ).text
77 self._token_expiry = timezone.now() + TOKEN_EXPIRY
78
79 return self._access_token
80
81 def map_language_code(self, code):
82 """Convert language to service specific code."""
83 return super().map_language_code(code).replace("_", "-")
84
85 def download_languages(self):
86 """
87 Download list of supported languages from a service.
88
89 Example of the response:
90
91 ['af', 'ar', 'bs-Latn', 'bg', 'ca', 'zh-CHS', 'zh-CHT', 'yue', 'hr', 'cs', 'da',
92 'nl', 'en', 'et', 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww',
93 'h', 'id', 'it', 'ja', 'sw', 'tlh', 'tlh-Qaak', 'ko', 'lv', 'lt', 'mg', 'ms',
94 'mt', 'yua', 'no', 'otq', 'fa', 'pl', 'pt', 'ro', 'r', 'sm', 'sr-Cyrl',
95 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'th', 'to', 'tr', 'uk', 'ur', 'vi',
96 'cy']
97 """
98 response = self.request(
99 "get", self.get_url("languages"), params={"api-version": "3.0"}
100 )
101 # Microsoft tends to use utf-8-sig instead of plain utf-8
102 response.encoding = response.apparent_encoding
103 payload = response.json()
104
105 # We should get an object, string usually means an error
106 if isinstance(payload, str):
107 raise MachineTranslationError(payload)
108
109 return payload["translation"].keys()
110
111 def download_translations(
112 self,
113 source,
114 language,
115 text: str,
116 unit,
117 user,
118 threshold: int = 75,
119 ):
120 """Download list of possible translations from a service."""
121 args = {
122 "api-version": "3.0",
123 "from": source,
124 "to": language,
125 "category": "general",
126 }
127 response = self.request(
128 "post", self.get_url("translate"), params=args, json=[{"Text": text[:5000]}]
129 )
130 # Microsoft tends to use utf-8-sig instead of plain utf-8
131 response.encoding = "utf-8-sig"
132 payload = response.json()
133 yield {
134 "text": payload[0]["translations"][0]["text"],
135 "quality": self.max_score,
136 "service": self.name,
137 "source": text,
138 }
139
```
Path: `weblate/machinery/googlev3.py`
Content:
```
1 # Copyright © Michal Čihař <[email protected]>
2 #
3 # SPDX-License-Identifier: GPL-3.0-or-later
4
5 import json
6 from typing import Any
7
8 from django.utils.functional import cached_property
9 from google.cloud.translate import TranslationServiceClient
10 from google.oauth2 import service_account
11
12 from .base import XMLMachineTranslationMixin
13 from .forms import GoogleV3MachineryForm
14 from .google import GoogleBaseTranslation
15
16
17 class GoogleV3Translation(XMLMachineTranslationMixin, GoogleBaseTranslation):
18 """Google Translate API v3 machine translation support."""
19
20 setup = None
21 name = "Google Cloud Translation Advanced"
22 max_score = 90
23 settings_form = GoogleV3MachineryForm
24
25 @classmethod
26 def get_identifier(cls):
27 return "google-translate-api-v3"
28
29 @cached_property
30 def client(self):
31 credentials = service_account.Credentials.from_service_account_info(
32 json.loads(self.settings["credentials"])
33 )
34 api_endpoint = "translate.googleapis.com"
35 if self.settings["location"].startswith("europe-"):
36 api_endpoint = "translate-eu.googleapis.com"
37 elif self.settings["location"].startswith("us-"):
38 api_endpoint = "translate-us.googleapis.com"
39 return TranslationServiceClient(
40 credentials=credentials, client_options={"api_endpoint": api_endpoint}
41 )
42
43 @cached_property
44 def parent(self):
45 project = self.settings["project"]
46 location = self.settings["location"]
47 return f"projects/{project}/locations/{location}"
48
49 def download_languages(self):
50 """List of supported languages."""
51 response = self.client.get_supported_languages(request={"parent": self.parent})
52 return [language.language_code for language in response.languages]
53
54 def download_translations(
55 self,
56 source,
57 language,
58 text: str,
59 unit,
60 user,
61 threshold: int = 75,
62 ):
63 """Download list of possible translations from a service."""
64 request = {
65 "parent": self.parent,
66 "contents": [text],
67 "target_language_code": language,
68 "source_language_code": source,
69 "mime_type": "text/html",
70 }
71 response = self.client.translate_text(request)
72
73 yield {
74 "text": response.translations[0].translated_text,
75 "quality": self.max_score,
76 "service": self.name,
77 "source": text,
78 }
79
80 def format_replacement(self, h_start: int, h_end: int, h_text: str, h_kind: Any):
81 """Generates a single replacement."""
82 return f'<span translate="no" id="{h_start}">{h_text}</span>' # noqa: B028
83
84 def cleanup_text(self, text, unit):
85 text, replacements = super().cleanup_text(text, unit)
86
87 # Sanitize newlines
88 replacement = '<br translate="no">'
89 replacements[replacement] = "\n"
90
91 return text.replace("\n", replacement), replacements
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/weblate/machinery/googlev3.py b/weblate/machinery/googlev3.py
--- a/weblate/machinery/googlev3.py
+++ b/weblate/machinery/googlev3.py
@@ -79,7 +79,7 @@
def format_replacement(self, h_start: int, h_end: int, h_text: str, h_kind: Any):
"""Generates a single replacement."""
- return f'<span translate="no" id="{h_start}">{h_text}</span>' # noqa: B028
+ return f'<span translate="no" id="{h_start}">{self.escape_text(h_text)}</span>' # noqa: B028
def cleanup_text(self, text, unit):
text, replacements = super().cleanup_text(text, unit)
diff --git a/weblate/machinery/microsoft.py b/weblate/machinery/microsoft.py
--- a/weblate/machinery/microsoft.py
+++ b/weblate/machinery/microsoft.py
@@ -5,17 +5,24 @@
from __future__ import annotations
from datetime import timedelta
+from typing import Any
from django.utils import timezone
-from .base import MachineTranslation, MachineTranslationError
+from weblate.glossary.models import get_glossary_terms
+
+from .base import (
+ MachineTranslation,
+ MachineTranslationError,
+ XMLMachineTranslationMixin,
+)
from .forms import MicrosoftMachineryForm
TOKEN_URL = "https://{0}{1}/sts/v1.0/issueToken?Subscription-Key={2}" # noqa: S105
TOKEN_EXPIRY = timedelta(minutes=9)
-class MicrosoftCognitiveTranslation(MachineTranslation):
+class MicrosoftCognitiveTranslation(XMLMachineTranslationMixin, MachineTranslation):
"""Microsoft Cognitive Services Translator API support."""
name = "Azure AI Translator"
@@ -123,6 +130,7 @@
"from": source,
"to": language,
"category": "general",
+ "textType": "html",
}
response = self.request(
"post", self.get_url("translate"), params=args, json=[{"Text": text[:5000]}]
@@ -136,3 +144,37 @@
"service": self.name,
"source": text,
}
+
+ def format_replacement(self, h_start: int, h_end: int, h_text: str, h_kind: Any):
+ """Generates a single replacement."""
+ if h_kind is None:
+ return f'<span class="notranslate" id="{h_start}">{self.escape_text(h_text)}</span>' # noqa: B028
+ # Glossary
+ flags = h_kind.all_flags
+ if "forbidden" in flags:
+ return h_text
+ if "read-only" in flags:
+ # Use terminology format
+ return self.format_replacement(h_start, h_end, h_text, None)
+ return f'<mstrans:dictionary translation="{self.escape_text(h_kind.target)}">{self.escape_text(h_text)}</mstrans:dictionary>'
+
+ def get_highlights(self, text, unit):
+ result = list(super().get_highlights(text, unit))
+
+ for term in get_glossary_terms(unit):
+ for start, end in term.glossary_positions:
+ glossary_highlight = (start, end, text[start:end], term)
+ handled = False
+ for i, (h_start, _h_end, _h_text, _h_kind) in enumerate(result):
+ if start < h_start:
+ if end > h_start:
+ # Skip as overlaps
+ break
+ # Insert before
+ result.insert(i, glossary_highlight)
+ handled = True
+ break
+ if not handled and not result or result[-1][1] < start:
+ result.append(glossary_highlight)
+
+ yield from result
| {"golden_diff": "diff --git a/weblate/machinery/googlev3.py b/weblate/machinery/googlev3.py\n--- a/weblate/machinery/googlev3.py\n+++ b/weblate/machinery/googlev3.py\n@@ -79,7 +79,7 @@\n \n def format_replacement(self, h_start: int, h_end: int, h_text: str, h_kind: Any):\n \"\"\"Generates a single replacement.\"\"\"\n- return f'<span translate=\"no\" id=\"{h_start}\">{h_text}</span>' # noqa: B028\n+ return f'<span translate=\"no\" id=\"{h_start}\">{self.escape_text(h_text)}</span>' # noqa: B028\n \n def cleanup_text(self, text, unit):\n text, replacements = super().cleanup_text(text, unit)\ndiff --git a/weblate/machinery/microsoft.py b/weblate/machinery/microsoft.py\n--- a/weblate/machinery/microsoft.py\n+++ b/weblate/machinery/microsoft.py\n@@ -5,17 +5,24 @@\n from __future__ import annotations\n \n from datetime import timedelta\n+from typing import Any\n \n from django.utils import timezone\n \n-from .base import MachineTranslation, MachineTranslationError\n+from weblate.glossary.models import get_glossary_terms\n+\n+from .base import (\n+ MachineTranslation,\n+ MachineTranslationError,\n+ XMLMachineTranslationMixin,\n+)\n from .forms import MicrosoftMachineryForm\n \n TOKEN_URL = \"https://{0}{1}/sts/v1.0/issueToken?Subscription-Key={2}\" # noqa: S105\n TOKEN_EXPIRY = timedelta(minutes=9)\n \n \n-class MicrosoftCognitiveTranslation(MachineTranslation):\n+class MicrosoftCognitiveTranslation(XMLMachineTranslationMixin, MachineTranslation):\n \"\"\"Microsoft Cognitive Services Translator API support.\"\"\"\n \n name = \"Azure AI Translator\"\n@@ -123,6 +130,7 @@\n \"from\": source,\n \"to\": language,\n \"category\": \"general\",\n+ \"textType\": \"html\",\n }\n response = self.request(\n \"post\", self.get_url(\"translate\"), params=args, json=[{\"Text\": text[:5000]}]\n@@ -136,3 +144,37 @@\n \"service\": self.name,\n \"source\": text,\n }\n+\n+ def format_replacement(self, h_start: int, h_end: int, h_text: str, h_kind: Any):\n+ \"\"\"Generates a single replacement.\"\"\"\n+ if h_kind is None:\n+ return f'<span class=\"notranslate\" id=\"{h_start}\">{self.escape_text(h_text)}</span>' # noqa: B028\n+ # Glossary\n+ flags = h_kind.all_flags\n+ if \"forbidden\" in flags:\n+ return h_text\n+ if \"read-only\" in flags:\n+ # Use terminology format\n+ return self.format_replacement(h_start, h_end, h_text, None)\n+ return f'<mstrans:dictionary translation=\"{self.escape_text(h_kind.target)}\">{self.escape_text(h_text)}</mstrans:dictionary>'\n+\n+ def get_highlights(self, text, unit):\n+ result = list(super().get_highlights(text, unit))\n+\n+ for term in get_glossary_terms(unit):\n+ for start, end in term.glossary_positions:\n+ glossary_highlight = (start, end, text[start:end], term)\n+ handled = False\n+ for i, (h_start, _h_end, _h_text, _h_kind) in enumerate(result):\n+ if start < h_start:\n+ if end > h_start:\n+ # Skip as overlaps\n+ break\n+ # Insert before\n+ result.insert(i, glossary_highlight)\n+ handled = True\n+ break\n+ if not handled and not result or result[-1][1] < start:\n+ result.append(glossary_highlight)\n+\n+ yield from result\n", "issue": "Dynamic dictionary for Azure AI Translator\n### Describe the problem\n\nAzure AI Translator can [translate with dynamic dictionary](https://learn.microsoft.com/en-us/azure/ai-services/translator/reference/v3-0-translate#translate-with-dynamic-dictionary). 
This would be a great way to expose Weblate glossaries there.\n\n### Describe the solution you'd like\n\nUtilize `get_glossary_terms` before sending string to translate and annotate glossary terms with `<mstrans:dictionary translation=\"translation of phrase\">phrase</mstrans:dictionary>` markup.\n\n### Describe alternatives you've considered\n\n_No response_\n\n### Screenshots\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import annotations\n\nfrom datetime import timedelta\n\nfrom django.utils import timezone\n\nfrom .base import MachineTranslation, MachineTranslationError\nfrom .forms import MicrosoftMachineryForm\n\nTOKEN_URL = \"https://{0}{1}/sts/v1.0/issueToken?Subscription-Key={2}\" # noqa: S105\nTOKEN_EXPIRY = timedelta(minutes=9)\n\n\nclass MicrosoftCognitiveTranslation(MachineTranslation):\n \"\"\"Microsoft Cognitive Services Translator API support.\"\"\"\n\n name = \"Azure AI Translator\"\n max_score = 90\n settings_form = MicrosoftMachineryForm\n\n language_map = {\n \"zh-hant\": \"zh-Hant\",\n \"zh-hans\": \"zh-Hans\",\n \"zh-tw\": \"zh-Hant\",\n \"zh-cn\": \"zh-Hans\",\n \"tlh\": \"tlh-Latn\",\n \"tlh-qaak\": \"tlh-Piqd\",\n \"nb\": \"no\",\n \"bs-latn\": \"bs-Latn\",\n \"sr\": \"sr-Latn\",\n \"sr-latn\": \"sr-Latn\",\n \"sr-cyrl\": \"sr-Cyrl\",\n \"mn\": \"mn-Mong\",\n }\n\n @classmethod\n def get_identifier(cls):\n return \"microsoft-translator\"\n\n def __init__(self, settings: dict[str, str]):\n \"\"\"Check configuration.\"\"\"\n super().__init__(settings)\n self._access_token = None\n self._token_expiry = None\n\n # check settings for Microsoft region prefix\n region = \"\" if not self.settings[\"region\"] else f\"{self.settings['region']}.\"\n\n self._cognitive_token_url = TOKEN_URL.format(\n region,\n self.settings[\"endpoint_url\"],\n self.settings[\"key\"],\n )\n\n def get_url(self, suffix):\n return f\"https://{self.settings['base_url']}/{suffix}\"\n\n def is_token_expired(self):\n \"\"\"Check whether token is about to expire.\"\"\"\n return self._token_expiry <= timezone.now()\n\n def get_authentication(self):\n \"\"\"Hook for backends to allow add authentication headers to request.\"\"\"\n return {\"Authorization\": f\"Bearer {self.access_token}\"}\n\n @property\n def access_token(self):\n \"\"\"Obtain and caches access token.\"\"\"\n if self._access_token is None or self.is_token_expired():\n self._access_token = self.request(\n \"post\", self._cognitive_token_url, skip_auth=True\n ).text\n self._token_expiry = timezone.now() + TOKEN_EXPIRY\n\n return self._access_token\n\n def map_language_code(self, code):\n \"\"\"Convert language to service specific code.\"\"\"\n return super().map_language_code(code).replace(\"_\", \"-\")\n\n def download_languages(self):\n \"\"\"\n Download list of supported languages from a service.\n\n Example of the response:\n\n ['af', 'ar', 'bs-Latn', 'bg', 'ca', 'zh-CHS', 'zh-CHT', 'yue', 'hr', 'cs', 'da',\n 'nl', 'en', 'et', 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww',\n 'h', 'id', 'it', 'ja', 'sw', 'tlh', 'tlh-Qaak', 'ko', 'lv', 'lt', 'mg', 'ms',\n 'mt', 'yua', 'no', 'otq', 'fa', 'pl', 'pt', 'ro', 'r', 'sm', 'sr-Cyrl',\n 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'th', 'to', 'tr', 'uk', 'ur', 'vi',\n 'cy']\n \"\"\"\n response = self.request(\n \"get\", self.get_url(\"languages\"), params={\"api-version\": \"3.0\"}\n )\n # Microsoft tends to use 
utf-8-sig instead of plain utf-8\n response.encoding = response.apparent_encoding\n payload = response.json()\n\n # We should get an object, string usually means an error\n if isinstance(payload, str):\n raise MachineTranslationError(payload)\n\n return payload[\"translation\"].keys()\n\n def download_translations(\n self,\n source,\n language,\n text: str,\n unit,\n user,\n threshold: int = 75,\n ):\n \"\"\"Download list of possible translations from a service.\"\"\"\n args = {\n \"api-version\": \"3.0\",\n \"from\": source,\n \"to\": language,\n \"category\": \"general\",\n }\n response = self.request(\n \"post\", self.get_url(\"translate\"), params=args, json=[{\"Text\": text[:5000]}]\n )\n # Microsoft tends to use utf-8-sig instead of plain utf-8\n response.encoding = \"utf-8-sig\"\n payload = response.json()\n yield {\n \"text\": payload[0][\"translations\"][0][\"text\"],\n \"quality\": self.max_score,\n \"service\": self.name,\n \"source\": text,\n }\n", "path": "weblate/machinery/microsoft.py"}, {"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nimport json\nfrom typing import Any\n\nfrom django.utils.functional import cached_property\nfrom google.cloud.translate import TranslationServiceClient\nfrom google.oauth2 import service_account\n\nfrom .base import XMLMachineTranslationMixin\nfrom .forms import GoogleV3MachineryForm\nfrom .google import GoogleBaseTranslation\n\n\nclass GoogleV3Translation(XMLMachineTranslationMixin, GoogleBaseTranslation):\n \"\"\"Google Translate API v3 machine translation support.\"\"\"\n\n setup = None\n name = \"Google Cloud Translation Advanced\"\n max_score = 90\n settings_form = GoogleV3MachineryForm\n\n @classmethod\n def get_identifier(cls):\n return \"google-translate-api-v3\"\n\n @cached_property\n def client(self):\n credentials = service_account.Credentials.from_service_account_info(\n json.loads(self.settings[\"credentials\"])\n )\n api_endpoint = \"translate.googleapis.com\"\n if self.settings[\"location\"].startswith(\"europe-\"):\n api_endpoint = \"translate-eu.googleapis.com\"\n elif self.settings[\"location\"].startswith(\"us-\"):\n api_endpoint = \"translate-us.googleapis.com\"\n return TranslationServiceClient(\n credentials=credentials, client_options={\"api_endpoint\": api_endpoint}\n )\n\n @cached_property\n def parent(self):\n project = self.settings[\"project\"]\n location = self.settings[\"location\"]\n return f\"projects/{project}/locations/{location}\"\n\n def download_languages(self):\n \"\"\"List of supported languages.\"\"\"\n response = self.client.get_supported_languages(request={\"parent\": self.parent})\n return [language.language_code for language in response.languages]\n\n def download_translations(\n self,\n source,\n language,\n text: str,\n unit,\n user,\n threshold: int = 75,\n ):\n \"\"\"Download list of possible translations from a service.\"\"\"\n request = {\n \"parent\": self.parent,\n \"contents\": [text],\n \"target_language_code\": language,\n \"source_language_code\": source,\n \"mime_type\": \"text/html\",\n }\n response = self.client.translate_text(request)\n\n yield {\n \"text\": response.translations[0].translated_text,\n \"quality\": self.max_score,\n \"service\": self.name,\n \"source\": text,\n }\n\n def format_replacement(self, h_start: int, h_end: int, h_text: str, h_kind: Any):\n \"\"\"Generates a single replacement.\"\"\"\n return f'<span translate=\"no\" id=\"{h_start}\">{h_text}</span>' # noqa: B028\n\n def 
cleanup_text(self, text, unit):\n text, replacements = super().cleanup_text(text, unit)\n\n # Sanitize newlines\n replacement = '<br translate=\"no\">'\n replacements[replacement] = \"\\n\"\n\n return text.replace(\"\\n\", replacement), replacements\n", "path": "weblate/machinery/googlev3.py"}], "after_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import annotations\n\nfrom datetime import timedelta\nfrom typing import Any\n\nfrom django.utils import timezone\n\nfrom weblate.glossary.models import get_glossary_terms\n\nfrom .base import (\n MachineTranslation,\n MachineTranslationError,\n XMLMachineTranslationMixin,\n)\nfrom .forms import MicrosoftMachineryForm\n\nTOKEN_URL = \"https://{0}{1}/sts/v1.0/issueToken?Subscription-Key={2}\" # noqa: S105\nTOKEN_EXPIRY = timedelta(minutes=9)\n\n\nclass MicrosoftCognitiveTranslation(XMLMachineTranslationMixin, MachineTranslation):\n \"\"\"Microsoft Cognitive Services Translator API support.\"\"\"\n\n name = \"Azure AI Translator\"\n max_score = 90\n settings_form = MicrosoftMachineryForm\n\n language_map = {\n \"zh-hant\": \"zh-Hant\",\n \"zh-hans\": \"zh-Hans\",\n \"zh-tw\": \"zh-Hant\",\n \"zh-cn\": \"zh-Hans\",\n \"tlh\": \"tlh-Latn\",\n \"tlh-qaak\": \"tlh-Piqd\",\n \"nb\": \"no\",\n \"bs-latn\": \"bs-Latn\",\n \"sr\": \"sr-Latn\",\n \"sr-latn\": \"sr-Latn\",\n \"sr-cyrl\": \"sr-Cyrl\",\n \"mn\": \"mn-Mong\",\n }\n\n @classmethod\n def get_identifier(cls):\n return \"microsoft-translator\"\n\n def __init__(self, settings: dict[str, str]):\n \"\"\"Check configuration.\"\"\"\n super().__init__(settings)\n self._access_token = None\n self._token_expiry = None\n\n # check settings for Microsoft region prefix\n region = \"\" if not self.settings[\"region\"] else f\"{self.settings['region']}.\"\n\n self._cognitive_token_url = TOKEN_URL.format(\n region,\n self.settings[\"endpoint_url\"],\n self.settings[\"key\"],\n )\n\n def get_url(self, suffix):\n return f\"https://{self.settings['base_url']}/{suffix}\"\n\n def is_token_expired(self):\n \"\"\"Check whether token is about to expire.\"\"\"\n return self._token_expiry <= timezone.now()\n\n def get_authentication(self):\n \"\"\"Hook for backends to allow add authentication headers to request.\"\"\"\n return {\"Authorization\": f\"Bearer {self.access_token}\"}\n\n @property\n def access_token(self):\n \"\"\"Obtain and caches access token.\"\"\"\n if self._access_token is None or self.is_token_expired():\n self._access_token = self.request(\n \"post\", self._cognitive_token_url, skip_auth=True\n ).text\n self._token_expiry = timezone.now() + TOKEN_EXPIRY\n\n return self._access_token\n\n def map_language_code(self, code):\n \"\"\"Convert language to service specific code.\"\"\"\n return super().map_language_code(code).replace(\"_\", \"-\")\n\n def download_languages(self):\n \"\"\"\n Download list of supported languages from a service.\n\n Example of the response:\n\n ['af', 'ar', 'bs-Latn', 'bg', 'ca', 'zh-CHS', 'zh-CHT', 'yue', 'hr', 'cs', 'da',\n 'nl', 'en', 'et', 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww',\n 'h', 'id', 'it', 'ja', 'sw', 'tlh', 'tlh-Qaak', 'ko', 'lv', 'lt', 'mg', 'ms',\n 'mt', 'yua', 'no', 'otq', 'fa', 'pl', 'pt', 'ro', 'r', 'sm', 'sr-Cyrl',\n 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'th', 'to', 'tr', 'uk', 'ur', 'vi',\n 'cy']\n \"\"\"\n response = self.request(\n \"get\", self.get_url(\"languages\"), params={\"api-version\": \"3.0\"}\n )\n # Microsoft tends to 
use utf-8-sig instead of plain utf-8\n response.encoding = response.apparent_encoding\n payload = response.json()\n\n # We should get an object, string usually means an error\n if isinstance(payload, str):\n raise MachineTranslationError(payload)\n\n return payload[\"translation\"].keys()\n\n def download_translations(\n self,\n source,\n language,\n text: str,\n unit,\n user,\n threshold: int = 75,\n ):\n \"\"\"Download list of possible translations from a service.\"\"\"\n args = {\n \"api-version\": \"3.0\",\n \"from\": source,\n \"to\": language,\n \"category\": \"general\",\n \"textType\": \"html\",\n }\n response = self.request(\n \"post\", self.get_url(\"translate\"), params=args, json=[{\"Text\": text[:5000]}]\n )\n # Microsoft tends to use utf-8-sig instead of plain utf-8\n response.encoding = \"utf-8-sig\"\n payload = response.json()\n yield {\n \"text\": payload[0][\"translations\"][0][\"text\"],\n \"quality\": self.max_score,\n \"service\": self.name,\n \"source\": text,\n }\n\n def format_replacement(self, h_start: int, h_end: int, h_text: str, h_kind: Any):\n \"\"\"Generates a single replacement.\"\"\"\n if h_kind is None:\n return f'<span class=\"notranslate\" id=\"{h_start}\">{self.escape_text(h_text)}</span>' # noqa: B028\n # Glossary\n flags = h_kind.all_flags\n if \"forbidden\" in flags:\n return h_text\n if \"read-only\" in flags:\n # Use terminology format\n return self.format_replacement(h_start, h_end, h_text, None)\n return f'<mstrans:dictionary translation=\"{self.escape_text(h_kind.target)}\">{self.escape_text(h_text)}</mstrans:dictionary>'\n\n def get_highlights(self, text, unit):\n result = list(super().get_highlights(text, unit))\n\n for term in get_glossary_terms(unit):\n for start, end in term.glossary_positions:\n glossary_highlight = (start, end, text[start:end], term)\n handled = False\n for i, (h_start, _h_end, _h_text, _h_kind) in enumerate(result):\n if start < h_start:\n if end > h_start:\n # Skip as overlaps\n break\n # Insert before\n result.insert(i, glossary_highlight)\n handled = True\n break\n if not handled and not result or result[-1][1] < start:\n result.append(glossary_highlight)\n\n yield from result\n", "path": "weblate/machinery/microsoft.py"}, {"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nimport json\nfrom typing import Any\n\nfrom django.utils.functional import cached_property\nfrom google.cloud.translate import TranslationServiceClient\nfrom google.oauth2 import service_account\n\nfrom .base import XMLMachineTranslationMixin\nfrom .forms import GoogleV3MachineryForm\nfrom .google import GoogleBaseTranslation\n\n\nclass GoogleV3Translation(XMLMachineTranslationMixin, GoogleBaseTranslation):\n \"\"\"Google Translate API v3 machine translation support.\"\"\"\n\n setup = None\n name = \"Google Cloud Translation Advanced\"\n max_score = 90\n settings_form = GoogleV3MachineryForm\n\n @classmethod\n def get_identifier(cls):\n return \"google-translate-api-v3\"\n\n @cached_property\n def client(self):\n credentials = service_account.Credentials.from_service_account_info(\n json.loads(self.settings[\"credentials\"])\n )\n api_endpoint = \"translate.googleapis.com\"\n if self.settings[\"location\"].startswith(\"europe-\"):\n api_endpoint = \"translate-eu.googleapis.com\"\n elif self.settings[\"location\"].startswith(\"us-\"):\n api_endpoint = \"translate-us.googleapis.com\"\n return TranslationServiceClient(\n credentials=credentials, client_options={\"api_endpoint\": 
api_endpoint}\n )\n\n @cached_property\n def parent(self):\n project = self.settings[\"project\"]\n location = self.settings[\"location\"]\n return f\"projects/{project}/locations/{location}\"\n\n def download_languages(self):\n \"\"\"List of supported languages.\"\"\"\n response = self.client.get_supported_languages(request={\"parent\": self.parent})\n return [language.language_code for language in response.languages]\n\n def download_translations(\n self,\n source,\n language,\n text: str,\n unit,\n user,\n threshold: int = 75,\n ):\n \"\"\"Download list of possible translations from a service.\"\"\"\n request = {\n \"parent\": self.parent,\n \"contents\": [text],\n \"target_language_code\": language,\n \"source_language_code\": source,\n \"mime_type\": \"text/html\",\n }\n response = self.client.translate_text(request)\n\n yield {\n \"text\": response.translations[0].translated_text,\n \"quality\": self.max_score,\n \"service\": self.name,\n \"source\": text,\n }\n\n def format_replacement(self, h_start: int, h_end: int, h_text: str, h_kind: Any):\n \"\"\"Generates a single replacement.\"\"\"\n return f'<span translate=\"no\" id=\"{h_start}\">{self.escape_text(h_text)}</span>' # noqa: B028\n\n def cleanup_text(self, text, unit):\n text, replacements = super().cleanup_text(text, unit)\n\n # Sanitize newlines\n replacement = '<br translate=\"no\">'\n replacements[replacement] = \"\\n\"\n\n return text.replace(\"\\n\", replacement), replacements\n", "path": "weblate/machinery/googlev3.py"}]} | 2,710 | 882 |
gh_patches_debug_23265 | rasdani/github-patches | git_diff | google__turbinia-763 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docker explorer task failing with debug_tasks enabled
In turbinia config ```DEBUG_TASKS = True```
```
2021-02-24 14:32:11 [INFO] Running sudo /usr/local/bin/de.py -r /tmp/turbinia-mounts/turbinia6k7f8uxc/var/lib/docker list all_containers -d
usage: de.py [-h] [-d] [-r DOCKER_DIRECTORY] [-V]
{download,mount,list,history} ...
de.py: error: unrecognized arguments: -d
```
https://github.com/hacktobeer/turbinia/blob/master/turbinia/workers/docker.py#L71
^ appends '-d' to the end of the de.py command. This is incorrect and causes the docker-explorer command to fail. The flag should not be appended at the end; it must come directly after 'de.py' -> 'de.py -d'
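
A minimal sketch of the expected argument order (values copied from the log above; this is illustrative, not the actual Turbinia patch):

```python
debug = True  # stands in for config.DEBUG_TASKS or evidence.config.get('debug_tasks')
docker_dir = '/tmp/turbinia-mounts/turbinia6k7f8uxc/var/lib/docker'

docker_explorer_command = ['sudo', '/usr/local/bin/de.py']
if debug:
    # '-d' is a global de.py flag, so it must precede the 'list' subcommand
    # instead of being appended after 'all_containers'.
    docker_explorer_command.append('-d')
docker_explorer_command += ['-r', docker_dir, 'list', 'all_containers']

print(' '.join(docker_explorer_command))
# sudo /usr/local/bin/de.py -d -r /tmp/turbinia-mounts/turbinia6k7f8uxc/var/lib/docker list all_containers
```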
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/workers/docker.py`
Content:
```
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Task for running docker-explorer."""
15
16 from __future__ import unicode_literals
17
18 import json
19 import logging
20 import subprocess
21
22 from turbinia import TurbiniaException
23 from turbinia.evidence import DockerContainer
24 from turbinia.evidence import EvidenceState as state
25 from turbinia.lib import utils
26 from turbinia.workers import Priority
27 from turbinia.workers import TurbiniaTask
28 from turbinia.lib.docker_manager import GetDockerPath
29 from turbinia import config
30
31 log = logging.getLogger('turbinia')
32
33
34 class DockerContainersEnumerationTask(TurbiniaTask):
35 """Enumerates Docker containers on Linux"""
36
37 REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]
38
39 def GetContainers(self, evidence):
40 """Lists the containers from an input Evidence.
41
42 We use subprocess to run the DockerExplorer script, instead of using the
43 Python module, because we need to make sure all DockerExplorer code runs
44 as root.
45
46 Args:
47 evidence (Evidence): the input Evidence.
48
49 Returns:
50 a list(dict) containing information about the containers found.
51
52 Raises:
53 TurbiniaException: when the docker-explorer tool cannot be found or failed
54 to run.
55 """
56 config.LoadConfig()
57 docker_dir = GetDockerPath(evidence.mount_path)
58
59 containers_info = None
60
61 # TODO(rgayon): use docker-explorer exposed constant when
62 # https://github.com/google/docker-explorer/issues/80 is in.
63 de_binary = utils.get_exe_path('de.py')
64 if not de_binary:
65 raise TurbiniaException('Cannot find de.py in path')
66
67 docker_explorer_command = [
68 'sudo', de_binary, '-r', docker_dir, 'list', 'all_containers'
69 ]
70 if config.DEBUG_TASKS or evidence.config.get('debug_tasks'):
71 docker_explorer_command.append('-d')
72 log.info('Running {0:s}'.format(' '.join(docker_explorer_command)))
73 try:
74 json_string = subprocess.check_output(docker_explorer_command).decode(
75 'utf-8')
76 except json.JSONDecodeError as e:
77 raise TurbiniaException(
78 'Error decoding JSON output from de.py: {0!s}'.format(e))
79 except subprocess.CalledProcessError as e:
80 raise TurbiniaException('de.py returned an error: {0!s}'.format(e))
81
82 containers_info = json.loads(json_string)
83
84 return containers_info
85
86 def run(self, evidence, result):
87 """Run the docker-explorer tool to list containerss.
88
89 Args:
90 evidence (Evidence object): The evidence to process
91 result (TurbiniaTaskResult): The object to place task results into.
92
93 Returns:
94 TurbiniaTaskResult object.
95 """
96
97 status_report = ''
98 success = False
99
100 status_report = (
101 'Error enumerating Docker containers, evidence has no mounted '
102 'filesystem')
103 found_containers = []
104 try:
105 containers_info = self.GetContainers(evidence)
106 for container_info in containers_info:
107 container_id = container_info.get('container_id')
108 found_containers.append(container_id)
109 container_evidence = DockerContainer(container_id=container_id)
110 result.add_evidence(container_evidence, evidence.config)
111 success = True
112 status_report = 'Found {0!s} containers: {1:s}'.format(
113 len(found_containers), ' '.join(found_containers))
114 except TurbiniaException as e:
115 status_report = 'Error enumerating Docker containers: {0!s}'.format(e)
116
117 result.report_priority = Priority.LOW
118 result.report_data = status_report
119 result.close(self, success=success, status=status_report)
120 return result
121
```
Path: `turbinia/workers/redis.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2018 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Task for analysing redis_config files."""
16
17 from __future__ import unicode_literals
18
19 import os
20 import re
21
22 from turbinia.evidence import EvidenceState as state
23 from turbinia.evidence import ReportText
24 from turbinia.lib import text_formatter as fmt
25 from turbinia.workers import TurbiniaTask
26 from turbinia.workers import Priority
27
28
29 class RedisAnalysisTask(TurbiniaTask):
30 """Task to analyze a Redis configuration file."""
31
32 REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]
33
34 def run(self, evidence, result):
35 """Run the Redis configuration analysis worker.
36
37 Args:
38 evidence (Evidence object): The evidence we will process.
39 result (TurbiniaTaskResult): The object to place task results into.
40
41 Returns:
42 TurbiniaTaskResult object.
43 """
44 # Where to store the resulting output file.
45 output_file_name = 'redis_config_analysis.txt'
46 output_file_path = os.path.join(self.output_dir, output_file_name)
47 # Set the output file as the data source for the output evidence.
48 output_evidence = ReportText(source_path=output_file_path)
49
50 # Read the input file
51 with open(evidence.local_path, 'r') as input_file:
52 redis_config = input_file.read()
53
54 (report, priority, summary) = self.analyse_redis_config(redis_config)
55 output_evidence.text_data = report
56 result.report_priority = priority
57 result.report_data = report
58
59 # Write the report to the output file.
60 with open(output_file_path, 'w') as fh:
61 fh.write(output_evidence.text_data.encode('utf-8'))
62
63 # Add the resulting evidence to the result object.
64 result.add_evidence(output_evidence, evidence.config)
65 result.close(self, success=True, status=summary)
66 return result
67
68 def analyse_redis_config(self, config):
69 """Analyses a Redis configuration.
70
71 Args:
72 config (str): configuration file content.
73
74 Returns:
75 Tuple(
76 report_text(str): The report data
77 report_priority(int): The priority of the report (0 - 100)
78 summary(str): A summary of the report (used for task status)
79 )
80 """
81 findings = []
82 bind_everywhere_re = re.compile(
83 r'^\s*bind[\s"]*0\.0\.0\.0', re.IGNORECASE | re.MULTILINE)
84
85 if re.search(bind_everywhere_re, config):
86 findings.append(fmt.bullet('Redis listening on every IP'))
87
88 if findings:
89 summary = 'Insecure Redis configuration found.'
90 findings.insert(0, fmt.heading4(fmt.bold(summary)))
91 report = '\n'.join(findings)
92 return (report, Priority.HIGH, summary)
93
94 report = 'No issues found in Redis configuration'
95 return (report, Priority.LOW, report)
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/turbinia/workers/docker.py b/turbinia/workers/docker.py
--- a/turbinia/workers/docker.py
+++ b/turbinia/workers/docker.py
@@ -64,11 +64,13 @@
if not de_binary:
raise TurbiniaException('Cannot find de.py in path')
- docker_explorer_command = [
- 'sudo', de_binary, '-r', docker_dir, 'list', 'all_containers'
- ]
+ docker_explorer_command = ['sudo', de_binary]
+
if config.DEBUG_TASKS or evidence.config.get('debug_tasks'):
docker_explorer_command.append('-d')
+
+ docker_explorer_command.extend(['-r', docker_dir, 'list', 'all_containers'])
+
log.info('Running {0:s}'.format(' '.join(docker_explorer_command)))
try:
json_string = subprocess.check_output(docker_explorer_command).decode(
diff --git a/turbinia/workers/redis.py b/turbinia/workers/redis.py
--- a/turbinia/workers/redis.py
+++ b/turbinia/workers/redis.py
@@ -57,7 +57,7 @@
result.report_data = report
# Write the report to the output file.
- with open(output_file_path, 'w') as fh:
+ with open(output_file_path, 'wb') as fh:
fh.write(output_evidence.text_data.encode('utf-8'))
# Add the resulting evidence to the result object.
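For reference, the ordering produced by the patched code can be checked in isolation. The snippet below only re-runs the list construction from the diff above with hard-coded stand-in values (binary path taken from the issue log, placeholder docker directory), so it is a sketch rather than Turbinia code.

```python
# Mirrors the patched list construction, with illustrative values.
de_binary = "/usr/local/bin/de.py"   # path as seen in the issue's log line
docker_dir = "/var/lib/docker"       # placeholder mount path
debug = True                         # stands in for config.DEBUG_TASKS / evidence config

docker_explorer_command = ["sudo", de_binary]
if debug:
    docker_explorer_command.append("-d")
docker_explorer_command.extend(["-r", docker_dir, "list", "all_containers"])

# sudo /usr/local/bin/de.py -d -r /var/lib/docker list all_containers
assert docker_explorer_command[2] == "-d"
print(" ".join(docker_explorer_command))
```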
| {"golden_diff": "diff --git a/turbinia/workers/docker.py b/turbinia/workers/docker.py\n--- a/turbinia/workers/docker.py\n+++ b/turbinia/workers/docker.py\n@@ -64,11 +64,13 @@\n if not de_binary:\n raise TurbiniaException('Cannot find de.py in path')\n \n- docker_explorer_command = [\n- 'sudo', de_binary, '-r', docker_dir, 'list', 'all_containers'\n- ]\n+ docker_explorer_command = ['sudo', de_binary]\n+\n if config.DEBUG_TASKS or evidence.config.get('debug_tasks'):\n docker_explorer_command.append('-d')\n+\n+ docker_explorer_command.extend(['-r', docker_dir, 'list', 'all_containers'])\n+\n log.info('Running {0:s}'.format(' '.join(docker_explorer_command)))\n try:\n json_string = subprocess.check_output(docker_explorer_command).decode(\ndiff --git a/turbinia/workers/redis.py b/turbinia/workers/redis.py\n--- a/turbinia/workers/redis.py\n+++ b/turbinia/workers/redis.py\n@@ -57,7 +57,7 @@\n result.report_data = report\n \n # Write the report to the output file.\n- with open(output_file_path, 'w') as fh:\n+ with open(output_file_path, 'wb') as fh:\n fh.write(output_evidence.text_data.encode('utf-8'))\n \n # Add the resulting evidence to the result object.\n", "issue": "docker explorer task failing with debug_tasks enabled\nIn turbinia config ```DEBUG_TASKS = True```\r\n\r\n```\r\n2021-02-24 14:32:11 [INFO] Running sudo /usr/local/bin/de.py -r /tmp/turbinia-mounts/turbinia6k7f8uxc/var/lib/docker list all_containers -d\r\nusage: de.py [-h] [-d] [-r DOCKER_DIRECTORY] [-V]\r\n {download,mount,list,history} ...\r\nde.py: error: unrecognized arguments: -d\r\n```\r\n\r\nhttps://github.com/hacktobeer/turbinia/blob/master/turbinia/workers/docker.py#L71\r\n\r\n^ appends '-d' to the end of the de.py command. This is incorrect and will fail the docker explorer command. 
It should not be appended but come right after 'de.py' -> 'de.py -d'\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task for running docker-explorer.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\nimport subprocess\n\nfrom turbinia import TurbiniaException\nfrom turbinia.evidence import DockerContainer\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.lib import utils\nfrom turbinia.workers import Priority\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.lib.docker_manager import GetDockerPath\nfrom turbinia import config\n\nlog = logging.getLogger('turbinia')\n\n\nclass DockerContainersEnumerationTask(TurbiniaTask):\n \"\"\"Enumerates Docker containers on Linux\"\"\"\n\n REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]\n\n def GetContainers(self, evidence):\n \"\"\"Lists the containers from an input Evidence.\n\n We use subprocess to run the DockerExplorer script, instead of using the\n Python module, because we need to make sure all DockerExplorer code runs\n as root.\n\n Args:\n evidence (Evidence): the input Evidence.\n\n Returns:\n a list(dict) containing information about the containers found.\n\n Raises:\n TurbiniaException: when the docker-explorer tool cannot be found or failed\n to run.\n \"\"\"\n config.LoadConfig()\n docker_dir = GetDockerPath(evidence.mount_path)\n\n containers_info = None\n\n # TODO(rgayon): use docker-explorer exposed constant when\n # https://github.com/google/docker-explorer/issues/80 is in.\n de_binary = utils.get_exe_path('de.py')\n if not de_binary:\n raise TurbiniaException('Cannot find de.py in path')\n\n docker_explorer_command = [\n 'sudo', de_binary, '-r', docker_dir, 'list', 'all_containers'\n ]\n if config.DEBUG_TASKS or evidence.config.get('debug_tasks'):\n docker_explorer_command.append('-d')\n log.info('Running {0:s}'.format(' '.join(docker_explorer_command)))\n try:\n json_string = subprocess.check_output(docker_explorer_command).decode(\n 'utf-8')\n except json.JSONDecodeError as e:\n raise TurbiniaException(\n 'Error decoding JSON output from de.py: {0!s}'.format(e))\n except subprocess.CalledProcessError as e:\n raise TurbiniaException('de.py returned an error: {0!s}'.format(e))\n\n containers_info = json.loads(json_string)\n\n return containers_info\n\n def run(self, evidence, result):\n \"\"\"Run the docker-explorer tool to list containerss.\n\n Args:\n evidence (Evidence object): The evidence to process\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n status_report = ''\n success = False\n\n status_report = (\n 'Error enumerating Docker containers, evidence has no mounted '\n 'filesystem')\n found_containers = []\n try:\n containers_info = self.GetContainers(evidence)\n for container_info in containers_info:\n container_id = container_info.get('container_id')\n found_containers.append(container_id)\n container_evidence = 
DockerContainer(container_id=container_id)\n result.add_evidence(container_evidence, evidence.config)\n success = True\n status_report = 'Found {0!s} containers: {1:s}'.format(\n len(found_containers), ' '.join(found_containers))\n except TurbiniaException as e:\n status_report = 'Error enumerating Docker containers: {0!s}'.format(e)\n\n result.report_priority = Priority.LOW\n result.report_data = status_report\n result.close(self, success=success, status=status_report)\n return result\n", "path": "turbinia/workers/docker.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task for analysing redis_config files.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nimport re\n\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.evidence import ReportText\nfrom turbinia.lib import text_formatter as fmt\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.workers import Priority\n\n\nclass RedisAnalysisTask(TurbiniaTask):\n \"\"\"Task to analyze a Redis configuration file.\"\"\"\n\n REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]\n\n def run(self, evidence, result):\n \"\"\"Run the Redis configuration analysis worker.\n\n Args:\n evidence (Evidence object): The evidence we will process.\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n # Where to store the resulting output file.\n output_file_name = 'redis_config_analysis.txt'\n output_file_path = os.path.join(self.output_dir, output_file_name)\n # Set the output file as the data source for the output evidence.\n output_evidence = ReportText(source_path=output_file_path)\n\n # Read the input file\n with open(evidence.local_path, 'r') as input_file:\n redis_config = input_file.read()\n\n (report, priority, summary) = self.analyse_redis_config(redis_config)\n output_evidence.text_data = report\n result.report_priority = priority\n result.report_data = report\n\n # Write the report to the output file.\n with open(output_file_path, 'w') as fh:\n fh.write(output_evidence.text_data.encode('utf-8'))\n\n # Add the resulting evidence to the result object.\n result.add_evidence(output_evidence, evidence.config)\n result.close(self, success=True, status=summary)\n return result\n\n def analyse_redis_config(self, config):\n \"\"\"Analyses a Redis configuration.\n\n Args:\n config (str): configuration file content.\n\n Returns:\n Tuple(\n report_text(str): The report data\n report_priority(int): The priority of the report (0 - 100)\n summary(str): A summary of the report (used for task status)\n )\n \"\"\"\n findings = []\n bind_everywhere_re = re.compile(\n r'^\\s*bind[\\s\"]*0\\.0\\.0\\.0', re.IGNORECASE | re.MULTILINE)\n\n if re.search(bind_everywhere_re, config):\n findings.append(fmt.bullet('Redis listening on every IP'))\n\n if findings:\n summary = 'Insecure Redis configuration found.'\n findings.insert(0, fmt.heading4(fmt.bold(summary)))\n report = 
'\\n'.join(findings)\n return (report, Priority.HIGH, summary)\n\n report = 'No issues found in Redis configuration'\n return (report, Priority.LOW, report)\n", "path": "turbinia/workers/redis.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task for running docker-explorer.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\nimport subprocess\n\nfrom turbinia import TurbiniaException\nfrom turbinia.evidence import DockerContainer\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.lib import utils\nfrom turbinia.workers import Priority\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.lib.docker_manager import GetDockerPath\nfrom turbinia import config\n\nlog = logging.getLogger('turbinia')\n\n\nclass DockerContainersEnumerationTask(TurbiniaTask):\n \"\"\"Enumerates Docker containers on Linux\"\"\"\n\n REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]\n\n def GetContainers(self, evidence):\n \"\"\"Lists the containers from an input Evidence.\n\n We use subprocess to run the DockerExplorer script, instead of using the\n Python module, because we need to make sure all DockerExplorer code runs\n as root.\n\n Args:\n evidence (Evidence): the input Evidence.\n\n Returns:\n a list(dict) containing information about the containers found.\n\n Raises:\n TurbiniaException: when the docker-explorer tool cannot be found or failed\n to run.\n \"\"\"\n config.LoadConfig()\n docker_dir = GetDockerPath(evidence.mount_path)\n\n containers_info = None\n\n # TODO(rgayon): use docker-explorer exposed constant when\n # https://github.com/google/docker-explorer/issues/80 is in.\n de_binary = utils.get_exe_path('de.py')\n if not de_binary:\n raise TurbiniaException('Cannot find de.py in path')\n\n docker_explorer_command = ['sudo', de_binary]\n\n if config.DEBUG_TASKS or evidence.config.get('debug_tasks'):\n docker_explorer_command.append('-d')\n\n docker_explorer_command.extend(['-r', docker_dir, 'list', 'all_containers'])\n\n log.info('Running {0:s}'.format(' '.join(docker_explorer_command)))\n try:\n json_string = subprocess.check_output(docker_explorer_command).decode(\n 'utf-8')\n except json.JSONDecodeError as e:\n raise TurbiniaException(\n 'Error decoding JSON output from de.py: {0!s}'.format(e))\n except subprocess.CalledProcessError as e:\n raise TurbiniaException('de.py returned an error: {0!s}'.format(e))\n\n containers_info = json.loads(json_string)\n\n return containers_info\n\n def run(self, evidence, result):\n \"\"\"Run the docker-explorer tool to list containerss.\n\n Args:\n evidence (Evidence object): The evidence to process\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n status_report = ''\n success = False\n\n status_report = (\n 'Error enumerating Docker containers, evidence has no mounted '\n 'filesystem')\n found_containers = []\n try:\n containers_info = self.GetContainers(evidence)\n 
for container_info in containers_info:\n container_id = container_info.get('container_id')\n found_containers.append(container_id)\n container_evidence = DockerContainer(container_id=container_id)\n result.add_evidence(container_evidence, evidence.config)\n success = True\n status_report = 'Found {0!s} containers: {1:s}'.format(\n len(found_containers), ' '.join(found_containers))\n except TurbiniaException as e:\n status_report = 'Error enumerating Docker containers: {0!s}'.format(e)\n\n result.report_priority = Priority.LOW\n result.report_data = status_report\n result.close(self, success=success, status=status_report)\n return result\n", "path": "turbinia/workers/docker.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task for analysing redis_config files.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nimport re\n\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.evidence import ReportText\nfrom turbinia.lib import text_formatter as fmt\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.workers import Priority\n\n\nclass RedisAnalysisTask(TurbiniaTask):\n \"\"\"Task to analyze a Redis configuration file.\"\"\"\n\n REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]\n\n def run(self, evidence, result):\n \"\"\"Run the Redis configuration analysis worker.\n\n Args:\n evidence (Evidence object): The evidence we will process.\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n # Where to store the resulting output file.\n output_file_name = 'redis_config_analysis.txt'\n output_file_path = os.path.join(self.output_dir, output_file_name)\n # Set the output file as the data source for the output evidence.\n output_evidence = ReportText(source_path=output_file_path)\n\n # Read the input file\n with open(evidence.local_path, 'r') as input_file:\n redis_config = input_file.read()\n\n (report, priority, summary) = self.analyse_redis_config(redis_config)\n output_evidence.text_data = report\n result.report_priority = priority\n result.report_data = report\n\n # Write the report to the output file.\n with open(output_file_path, 'wb') as fh:\n fh.write(output_evidence.text_data.encode('utf-8'))\n\n # Add the resulting evidence to the result object.\n result.add_evidence(output_evidence, evidence.config)\n result.close(self, success=True, status=summary)\n return result\n\n def analyse_redis_config(self, config):\n \"\"\"Analyses a Redis configuration.\n\n Args:\n config (str): configuration file content.\n\n Returns:\n Tuple(\n report_text(str): The report data\n report_priority(int): The priority of the report (0 - 100)\n summary(str): A summary of the report (used for task status)\n )\n \"\"\"\n findings = []\n bind_everywhere_re = re.compile(\n r'^\\s*bind[\\s\"]*0\\.0\\.0\\.0', re.IGNORECASE | re.MULTILINE)\n\n if re.search(bind_everywhere_re, config):\n findings.append(fmt.bullet('Redis listening on every 
IP'))\n\n if findings:\n summary = 'Insecure Redis configuration found.'\n findings.insert(0, fmt.heading4(fmt.bold(summary)))\n report = '\\n'.join(findings)\n return (report, Priority.HIGH, summary)\n\n report = 'No issues found in Redis configuration'\n return (report, Priority.LOW, report)\n", "path": "turbinia/workers/redis.py"}]} | 2,623 | 341 |
gh_patches_debug_16918 | rasdani/github-patches | git_diff | ansible__ansible-18121 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
safe_eval() warning for variables containing curly braces and Jinja2 expressions
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
`template/safe_eval`
##### ANSIBLE VERSION
```
ansible 2.2.0.0
config file =
configured module search path = Default w/o overrides
```
##### SUMMARY
There is a strange `safe_eval()` warning if one wants to template a string containing Jinja2 expressions and curly braces. Even escaping the curly braces like `{{ '{}' }}` does not help.
##### STEPS TO REPRODUCE
```
- hosts: localhost
vars:
foo: "bar"
tasks:
- debug: msg="{}/{{ foo }}"
```
##### EXPECTED RESULTS
Apart from the warning the expected results coincide with the actual ones.
##### ACTUAL RESULTS
```
PLAY [localhost] ***************************************************************
TASK [setup] *******************************************************************
ok: [localhost]
TASK [debug] *******************************************************************
[WARNING]: Exception in safe_eval() on expr: {}/bar (name 'bar' is not defined)
ok: [localhost] => {
"msg": "{}/bar"
}
PLAY RECAP *********************************************************************
localhost : ok=2 changed=0 unreachable=0 failed=0
```
--- END ISSUE ---
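To see where the warning comes from: the argument is first rendered by Jinja2, so `"{}/{{ foo }}"` becomes `{}/bar`, and that result is then offered to `safe_eval()`, which tries to parse it as a Python expression. `{}/bar` happens to be syntactically valid Python (an empty dict divided by the name `bar`), so it gets past `ast.parse()` and only fails at `eval()` time with a NameError, which is exactly the text in the warning. A minimal reproduction outside Ansible (plain Python, no Ansible imports):

```python
import ast

expr = "{}/bar"  # what "{}/{{ foo }}" looks like after Jinja2 rendering

# Parses cleanly: an empty dict divided by the name `bar`.
tree = ast.parse(expr, mode="eval")
print(ast.dump(tree.body))  # BinOp(left=Dict(...), op=Div(), right=Name(id='bar', ...))

# Evaluation then fails because `bar` is not defined in the eval namespace,
# matching the "name 'bar' is not defined" text in the warning.
try:
    eval(compile(tree, expr, "eval"), {}, {})
except NameError as e:
    print("NameError:", e)
```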
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/template/safe_eval.py`
Content:
```
1 # (c) 2012, Michael DeHaan <[email protected]>
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17 from __future__ import (absolute_import, division, print_function)
18 __metaclass__ = type
19
20 import ast
21 import sys
22
23 from ansible.compat.six import string_types
24 from ansible.compat.six.moves import builtins
25
26 from ansible import constants as C
27 from ansible.plugins import filter_loader, test_loader
28
29 try:
30 from __main__ import display
31 except ImportError:
32 from ansible.utils.display import Display
33 display = Display()
34
35 def safe_eval(expr, locals={}, include_exceptions=False):
36 '''
37 This is intended for allowing things like:
38 with_items: a_list_variable
39
40 Where Jinja2 would return a string but we do not want to allow it to
41 call functions (outside of Jinja2, where the env is constrained). If
42 the input data to this function came from an untrusted (remote) source,
43 it should first be run through _clean_data_struct() to ensure the data
44 is further sanitized prior to evaluation.
45
46 Based on:
47 http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
48 '''
49
50 # define certain JSON types
51 # eg. JSON booleans are unknown to python eval()
52 JSON_TYPES = {
53 'false': False,
54 'null': None,
55 'true': True,
56 }
57
58 # this is the whitelist of AST nodes we are going to
59 # allow in the evaluation. Any node type other than
60 # those listed here will raise an exception in our custom
61 # visitor class defined below.
62 SAFE_NODES = set(
63 (
64 ast.Add,
65 ast.BinOp,
66 ast.Call,
67 ast.Compare,
68 ast.Dict,
69 ast.Div,
70 ast.Expression,
71 ast.List,
72 ast.Load,
73 ast.Mult,
74 ast.Num,
75 ast.Name,
76 ast.Str,
77 ast.Sub,
78 ast.Tuple,
79 ast.UnaryOp,
80 )
81 )
82
83 # AST node types were expanded after 2.6
84 if sys.version_info[:2] >= (2, 7):
85 SAFE_NODES.update(
86 set(
87 (ast.Set,)
88 )
89 )
90
91 # And in Python 3.4 too
92 if sys.version_info[:2] >= (3, 4):
93 SAFE_NODES.update(
94 set(
95 (ast.NameConstant,)
96 )
97 )
98
99 filter_list = []
100 for filter in filter_loader.all():
101 filter_list.extend(filter.filters().keys())
102
103 test_list = []
104 for test in test_loader.all():
105 test_list.extend(test.tests().keys())
106
107 CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
108
109 class CleansingNodeVisitor(ast.NodeVisitor):
110 def generic_visit(self, node, inside_call=False):
111 if type(node) not in SAFE_NODES:
112 raise Exception("invalid expression (%s)" % expr)
113 elif isinstance(node, ast.Call):
114 inside_call = True
115 elif isinstance(node, ast.Name) and inside_call:
116 if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
117 raise Exception("invalid function: %s" % node.id)
118 # iterate over all child nodes
119 for child_node in ast.iter_child_nodes(node):
120 self.generic_visit(child_node, inside_call)
121
122 if not isinstance(expr, string_types):
123 # already templated to a datastructure, perhaps?
124 if include_exceptions:
125 return (expr, None)
126 return expr
127
128 cnv = CleansingNodeVisitor()
129 try:
130 parsed_tree = ast.parse(expr, mode='eval')
131 cnv.visit(parsed_tree)
132 compiled = compile(parsed_tree, expr, 'eval')
133 result = eval(compiled, JSON_TYPES, dict(locals))
134
135 if include_exceptions:
136 return (result, None)
137 else:
138 return result
139 except SyntaxError as e:
140 # special handling for syntax errors, we just return
141 # the expression string back as-is to support late evaluation
142 if include_exceptions:
143 return (expr, None)
144 return expr
145 except Exception as e:
146 display.warning('Exception in safe_eval() on expr: %s (%s)' % (expr, e))
147 if include_exceptions:
148 return (expr, e)
149 return expr
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/ansible/template/safe_eval.py b/lib/ansible/template/safe_eval.py
--- a/lib/ansible/template/safe_eval.py
+++ b/lib/ansible/template/safe_eval.py
@@ -26,12 +26,6 @@
from ansible import constants as C
from ansible.plugins import filter_loader, test_loader
-try:
- from __main__ import display
-except ImportError:
- from ansible.utils.display import Display
- display = Display()
-
def safe_eval(expr, locals={}, include_exceptions=False):
'''
This is intended for allowing things like:
@@ -143,7 +137,6 @@
return (expr, None)
return expr
except Exception as e:
- display.warning('Exception in safe_eval() on expr: %s (%s)' % (expr, e))
if include_exceptions:
return (expr, e)
return expr
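The patch only drops the warning call (and the now-unused `display` import); the exception handling itself is unchanged, so a value such as `{}/bar` still fails evaluation and is returned verbatim for late evaluation, just without the log noise. A reduced sketch of that retained fallback (the real `safe_eval` also whitelists AST node types, which is omitted here):

```python
import ast

def safe_eval_fallback(expr):
    # Reduced sketch of the behaviour the patch keeps: any failure while
    # evaluating simply returns the original string, now without a warning.
    try:
        tree = ast.parse(expr, mode="eval")
        return eval(compile(tree, expr, "eval"), {}, {})
    except SyntaxError:
        return expr   # not a Python expression at all: leave for late evaluation
    except Exception:
        return expr   # e.g. NameError from "{}/bar": same quiet fallback

print(safe_eval_fallback("{}/bar"))   # -> {}/bar
```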
| {"golden_diff": "diff --git a/lib/ansible/template/safe_eval.py b/lib/ansible/template/safe_eval.py\n--- a/lib/ansible/template/safe_eval.py\n+++ b/lib/ansible/template/safe_eval.py\n@@ -26,12 +26,6 @@\n from ansible import constants as C\n from ansible.plugins import filter_loader, test_loader\n \n-try:\n- from __main__ import display\n-except ImportError:\n- from ansible.utils.display import Display\n- display = Display()\n-\n def safe_eval(expr, locals={}, include_exceptions=False):\n '''\n This is intended for allowing things like:\n@@ -143,7 +137,6 @@\n return (expr, None)\n return expr\n except Exception as e:\n- display.warning('Exception in safe_eval() on expr: %s (%s)' % (expr, e))\n if include_exceptions:\n return (expr, e)\n return expr\n", "issue": "safe_eval() warning for variables containing curly braces and Jinja2 expressions\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\n`template/safe_eval`\n##### ANSIBLE VERSION\n\n```\nansible 2.2.0.0\n config file = \n configured module search path = Default w/o overrides\n```\n##### SUMMARY\n\nThere is a strange `safe_eval()` warning if one wants to template a string containing Jinja2 expressions and curly braces. Even escaping the curly braces like `{{ '{}' }}` does not help.\n##### STEPS TO REPRODUCE\n\n```\n- hosts: localhost\n vars:\n foo: \"bar\"\n tasks:\n - debug: msg=\"{}/{{ foo }}\"\n```\n##### EXPECTED RESULTS\n\nApart from the warning the expected results coincide with the actual ones.\n##### ACTUAL RESULTS\n\n```\nPLAY [localhost] ***************************************************************\n\nTASK [setup] *******************************************************************\nok: [localhost]\n\nTASK [debug] *******************************************************************\n [WARNING]: Exception in safe_eval() on expr: {}/bar (name 'bar' is not defined)\n\nok: [localhost] => {\n \"msg\": \"{}/bar\"\n}\n\nPLAY RECAP *********************************************************************\nlocalhost : ok=2 changed=0 unreachable=0 failed=0 \n```\n\n", "before_files": [{"content": "# (c) 2012, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport ast\nimport sys\n\nfrom ansible.compat.six import string_types\nfrom ansible.compat.six.moves import builtins\n\nfrom ansible import constants as C\nfrom ansible.plugins import filter_loader, test_loader\n\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\ndef safe_eval(expr, locals={}, include_exceptions=False):\n '''\n This is intended for allowing things like:\n with_items: a_list_variable\n\n Where Jinja2 would return a string but we do not want to allow it to\n call functions (outside of Jinja2, where the env is constrained). 
If\n the input data to this function came from an untrusted (remote) source,\n it should first be run through _clean_data_struct() to ensure the data\n is further sanitized prior to evaluation.\n\n Based on:\n http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe\n '''\n\n # define certain JSON types\n # eg. JSON booleans are unknown to python eval()\n JSON_TYPES = {\n 'false': False,\n 'null': None,\n 'true': True,\n }\n\n # this is the whitelist of AST nodes we are going to\n # allow in the evaluation. Any node type other than\n # those listed here will raise an exception in our custom\n # visitor class defined below.\n SAFE_NODES = set(\n (\n ast.Add,\n ast.BinOp,\n ast.Call,\n ast.Compare,\n ast.Dict,\n ast.Div,\n ast.Expression,\n ast.List,\n ast.Load,\n ast.Mult,\n ast.Num,\n ast.Name,\n ast.Str,\n ast.Sub,\n ast.Tuple,\n ast.UnaryOp,\n )\n )\n\n # AST node types were expanded after 2.6\n if sys.version_info[:2] >= (2, 7):\n SAFE_NODES.update(\n set(\n (ast.Set,)\n )\n )\n\n # And in Python 3.4 too\n if sys.version_info[:2] >= (3, 4):\n SAFE_NODES.update(\n set(\n (ast.NameConstant,)\n )\n )\n\n filter_list = []\n for filter in filter_loader.all():\n filter_list.extend(filter.filters().keys())\n\n test_list = []\n for test in test_loader.all():\n test_list.extend(test.tests().keys())\n\n CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list\n\n class CleansingNodeVisitor(ast.NodeVisitor):\n def generic_visit(self, node, inside_call=False):\n if type(node) not in SAFE_NODES:\n raise Exception(\"invalid expression (%s)\" % expr)\n elif isinstance(node, ast.Call):\n inside_call = True\n elif isinstance(node, ast.Name) and inside_call:\n if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:\n raise Exception(\"invalid function: %s\" % node.id)\n # iterate over all child nodes\n for child_node in ast.iter_child_nodes(node):\n self.generic_visit(child_node, inside_call)\n\n if not isinstance(expr, string_types):\n # already templated to a datastructure, perhaps?\n if include_exceptions:\n return (expr, None)\n return expr\n\n cnv = CleansingNodeVisitor()\n try:\n parsed_tree = ast.parse(expr, mode='eval')\n cnv.visit(parsed_tree)\n compiled = compile(parsed_tree, expr, 'eval')\n result = eval(compiled, JSON_TYPES, dict(locals))\n\n if include_exceptions:\n return (result, None)\n else:\n return result\n except SyntaxError as e:\n # special handling for syntax errors, we just return\n # the expression string back as-is to support late evaluation\n if include_exceptions:\n return (expr, None)\n return expr\n except Exception as e:\n display.warning('Exception in safe_eval() on expr: %s (%s)' % (expr, e))\n if include_exceptions:\n return (expr, e)\n return expr\n", "path": "lib/ansible/template/safe_eval.py"}], "after_files": [{"content": "# (c) 2012, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport ast\nimport sys\n\nfrom ansible.compat.six import string_types\nfrom ansible.compat.six.moves import builtins\n\nfrom ansible import constants as C\nfrom ansible.plugins import filter_loader, test_loader\n\ndef safe_eval(expr, locals={}, include_exceptions=False):\n '''\n This is intended for allowing things like:\n with_items: a_list_variable\n\n Where Jinja2 would return a string but we do not want to allow it to\n call functions (outside of Jinja2, where the env is constrained). If\n the input data to this function came from an untrusted (remote) source,\n it should first be run through _clean_data_struct() to ensure the data\n is further sanitized prior to evaluation.\n\n Based on:\n http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe\n '''\n\n # define certain JSON types\n # eg. JSON booleans are unknown to python eval()\n JSON_TYPES = {\n 'false': False,\n 'null': None,\n 'true': True,\n }\n\n # this is the whitelist of AST nodes we are going to\n # allow in the evaluation. Any node type other than\n # those listed here will raise an exception in our custom\n # visitor class defined below.\n SAFE_NODES = set(\n (\n ast.Add,\n ast.BinOp,\n ast.Call,\n ast.Compare,\n ast.Dict,\n ast.Div,\n ast.Expression,\n ast.List,\n ast.Load,\n ast.Mult,\n ast.Num,\n ast.Name,\n ast.Str,\n ast.Sub,\n ast.Tuple,\n ast.UnaryOp,\n )\n )\n\n # AST node types were expanded after 2.6\n if sys.version_info[:2] >= (2, 7):\n SAFE_NODES.update(\n set(\n (ast.Set,)\n )\n )\n\n # And in Python 3.4 too\n if sys.version_info[:2] >= (3, 4):\n SAFE_NODES.update(\n set(\n (ast.NameConstant,)\n )\n )\n\n filter_list = []\n for filter in filter_loader.all():\n filter_list.extend(filter.filters().keys())\n\n test_list = []\n for test in test_loader.all():\n test_list.extend(test.tests().keys())\n\n CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list\n\n class CleansingNodeVisitor(ast.NodeVisitor):\n def generic_visit(self, node, inside_call=False):\n if type(node) not in SAFE_NODES:\n raise Exception(\"invalid expression (%s)\" % expr)\n elif isinstance(node, ast.Call):\n inside_call = True\n elif isinstance(node, ast.Name) and inside_call:\n if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:\n raise Exception(\"invalid function: %s\" % node.id)\n # iterate over all child nodes\n for child_node in ast.iter_child_nodes(node):\n self.generic_visit(child_node, inside_call)\n\n if not isinstance(expr, string_types):\n # already templated to a datastructure, perhaps?\n if include_exceptions:\n return (expr, None)\n return expr\n\n cnv = CleansingNodeVisitor()\n try:\n parsed_tree = ast.parse(expr, mode='eval')\n cnv.visit(parsed_tree)\n compiled = compile(parsed_tree, expr, 'eval')\n result = eval(compiled, JSON_TYPES, dict(locals))\n\n if include_exceptions:\n return (result, None)\n else:\n return result\n except SyntaxError as e:\n # special handling for syntax errors, we just return\n # the expression string back as-is to support late evaluation\n if include_exceptions:\n return (expr, None)\n return expr\n except Exception as e:\n if include_exceptions:\n return (expr, e)\n return expr\n", "path": "lib/ansible/template/safe_eval.py"}]} | 1,966 | 199 |
gh_patches_debug_16465 | rasdani/github-patches | git_diff | getredash__redash-3684 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error running query: Decimal128('0') is not JSON serializable
ANY mongo query against a table with a Decimal128 field type fails.
As a workaround, every mongo query must be aggregated and projected so that it does not return Decimal128 fields.
We are using Decimal128 to work with e.g. transaction amounts, so it's critical to us.
### Technical details:
* Redash Version: Redash 6.0.0+b8537 (4780bd9c)
* Browser/OS: Safari
* How did you install Redash: Cloud
--- END ISSUE ---
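The failure is easy to reproduce outside Redash: `bson.Decimal128` is not a type the standard `json` module knows how to serialise, so any result set containing such a field dies when the query runner dumps it. A minimal sketch (assumes pymongo is installed for the `bson` package; the exact wording of the TypeError varies by Python version):

```python
import json
from bson.decimal128 import Decimal128

row = {"amount": Decimal128("0")}

try:
    json.dumps(row)
except TypeError as e:
    # Comparable to the reported error: Decimal128('0') is not JSON serializable
    print("TypeError:", e)

# Decimal128 exposes a lossless conversion that a custom encoder can use.
print(Decimal128("0").to_decimal())   # Decimal('0')
```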
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/query_runner/mongodb.py`
Content:
```
1 import datetime
2 import logging
3 import re
4
5 from dateutil.parser import parse
6
7 from redash.query_runner import *
8 from redash.utils import JSONEncoder, json_dumps, json_loads, parse_human_time
9
10 logger = logging.getLogger(__name__)
11
12 try:
13 import pymongo
14 from bson.objectid import ObjectId
15 from bson.timestamp import Timestamp
16 from bson.son import SON
17 from bson.json_util import object_hook as bson_object_hook
18 enabled = True
19
20 except ImportError:
21 enabled = False
22
23
24 TYPES_MAP = {
25 str: TYPE_STRING,
26 unicode: TYPE_STRING,
27 int: TYPE_INTEGER,
28 long: TYPE_INTEGER,
29 float: TYPE_FLOAT,
30 bool: TYPE_BOOLEAN,
31 datetime.datetime: TYPE_DATETIME,
32 }
33
34
35 class MongoDBJSONEncoder(JSONEncoder):
36 def default(self, o):
37 if isinstance(o, ObjectId):
38 return str(o)
39 elif isinstance(o, Timestamp):
40 return super(MongoDBJSONEncoder, self).default(o.as_datetime())
41
42 return super(MongoDBJSONEncoder, self).default(o)
43
44
45 date_regex = re.compile("ISODate\(\"(.*)\"\)", re.IGNORECASE)
46
47
48 def parse_oids(oids):
49 if not isinstance(oids, list):
50 raise Exception("$oids takes an array as input.")
51
52 return [bson_object_hook({'$oid': oid}) for oid in oids]
53
54
55 def datetime_parser(dct):
56 for k, v in dct.iteritems():
57 if isinstance(v, basestring):
58 m = date_regex.findall(v)
59 if len(m) > 0:
60 dct[k] = parse(m[0], yearfirst=True)
61
62 if '$humanTime' in dct:
63 return parse_human_time(dct['$humanTime'])
64
65 if '$oids' in dct:
66 return parse_oids(dct['$oids'])
67
68 return bson_object_hook(dct)
69
70
71 def parse_query_json(query):
72 query_data = json_loads(query, object_hook=datetime_parser)
73 return query_data
74
75
76 def _get_column_by_name(columns, column_name):
77 for c in columns:
78 if "name" in c and c["name"] == column_name:
79 return c
80
81 return None
82
83
84 def parse_results(results):
85 rows = []
86 columns = []
87
88 for row in results:
89 parsed_row = {}
90
91 for key in row:
92 if isinstance(row[key], dict):
93 for inner_key in row[key]:
94 column_name = u'{}.{}'.format(key, inner_key)
95 if _get_column_by_name(columns, column_name) is None:
96 columns.append({
97 "name": column_name,
98 "friendly_name": column_name,
99 "type": TYPES_MAP.get(type(row[key][inner_key]), TYPE_STRING)
100 })
101
102 parsed_row[column_name] = row[key][inner_key]
103
104 else:
105 if _get_column_by_name(columns, key) is None:
106 columns.append({
107 "name": key,
108 "friendly_name": key,
109 "type": TYPES_MAP.get(type(row[key]), TYPE_STRING)
110 })
111
112 parsed_row[key] = row[key]
113
114 rows.append(parsed_row)
115
116 return rows, columns
117
118
119 class MongoDB(BaseQueryRunner):
120 @classmethod
121 def configuration_schema(cls):
122 return {
123 'type': 'object',
124 'properties': {
125 'connectionString': {
126 'type': 'string',
127 'title': 'Connection String'
128 },
129 'dbName': {
130 'type': 'string',
131 'title': "Database Name"
132 },
133 'replicaSetName': {
134 'type': 'string',
135 'title': 'Replica Set Name'
136 },
137 },
138 'required': ['connectionString', 'dbName']
139 }
140
141 @classmethod
142 def enabled(cls):
143 return enabled
144
145 @classmethod
146 def annotate_query(cls):
147 return False
148
149 def __init__(self, configuration):
150 super(MongoDB, self).__init__(configuration)
151
152 self.syntax = 'json'
153
154 self.db_name = self.configuration["dbName"]
155
156 self.is_replica_set = True if "replicaSetName" in self.configuration and self.configuration["replicaSetName"] else False
157
158 def _get_db(self):
159 if self.is_replica_set:
160 db_connection = pymongo.MongoClient(self.configuration["connectionString"],
161 replicaSet=self.configuration["replicaSetName"])
162 else:
163 db_connection = pymongo.MongoClient(self.configuration["connectionString"])
164
165 return db_connection[self.db_name]
166
167 def test_connection(self):
168 db = self._get_db()
169 if not db.command("connectionStatus")["ok"]:
170 raise Exception("MongoDB connection error")
171
172 def _merge_property_names(self, columns, document):
173 for property in document:
174 if property not in columns:
175 columns.append(property)
176
177 def _is_collection_a_view(self, db, collection_name):
178 if 'viewOn' in db[collection_name].options():
179 return True
180 else:
181 return False
182
183 def _get_collection_fields(self, db, collection_name):
184 # Since MongoDB is a document based database and each document doesn't have
185 # to have the same fields as another documet in the collection its a bit hard to
186 # show these attributes as fields in the schema.
187 #
188 # For now, the logic is to take the first and last documents (last is determined
189 # by the Natural Order (http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order)
190 # as we don't know the correct order. In most single server installations it would be
191 # find. In replicaset when reading from non master it might not return the really last
192 # document written.
193 collection_is_a_view = self._is_collection_a_view(db, collection_name)
194 documents_sample = []
195 if collection_is_a_view:
196 for d in db[collection_name].find().limit(2):
197 documents_sample.append(d)
198 else:
199 for d in db[collection_name].find().sort([("$natural", 1)]).limit(1):
200 documents_sample.append(d)
201
202 for d in db[collection_name].find().sort([("$natural", -1)]).limit(1):
203 documents_sample.append(d)
204 columns = []
205 for d in documents_sample:
206 self._merge_property_names(columns, d)
207 return columns
208
209 def get_schema(self, get_stats=False):
210 schema = {}
211 db = self._get_db()
212 for collection_name in db.collection_names():
213 if collection_name.startswith('system.'):
214 continue
215 columns = self._get_collection_fields(db, collection_name)
216 schema[collection_name] = {
217 "name": collection_name, "columns": sorted(columns)}
218
219 return schema.values()
220
221
222 def run_query(self, query, user):
223 db = self._get_db()
224
225 logger.debug("mongodb connection string: %s", self.configuration['connectionString'])
226 logger.debug("mongodb got query: %s", query)
227
228 try:
229 query_data = parse_query_json(query)
230 except ValueError:
231 return None, "Invalid query format. The query is not a valid JSON."
232
233 if "collection" not in query_data:
234 return None, "'collection' must have a value to run a query"
235 else:
236 collection = query_data["collection"]
237
238 q = query_data.get("query", None)
239 f = None
240
241 aggregate = query_data.get("aggregate", None)
242 if aggregate:
243 for step in aggregate:
244 if "$sort" in step:
245 sort_list = []
246 for sort_item in step["$sort"]:
247 sort_list.append((sort_item["name"], sort_item["direction"]))
248
249 step["$sort"] = SON(sort_list)
250
251 if not aggregate:
252 s = None
253 if "sort" in query_data and query_data["sort"]:
254 s = []
255 for field in query_data["sort"]:
256 s.append((field["name"], field["direction"]))
257
258 if "fields" in query_data:
259 f = query_data["fields"]
260
261 s = None
262 if "sort" in query_data and query_data["sort"]:
263 s = []
264 for field_data in query_data["sort"]:
265 s.append((field_data["name"], field_data["direction"]))
266
267 columns = []
268 rows = []
269
270 cursor = None
271 if q or (not q and not aggregate):
272 if s:
273 cursor = db[collection].find(q, f).sort(s)
274 else:
275 cursor = db[collection].find(q, f)
276
277 if "skip" in query_data:
278 cursor = cursor.skip(query_data["skip"])
279
280 if "limit" in query_data:
281 cursor = cursor.limit(query_data["limit"])
282
283 if "count" in query_data:
284 cursor = cursor.count()
285
286 elif aggregate:
287 allow_disk_use = query_data.get('allowDiskUse', False)
288 r = db[collection].aggregate(aggregate, allowDiskUse=allow_disk_use)
289
290 # Backwards compatibility with older pymongo versions.
291 #
292 # Older pymongo version would return a dictionary from an aggregate command.
293 # The dict would contain a "result" key which would hold the cursor.
294 # Newer ones return pymongo.command_cursor.CommandCursor.
295 if isinstance(r, dict):
296 cursor = r["result"]
297 else:
298 cursor = r
299
300 if "count" in query_data:
301 columns.append({
302 "name" : "count",
303 "friendly_name" : "count",
304 "type" : TYPE_INTEGER
305 })
306
307 rows.append({ "count" : cursor })
308 else:
309 rows, columns = parse_results(cursor)
310
311 if f:
312 ordered_columns = []
313 for k in sorted(f, key=f.get):
314 column = _get_column_by_name(columns, k)
315 if column:
316 ordered_columns.append(column)
317
318 columns = ordered_columns
319
320 if query_data.get('sortColumns'):
321 reverse = query_data['sortColumns'] == 'desc'
322 columns = sorted(columns, key=lambda col: col['name'], reverse=reverse)
323
324 data = {
325 "columns": columns,
326 "rows": rows
327 }
328 error = None
329 json_data = json_dumps(data, cls=MongoDBJSONEncoder)
330
331 return json_data, error
332
333 register(MongoDB)
334
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/query_runner/mongodb.py b/redash/query_runner/mongodb.py
--- a/redash/query_runner/mongodb.py
+++ b/redash/query_runner/mongodb.py
@@ -13,6 +13,7 @@
import pymongo
from bson.objectid import ObjectId
from bson.timestamp import Timestamp
+ from bson.decimal128 import Decimal128
from bson.son import SON
from bson.json_util import object_hook as bson_object_hook
enabled = True
@@ -38,7 +39,8 @@
return str(o)
elif isinstance(o, Timestamp):
return super(MongoDBJSONEncoder, self).default(o.as_datetime())
-
+ elif isinstance(o, Decimal128):
+ return o.to_decimal()
return super(MongoDBJSONEncoder, self).default(o)
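A rough, standalone sketch of the patched encoder path: `Decimal128` values are converted with `to_decimal()`, and the resulting `decimal.Decimal` is assumed to be something Redash's own `JSONEncoder` base class can serialise. Since that base class is not available here, the stand-in below adds a float fallback for `Decimal` as well, so it is illustrative rather than Redash's actual code.

```python
import json
from decimal import Decimal
from bson.decimal128 import Decimal128
from bson.objectid import ObjectId

class DemoMongoEncoder(json.JSONEncoder):
    # Standalone stand-in for MongoDBJSONEncoder: same Decimal128 branch as the
    # patch, plus a float fallback for Decimal since plain json lacks one.
    def default(self, o):
        if isinstance(o, ObjectId):
            return str(o)
        if isinstance(o, Decimal128):
            return float(o.to_decimal())
        if isinstance(o, Decimal):
            return float(o)
        return super(DemoMongoEncoder, self).default(o)

row = {"_id": ObjectId(), "amount": Decimal128("12.34")}
print(json.dumps(row, cls=DemoMongoEncoder))  # {"_id": "...", "amount": 12.34}
```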
| {"golden_diff": "diff --git a/redash/query_runner/mongodb.py b/redash/query_runner/mongodb.py\n--- a/redash/query_runner/mongodb.py\n+++ b/redash/query_runner/mongodb.py\n@@ -13,6 +13,7 @@\n import pymongo\n from bson.objectid import ObjectId\n from bson.timestamp import Timestamp\n+ from bson.decimal128 import Decimal128\n from bson.son import SON\n from bson.json_util import object_hook as bson_object_hook\n enabled = True\n@@ -38,7 +39,8 @@\n return str(o)\n elif isinstance(o, Timestamp):\n return super(MongoDBJSONEncoder, self).default(o.as_datetime())\n-\n+ elif isinstance(o, Decimal128):\n+ return o.to_decimal()\n return super(MongoDBJSONEncoder, self).default(o)\n", "issue": "Error running query: Decimal128('0') is not JSON serializable\nANY mongo query for table with Decimal128 field type fails.\r\nEvery mongo query must be aggregated and projected to not return Decimal128 fields.\r\nWe are using Decimal128 to work with e.g. transaction amounts so its critical to us.\r\n\r\n### Technical details:\r\n\r\n* Redash Version: Redash 6.0.0+b8537 (4780bd9c)\r\n* Browser/OS: Safari\r\n* How did you install Redash: Cloud\n", "before_files": [{"content": "import datetime\nimport logging\nimport re\n\nfrom dateutil.parser import parse\n\nfrom redash.query_runner import *\nfrom redash.utils import JSONEncoder, json_dumps, json_loads, parse_human_time\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import pymongo\n from bson.objectid import ObjectId\n from bson.timestamp import Timestamp\n from bson.son import SON\n from bson.json_util import object_hook as bson_object_hook\n enabled = True\n\nexcept ImportError:\n enabled = False\n\n\nTYPES_MAP = {\n str: TYPE_STRING,\n unicode: TYPE_STRING,\n int: TYPE_INTEGER,\n long: TYPE_INTEGER,\n float: TYPE_FLOAT,\n bool: TYPE_BOOLEAN,\n datetime.datetime: TYPE_DATETIME,\n}\n\n\nclass MongoDBJSONEncoder(JSONEncoder):\n def default(self, o):\n if isinstance(o, ObjectId):\n return str(o)\n elif isinstance(o, Timestamp):\n return super(MongoDBJSONEncoder, self).default(o.as_datetime())\n\n return super(MongoDBJSONEncoder, self).default(o)\n\n\ndate_regex = re.compile(\"ISODate\\(\\\"(.*)\\\"\\)\", re.IGNORECASE)\n\n\ndef parse_oids(oids):\n if not isinstance(oids, list):\n raise Exception(\"$oids takes an array as input.\")\n\n return [bson_object_hook({'$oid': oid}) for oid in oids]\n\n\ndef datetime_parser(dct):\n for k, v in dct.iteritems():\n if isinstance(v, basestring):\n m = date_regex.findall(v)\n if len(m) > 0:\n dct[k] = parse(m[0], yearfirst=True)\n\n if '$humanTime' in dct:\n return parse_human_time(dct['$humanTime'])\n\n if '$oids' in dct:\n return parse_oids(dct['$oids'])\n\n return bson_object_hook(dct)\n\n\ndef parse_query_json(query):\n query_data = json_loads(query, object_hook=datetime_parser)\n return query_data\n\n\ndef _get_column_by_name(columns, column_name):\n for c in columns:\n if \"name\" in c and c[\"name\"] == column_name:\n return c\n\n return None\n\n\ndef parse_results(results):\n rows = []\n columns = []\n\n for row in results:\n parsed_row = {}\n\n for key in row:\n if isinstance(row[key], dict):\n for inner_key in row[key]:\n column_name = u'{}.{}'.format(key, inner_key)\n if _get_column_by_name(columns, column_name) is None:\n columns.append({\n \"name\": column_name,\n \"friendly_name\": column_name,\n \"type\": TYPES_MAP.get(type(row[key][inner_key]), TYPE_STRING)\n })\n\n parsed_row[column_name] = row[key][inner_key]\n\n else:\n if _get_column_by_name(columns, key) is None:\n columns.append({\n \"name\": key,\n 
\"friendly_name\": key,\n \"type\": TYPES_MAP.get(type(row[key]), TYPE_STRING)\n })\n\n parsed_row[key] = row[key]\n\n rows.append(parsed_row)\n\n return rows, columns\n\n\nclass MongoDB(BaseQueryRunner):\n @classmethod\n def configuration_schema(cls):\n return {\n 'type': 'object',\n 'properties': {\n 'connectionString': {\n 'type': 'string',\n 'title': 'Connection String'\n },\n 'dbName': {\n 'type': 'string',\n 'title': \"Database Name\"\n },\n 'replicaSetName': {\n 'type': 'string',\n 'title': 'Replica Set Name'\n },\n },\n 'required': ['connectionString', 'dbName']\n }\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def annotate_query(cls):\n return False\n\n def __init__(self, configuration):\n super(MongoDB, self).__init__(configuration)\n\n self.syntax = 'json'\n\n self.db_name = self.configuration[\"dbName\"]\n\n self.is_replica_set = True if \"replicaSetName\" in self.configuration and self.configuration[\"replicaSetName\"] else False\n\n def _get_db(self):\n if self.is_replica_set:\n db_connection = pymongo.MongoClient(self.configuration[\"connectionString\"],\n replicaSet=self.configuration[\"replicaSetName\"])\n else:\n db_connection = pymongo.MongoClient(self.configuration[\"connectionString\"])\n\n return db_connection[self.db_name]\n\n def test_connection(self):\n db = self._get_db()\n if not db.command(\"connectionStatus\")[\"ok\"]:\n raise Exception(\"MongoDB connection error\")\n\n def _merge_property_names(self, columns, document):\n for property in document:\n if property not in columns:\n columns.append(property)\n\n def _is_collection_a_view(self, db, collection_name):\n if 'viewOn' in db[collection_name].options():\n return True\n else:\n return False\n\n def _get_collection_fields(self, db, collection_name):\n # Since MongoDB is a document based database and each document doesn't have\n # to have the same fields as another documet in the collection its a bit hard to\n # show these attributes as fields in the schema.\n #\n # For now, the logic is to take the first and last documents (last is determined\n # by the Natural Order (http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order)\n # as we don't know the correct order. In most single server installations it would be\n # find. 
In replicaset when reading from non master it might not return the really last\n # document written.\n collection_is_a_view = self._is_collection_a_view(db, collection_name)\n documents_sample = []\n if collection_is_a_view:\n for d in db[collection_name].find().limit(2):\n documents_sample.append(d)\n else:\n for d in db[collection_name].find().sort([(\"$natural\", 1)]).limit(1):\n documents_sample.append(d)\n\n for d in db[collection_name].find().sort([(\"$natural\", -1)]).limit(1):\n documents_sample.append(d)\n columns = []\n for d in documents_sample:\n self._merge_property_names(columns, d)\n return columns\n\n def get_schema(self, get_stats=False):\n schema = {}\n db = self._get_db()\n for collection_name in db.collection_names():\n if collection_name.startswith('system.'):\n continue\n columns = self._get_collection_fields(db, collection_name)\n schema[collection_name] = {\n \"name\": collection_name, \"columns\": sorted(columns)}\n\n return schema.values()\n\n\n def run_query(self, query, user):\n db = self._get_db()\n\n logger.debug(\"mongodb connection string: %s\", self.configuration['connectionString'])\n logger.debug(\"mongodb got query: %s\", query)\n\n try:\n query_data = parse_query_json(query)\n except ValueError:\n return None, \"Invalid query format. The query is not a valid JSON.\"\n\n if \"collection\" not in query_data:\n return None, \"'collection' must have a value to run a query\"\n else:\n collection = query_data[\"collection\"]\n\n q = query_data.get(\"query\", None)\n f = None\n\n aggregate = query_data.get(\"aggregate\", None)\n if aggregate:\n for step in aggregate:\n if \"$sort\" in step:\n sort_list = []\n for sort_item in step[\"$sort\"]:\n sort_list.append((sort_item[\"name\"], sort_item[\"direction\"]))\n\n step[\"$sort\"] = SON(sort_list)\n\n if not aggregate:\n s = None\n if \"sort\" in query_data and query_data[\"sort\"]:\n s = []\n for field in query_data[\"sort\"]:\n s.append((field[\"name\"], field[\"direction\"]))\n\n if \"fields\" in query_data:\n f = query_data[\"fields\"]\n\n s = None\n if \"sort\" in query_data and query_data[\"sort\"]:\n s = []\n for field_data in query_data[\"sort\"]:\n s.append((field_data[\"name\"], field_data[\"direction\"]))\n\n columns = []\n rows = []\n\n cursor = None\n if q or (not q and not aggregate):\n if s:\n cursor = db[collection].find(q, f).sort(s)\n else:\n cursor = db[collection].find(q, f)\n\n if \"skip\" in query_data:\n cursor = cursor.skip(query_data[\"skip\"])\n\n if \"limit\" in query_data:\n cursor = cursor.limit(query_data[\"limit\"])\n\n if \"count\" in query_data:\n cursor = cursor.count()\n\n elif aggregate:\n allow_disk_use = query_data.get('allowDiskUse', False)\n r = db[collection].aggregate(aggregate, allowDiskUse=allow_disk_use)\n\n # Backwards compatibility with older pymongo versions.\n #\n # Older pymongo version would return a dictionary from an aggregate command.\n # The dict would contain a \"result\" key which would hold the cursor.\n # Newer ones return pymongo.command_cursor.CommandCursor.\n if isinstance(r, dict):\n cursor = r[\"result\"]\n else:\n cursor = r\n\n if \"count\" in query_data:\n columns.append({\n \"name\" : \"count\",\n \"friendly_name\" : \"count\",\n \"type\" : TYPE_INTEGER\n })\n\n rows.append({ \"count\" : cursor })\n else:\n rows, columns = parse_results(cursor)\n\n if f:\n ordered_columns = []\n for k in sorted(f, key=f.get):\n column = _get_column_by_name(columns, k)\n if column:\n ordered_columns.append(column)\n\n columns = ordered_columns\n\n if 
query_data.get('sortColumns'):\n reverse = query_data['sortColumns'] == 'desc'\n columns = sorted(columns, key=lambda col: col['name'], reverse=reverse)\n\n data = {\n \"columns\": columns,\n \"rows\": rows\n }\n error = None\n json_data = json_dumps(data, cls=MongoDBJSONEncoder)\n\n return json_data, error\n\nregister(MongoDB)\n", "path": "redash/query_runner/mongodb.py"}], "after_files": [{"content": "import datetime\nimport logging\nimport re\n\nfrom dateutil.parser import parse\n\nfrom redash.query_runner import *\nfrom redash.utils import JSONEncoder, json_dumps, json_loads, parse_human_time\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import pymongo\n from bson.objectid import ObjectId\n from bson.timestamp import Timestamp\n from bson.decimal128 import Decimal128\n from bson.son import SON\n from bson.json_util import object_hook as bson_object_hook\n enabled = True\n\nexcept ImportError:\n enabled = False\n\n\nTYPES_MAP = {\n str: TYPE_STRING,\n unicode: TYPE_STRING,\n int: TYPE_INTEGER,\n long: TYPE_INTEGER,\n float: TYPE_FLOAT,\n bool: TYPE_BOOLEAN,\n datetime.datetime: TYPE_DATETIME,\n}\n\n\nclass MongoDBJSONEncoder(JSONEncoder):\n def default(self, o):\n if isinstance(o, ObjectId):\n return str(o)\n elif isinstance(o, Timestamp):\n return super(MongoDBJSONEncoder, self).default(o.as_datetime())\n elif isinstance(o, Decimal128):\n return o.to_decimal()\n return super(MongoDBJSONEncoder, self).default(o)\n\n\ndate_regex = re.compile(\"ISODate\\(\\\"(.*)\\\"\\)\", re.IGNORECASE)\n\n\ndef parse_oids(oids):\n if not isinstance(oids, list):\n raise Exception(\"$oids takes an array as input.\")\n\n return [bson_object_hook({'$oid': oid}) for oid in oids]\n\n\ndef datetime_parser(dct):\n for k, v in dct.iteritems():\n if isinstance(v, basestring):\n m = date_regex.findall(v)\n if len(m) > 0:\n dct[k] = parse(m[0], yearfirst=True)\n\n if '$humanTime' in dct:\n return parse_human_time(dct['$humanTime'])\n\n if '$oids' in dct:\n return parse_oids(dct['$oids'])\n\n return bson_object_hook(dct)\n\n\ndef parse_query_json(query):\n query_data = json_loads(query, object_hook=datetime_parser)\n return query_data\n\n\ndef _get_column_by_name(columns, column_name):\n for c in columns:\n if \"name\" in c and c[\"name\"] == column_name:\n return c\n\n return None\n\n\ndef parse_results(results):\n rows = []\n columns = []\n\n for row in results:\n parsed_row = {}\n\n for key in row:\n if isinstance(row[key], dict):\n for inner_key in row[key]:\n column_name = u'{}.{}'.format(key, inner_key)\n if _get_column_by_name(columns, column_name) is None:\n columns.append({\n \"name\": column_name,\n \"friendly_name\": column_name,\n \"type\": TYPES_MAP.get(type(row[key][inner_key]), TYPE_STRING)\n })\n\n parsed_row[column_name] = row[key][inner_key]\n\n else:\n if _get_column_by_name(columns, key) is None:\n columns.append({\n \"name\": key,\n \"friendly_name\": key,\n \"type\": TYPES_MAP.get(type(row[key]), TYPE_STRING)\n })\n\n parsed_row[key] = row[key]\n\n rows.append(parsed_row)\n\n return rows, columns\n\n\nclass MongoDB(BaseQueryRunner):\n @classmethod\n def configuration_schema(cls):\n return {\n 'type': 'object',\n 'properties': {\n 'connectionString': {\n 'type': 'string',\n 'title': 'Connection String'\n },\n 'dbName': {\n 'type': 'string',\n 'title': \"Database Name\"\n },\n 'replicaSetName': {\n 'type': 'string',\n 'title': 'Replica Set Name'\n },\n },\n 'required': ['connectionString', 'dbName']\n }\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def 
annotate_query(cls):\n return False\n\n def __init__(self, configuration):\n super(MongoDB, self).__init__(configuration)\n\n self.syntax = 'json'\n\n self.db_name = self.configuration[\"dbName\"]\n\n self.is_replica_set = True if \"replicaSetName\" in self.configuration and self.configuration[\"replicaSetName\"] else False\n\n def _get_db(self):\n if self.is_replica_set:\n db_connection = pymongo.MongoClient(self.configuration[\"connectionString\"],\n replicaSet=self.configuration[\"replicaSetName\"])\n else:\n db_connection = pymongo.MongoClient(self.configuration[\"connectionString\"])\n\n return db_connection[self.db_name]\n\n def test_connection(self):\n db = self._get_db()\n if not db.command(\"connectionStatus\")[\"ok\"]:\n raise Exception(\"MongoDB connection error\")\n\n def _merge_property_names(self, columns, document):\n for property in document:\n if property not in columns:\n columns.append(property)\n\n def _is_collection_a_view(self, db, collection_name):\n if 'viewOn' in db[collection_name].options():\n return True\n else:\n return False\n\n def _get_collection_fields(self, db, collection_name):\n # Since MongoDB is a document based database and each document doesn't have\n # to have the same fields as another documet in the collection its a bit hard to\n # show these attributes as fields in the schema.\n #\n # For now, the logic is to take the first and last documents (last is determined\n # by the Natural Order (http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order)\n # as we don't know the correct order. In most single server installations it would be\n # find. In replicaset when reading from non master it might not return the really last\n # document written.\n collection_is_a_view = self._is_collection_a_view(db, collection_name)\n documents_sample = []\n if collection_is_a_view:\n for d in db[collection_name].find().limit(2):\n documents_sample.append(d)\n else:\n for d in db[collection_name].find().sort([(\"$natural\", 1)]).limit(1):\n documents_sample.append(d)\n\n for d in db[collection_name].find().sort([(\"$natural\", -1)]).limit(1):\n documents_sample.append(d)\n columns = []\n for d in documents_sample:\n self._merge_property_names(columns, d)\n return columns\n\n def get_schema(self, get_stats=False):\n schema = {}\n db = self._get_db()\n for collection_name in db.collection_names():\n if collection_name.startswith('system.'):\n continue\n columns = self._get_collection_fields(db, collection_name)\n schema[collection_name] = {\n \"name\": collection_name, \"columns\": sorted(columns)}\n\n return schema.values()\n\n\n def run_query(self, query, user):\n db = self._get_db()\n\n logger.debug(\"mongodb connection string: %s\", self.configuration['connectionString'])\n logger.debug(\"mongodb got query: %s\", query)\n\n try:\n query_data = parse_query_json(query)\n except ValueError:\n return None, \"Invalid query format. 
The query is not a valid JSON.\"\n\n if \"collection\" not in query_data:\n return None, \"'collection' must have a value to run a query\"\n else:\n collection = query_data[\"collection\"]\n\n q = query_data.get(\"query\", None)\n f = None\n\n aggregate = query_data.get(\"aggregate\", None)\n if aggregate:\n for step in aggregate:\n if \"$sort\" in step:\n sort_list = []\n for sort_item in step[\"$sort\"]:\n sort_list.append((sort_item[\"name\"], sort_item[\"direction\"]))\n\n step[\"$sort\"] = SON(sort_list)\n\n if not aggregate:\n s = None\n if \"sort\" in query_data and query_data[\"sort\"]:\n s = []\n for field in query_data[\"sort\"]:\n s.append((field[\"name\"], field[\"direction\"]))\n\n if \"fields\" in query_data:\n f = query_data[\"fields\"]\n\n s = None\n if \"sort\" in query_data and query_data[\"sort\"]:\n s = []\n for field_data in query_data[\"sort\"]:\n s.append((field_data[\"name\"], field_data[\"direction\"]))\n\n columns = []\n rows = []\n\n cursor = None\n if q or (not q and not aggregate):\n if s:\n cursor = db[collection].find(q, f).sort(s)\n else:\n cursor = db[collection].find(q, f)\n\n if \"skip\" in query_data:\n cursor = cursor.skip(query_data[\"skip\"])\n\n if \"limit\" in query_data:\n cursor = cursor.limit(query_data[\"limit\"])\n\n if \"count\" in query_data:\n cursor = cursor.count()\n\n elif aggregate:\n allow_disk_use = query_data.get('allowDiskUse', False)\n r = db[collection].aggregate(aggregate, allowDiskUse=allow_disk_use)\n\n # Backwards compatibility with older pymongo versions.\n #\n # Older pymongo version would return a dictionary from an aggregate command.\n # The dict would contain a \"result\" key which would hold the cursor.\n # Newer ones return pymongo.command_cursor.CommandCursor.\n if isinstance(r, dict):\n cursor = r[\"result\"]\n else:\n cursor = r\n\n if \"count\" in query_data:\n columns.append({\n \"name\" : \"count\",\n \"friendly_name\" : \"count\",\n \"type\" : TYPE_INTEGER\n })\n\n rows.append({ \"count\" : cursor })\n else:\n rows, columns = parse_results(cursor)\n\n if f:\n ordered_columns = []\n for k in sorted(f, key=f.get):\n column = _get_column_by_name(columns, k)\n if column:\n ordered_columns.append(column)\n\n columns = ordered_columns\n\n if query_data.get('sortColumns'):\n reverse = query_data['sortColumns'] == 'desc'\n columns = sorted(columns, key=lambda col: col['name'], reverse=reverse)\n\n data = {\n \"columns\": columns,\n \"rows\": rows\n }\n error = None\n json_data = json_dumps(data, cls=MongoDBJSONEncoder)\n\n return json_data, error\n\nregister(MongoDB)\n", "path": "redash/query_runner/mongodb.py"}]} | 3,484 | 184 |
gh_patches_debug_21463 | rasdani/github-patches | git_diff | pwndbg__pwndbg-864 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'start' command behavior has changed
`git bisect` suggests this was introduced in 8f33ec480f47c038e0077cab67153f3ec3be9181
Prior to this, when debugging programs built with `gcc` using the `-ggdb` flag, pwndbg would break at the program's `main()` function immediately after the `start` command was issued. Since 8f33ec480f47c038e0077cab67153f3ec3be9181 the `start` command breaks at `_start()` the 1st time it's issued, and must be issued a 2nd time before `main()` is reached, after which it consistently stops at `main()`. See below for an example.
A simple program:
```
// main.c
#include <stdio.h>
void main(void) {
printf("Hello World!\n");
}
```
Built and run with:
```
$ gcc -ggdb main.c
$ gdb a.out
pwndbg> set context-sections code
pwndbg> start
```
The `set context-sections` command is used here to truncate output for the screenshots and isn't part of the bug.
Previously, `main()` was reached immediately:

After 8f33ec480f47c038e0077cab67153f3ec3be9181 the `start` command breaks at `_start()` first, then it must be issued a 2nd time before `main()` is reached and the "source" panel appears. After this, every invocation of `start` will break at the `main()` function:

Perhaps some folk would rather break earlier in their code when issuing the `start` command, but it's odd that when starting the program for the 2nd time a different result is obtained, since `start` tells GDB to run the program over from scratch.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/commands/start.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Launches the target process after setting a breakpoint at a convenient
5 entry point.
6 """
7 import argparse
8 import shlex
9 from shlex import quote
10
11 import gdb
12
13 import pwndbg.commands
14 import pwndbg.elf
15 import pwndbg.events
16 import pwndbg.symbol
17
18 break_on_first_instruction = False
19
20
21 @pwndbg.events.start
22 def on_start():
23 global break_on_first_instruction
24 if break_on_first_instruction:
25 spec = "*%#x" % (int(pwndbg.elf.entry()))
26 gdb.Breakpoint(spec, temporary=True)
27 break_on_first_instruction = False
28
29
30 parser = argparse.ArgumentParser(description="""
31 Set a breakpoint at a convenient location in the binary,
32 generally 'main', 'init', or the entry point.""")
33 parser.add_argument("args", nargs="*", type=str, default=None, help="The arguments to run the binary with.")
34 @pwndbg.commands.ArgparsedCommand(parser)
35 def start(args=None):
36 if args is None:
37 args = []
38 """
39 Set a breakpoint at a convenient location in the binary,
40 generally 'main', 'init', or the entry point.
41 """
42 run = 'run ' + ' '.join(args)
43
44 symbols = ["main",
45 "_main",
46 "start",
47 "_start",
48 "init",
49 "_init"]
50
51 for symbol in symbols:
52 address = pwndbg.symbol.address(symbol)
53
54 if not address:
55 continue
56
57 b = gdb.Breakpoint(symbol, temporary=True)
58 gdb.execute(run, from_tty=False, to_string=True)
59 return
60
61 # Try a breakpoint at the binary entry
62 entry(args)
63
64
65 parser = argparse.ArgumentParser(description="""
66 Set a breakpoint at the first instruction executed in
67 the target binary.
68 """)
69 parser.add_argument("args", nargs="*", type=str, default=None, help="The arguments to run the binary with.")
70 @pwndbg.commands.ArgparsedCommand(parser)
71 @pwndbg.commands.OnlyWithFile
72 def entry(args=None):
73 if args is None:
74 arg = []
75 """
76 Set a breakpoint at the first instruction executed in
77 the target binary.
78 """
79 global break_on_first_instruction
80 break_on_first_instruction = True
81 run = 'run ' + ' '.join(map(quote, args))
82 gdb.execute(run, from_tty=False)
83
```
Path: `pwndbg/symbol.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Looking up addresses for function names / symbols, and
5 vice-versa.
6
7 Uses IDA when available if there isn't sufficient symbol
8 information available.
9 """
10 import os
11 import re
12 import shutil
13 import tempfile
14
15 import elftools.common.exceptions
16 import elftools.elf.constants
17 import elftools.elf.elffile
18 import elftools.elf.segments
19 import gdb
20
21 import pwndbg.arch
22 import pwndbg.elf
23 import pwndbg.events
24 import pwndbg.file
25 import pwndbg.ida
26 import pwndbg.memoize
27 import pwndbg.memory
28 import pwndbg.qemu
29 import pwndbg.remote
30 import pwndbg.stack
31 import pwndbg.vmmap
32
33
34 def get_directory():
35 """
36 Retrieve the debug file directory path.
37
38 The debug file directory path ('show debug-file-directory') is a comma-
39 separated list of directories which GDB will look in to find the binaries
40 currently loaded.
41 """
42 result = gdb.execute('show debug-file-directory', to_string=True, from_tty=False)
43 expr = r'The directory where separate debug symbols are searched for is "(.*)".\n'
44
45 match = re.search(expr, result)
46
47 if match:
48 return match.group(1)
49 return ''
50
51 def set_directory(d):
52 gdb.execute('set debug-file-directory %s' % d, to_string=True, from_tty=False)
53
54 def add_directory(d):
55 current = get_directory()
56 if current:
57 set_directory('%s:%s' % (current, d))
58 else:
59 set_directory(d)
60
61 remote_files = {}
62 remote_files_dir = None
63
64 @pwndbg.events.exit
65 def reset_remote_files():
66 global remote_files
67 global remote_files_dir
68 remote_files = {}
69 if remote_files_dir is not None:
70 shutil.rmtree(remote_files_dir)
71 remote_files_dir = None
72
73 @pwndbg.events.new_objfile
74 def autofetch():
75 """
76 """
77 global remote_files_dir
78 if not pwndbg.remote.is_remote():
79 return
80
81 if pwndbg.qemu.is_qemu_usermode():
82 return
83
84 if pwndbg.android.is_android():
85 return
86
87 if not remote_files_dir:
88 remote_files_dir = tempfile.mkdtemp()
89 add_directory(remote_files_dir)
90
91 searchpath = get_directory()
92
93 for mapping in pwndbg.vmmap.get():
94 objfile = mapping.objfile
95
96 # Don't attempt to download things like '[stack]' and '[heap]'
97 if not objfile.startswith('/'):
98 continue
99
100 # Don't re-download things that we have already downloaded
101 if not objfile or objfile in remote_files:
102 continue
103
104 msg = "Downloading %r from the remote server" % objfile
105 print(msg, end='')
106
107 try:
108 data = pwndbg.file.get(objfile)
109 print('\r' + msg + ': OK')
110 except OSError:
111 # The file could not be downloaded :(
112 print('\r' + msg + ': Failed')
113 return
114
115 filename = os.path.basename(objfile)
116 local_path = os.path.join(remote_files_dir, filename)
117
118 with open(local_path, 'wb+') as f:
119 f.write(data)
120
121 remote_files[objfile] = local_path
122
123 base = None
124 for mapping in pwndbg.vmmap.get():
125 if mapping.objfile != objfile:
126 continue
127
128 if base is None or mapping.vaddr < base.vaddr:
129 base = mapping
130
131 if not base:
132 continue
133
134 base = base.vaddr
135
136 try:
137 elf = elftools.elf.elffile.ELFFile(open(local_path, 'rb'))
138 except elftools.common.exceptions.ELFError:
139 continue
140
141 gdb_command = ['add-symbol-file', local_path, hex(int(base))]
142 for section in elf.iter_sections():
143 name = section.name #.decode('latin-1')
144 section = section.header
145 if not section.sh_flags & elftools.elf.constants.SH_FLAGS.SHF_ALLOC:
146 continue
147 gdb_command += ['-s', name, hex(int(base + section.sh_addr))]
148
149 print(' '.join(gdb_command))
150 # gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True)
151
152 @pwndbg.memoize.reset_on_objfile
153 def get(address, gdb_only=False):
154 """
155 Retrieve the textual name for a symbol
156 """
157 # Fast path
158 if address < pwndbg.memory.MMAP_MIN_ADDR or address >= ((1 << 64)-1):
159 return ''
160
161 # Don't look up stack addresses
162 if pwndbg.stack.find(address):
163 return ''
164
165 # This sucks, but there's not a GDB API for this.
166 result = gdb.execute('info symbol %#x' % int(address), to_string=True, from_tty=False)
167
168 if not gdb_only and result.startswith('No symbol'):
169 address = int(address)
170 exe = pwndbg.elf.exe()
171 if exe:
172 exe_map = pwndbg.vmmap.find(exe.address)
173 if exe_map and address in exe_map:
174 res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)
175 return res or ''
176
177 # Expected format looks like this:
178 # main in section .text of /bin/bash
179 # main + 3 in section .text of /bin/bash
180 # system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6
181 # No symbol matches system-1.
182 a, b, c, _ = result.split(None, 3)
183
184
185 if b == '+':
186 return "%s+%s" % (a, c)
187 if b == 'in':
188 return a
189
190 return ''
191
192 @pwndbg.memoize.reset_on_objfile
193 def address(symbol):
194 if isinstance(symbol, int):
195 return symbol
196
197 try:
198 return int(symbol, 0)
199 except:
200 pass
201
202 try:
203 symbol_obj = gdb.lookup_symbol(symbol)[0]
204 if symbol_obj:
205 return int(symbol_obj.value().address)
206 except Exception:
207 pass
208
209 try:
210 result = gdb.execute('info address %s' % symbol, to_string=True, from_tty=False)
211 address = int(re.search('0x[0-9a-fA-F]+', result).group(), 0)
212
213 # The address found should lie in one of the memory maps
214 # There are cases when GDB shows offsets e.g.:
215 # pwndbg> info address tcache
216 # Symbol "tcache" is a thread-local variable at offset 0x40
217 # in the thread-local storage for `/lib/x86_64-linux-gnu/libc.so.6'.
218 if not pwndbg.vmmap.find(address):
219 return None
220
221 return address
222
223 except gdb.error:
224 return None
225
226 try:
227 address = pwndbg.ida.LocByName(symbol)
228 if address:
229 return address
230 except Exception:
231 pass
232
233 @pwndbg.events.stop
234 @pwndbg.memoize.reset_on_start
235 def add_main_exe_to_symbols():
236 if not pwndbg.remote.is_remote():
237 return
238
239 if pwndbg.android.is_android():
240 return
241
242 exe = pwndbg.elf.exe()
243
244 if not exe:
245 return
246
247 addr = exe.address
248
249 if not addr:
250 return
251
252 addr = int(addr)
253
254 mmap = pwndbg.vmmap.find(addr)
255 if not mmap:
256 return
257
258 path = mmap.objfile
259 if path and (pwndbg.arch.endian == pwndbg.arch.native_endian):
260 try:
261 gdb.execute('add-symbol-file %s %#x' % (path, addr), from_tty=False, to_string=True)
262 except gdb.error:
263 pass
264
265 if '/usr/lib/debug' not in get_directory():
266 set_directory(get_directory() + ':/usr/lib/debug')
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/commands/start.py b/pwndbg/commands/start.py
--- a/pwndbg/commands/start.py
+++ b/pwndbg/commands/start.py
@@ -49,7 +49,7 @@
"_init"]
for symbol in symbols:
- address = pwndbg.symbol.address(symbol)
+ address = pwndbg.symbol.address(symbol, allow_unmapped=True)
if not address:
continue
diff --git a/pwndbg/symbol.py b/pwndbg/symbol.py
--- a/pwndbg/symbol.py
+++ b/pwndbg/symbol.py
@@ -190,7 +190,7 @@
return ''
@pwndbg.memoize.reset_on_objfile
-def address(symbol):
+def address(symbol, allow_unmapped=False):
if isinstance(symbol, int):
return symbol
@@ -215,7 +215,7 @@
# pwndbg> info address tcache
# Symbol "tcache" is a thread-local variable at offset 0x40
# in the thread-local storage for `/lib/x86_64-linux-gnu/libc.so.6'.
- if not pwndbg.vmmap.find(address):
+ if not allow_unmapped and not pwndbg.vmmap.find(address):
return None
return address
| {"golden_diff": "diff --git a/pwndbg/commands/start.py b/pwndbg/commands/start.py\n--- a/pwndbg/commands/start.py\n+++ b/pwndbg/commands/start.py\n@@ -49,7 +49,7 @@\n \"_init\"]\n \n for symbol in symbols:\n- address = pwndbg.symbol.address(symbol)\n+ address = pwndbg.symbol.address(symbol, allow_unmapped=True)\n \n if not address:\n continue\ndiff --git a/pwndbg/symbol.py b/pwndbg/symbol.py\n--- a/pwndbg/symbol.py\n+++ b/pwndbg/symbol.py\n@@ -190,7 +190,7 @@\n return ''\n \n @pwndbg.memoize.reset_on_objfile\n-def address(symbol):\n+def address(symbol, allow_unmapped=False):\n if isinstance(symbol, int):\n return symbol\n \n@@ -215,7 +215,7 @@\n # pwndbg> info address tcache\n # Symbol \"tcache\" is a thread-local variable at offset 0x40\n # in the thread-local storage for `/lib/x86_64-linux-gnu/libc.so.6'.\n- if not pwndbg.vmmap.find(address):\n+ if not allow_unmapped and not pwndbg.vmmap.find(address):\n return None\n \n return address\n", "issue": "'start' command behavior has changed\n`git bisect` suggests this was introduced in 8f33ec480f47c038e0077cab67153f3ec3be9181\r\n\r\nPrior to this, when debugging programs built with `gcc` using the `-ggdb` flag, pwndbg would break at the program's `main()` function immediately after the `start` command was issued. Since 8f33ec480f47c038e0077cab67153f3ec3be9181 the `start` command breaks at `_start()` the 1st time it's issued, and must be issued a 2nd time before `main()` is reached, after which it consistently stops at `main()`. See below for an example.\r\n\r\nA simple program:\r\n```\r\n// main.c\r\n#include <stdio.h>\r\n\r\nvoid main(void) {\r\n printf(\"Hello World!\\n\");\r\n}\r\n```\r\n\r\nBuilt and run with:\r\n```\r\n$ gcc -ggdb main.c\r\n$ gdb a.out\r\npwndbg> set context-sections code\r\npwndbg> start\r\n```\r\n\r\nThe `set context-sections` command is used here to truncate output for the screenshots and isn't part of the bug.\r\nPreviously, `main()` was reached immediately:\r\n\r\n\r\nAfter 8f33ec480f47c038e0077cab67153f3ec3be9181 the `start` command breaks at `_start()` first, then it must be issued a 2nd time before `main()` is reached and the \"source\" panel appears. 
After this, every invocation of `start` will break at the `main()` function:\r\n\r\n\r\nPerhaps some folk would rather break earlier in their code when issuing the `start` command, but it's odd that when starting the program for the 2nd time a different result is obtained, since `start` tells GDB to run the program over from scratch.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nLaunches the target process after setting a breakpoint at a convenient\nentry point.\n\"\"\"\nimport argparse\nimport shlex\nfrom shlex import quote\n\nimport gdb\n\nimport pwndbg.commands\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.symbol\n\nbreak_on_first_instruction = False\n\n\[email protected]\ndef on_start():\n global break_on_first_instruction\n if break_on_first_instruction:\n spec = \"*%#x\" % (int(pwndbg.elf.entry()))\n gdb.Breakpoint(spec, temporary=True)\n break_on_first_instruction = False\n\n\nparser = argparse.ArgumentParser(description=\"\"\"\n Set a breakpoint at a convenient location in the binary,\n generally 'main', 'init', or the entry point.\"\"\")\nparser.add_argument(\"args\", nargs=\"*\", type=str, default=None, help=\"The arguments to run the binary with.\")\[email protected](parser)\ndef start(args=None):\n if args is None:\n args = []\n \"\"\"\n Set a breakpoint at a convenient location in the binary,\n generally 'main', 'init', or the entry point.\n \"\"\"\n run = 'run ' + ' '.join(args)\n\n symbols = [\"main\",\n \"_main\",\n \"start\",\n \"_start\",\n \"init\",\n \"_init\"]\n\n for symbol in symbols:\n address = pwndbg.symbol.address(symbol)\n\n if not address:\n continue\n\n b = gdb.Breakpoint(symbol, temporary=True)\n gdb.execute(run, from_tty=False, to_string=True)\n return\n\n # Try a breakpoint at the binary entry\n entry(args)\n\n\nparser = argparse.ArgumentParser(description=\"\"\"\n Set a breakpoint at the first instruction executed in\n the target binary.\n \"\"\")\nparser.add_argument(\"args\", nargs=\"*\", type=str, default=None, help=\"The arguments to run the binary with.\")\[email protected](parser)\[email protected]\ndef entry(args=None):\n if args is None:\n arg = []\n \"\"\"\n Set a breakpoint at the first instruction executed in\n the target binary.\n \"\"\"\n global break_on_first_instruction\n break_on_first_instruction = True\n run = 'run ' + ' '.join(map(quote, args))\n gdb.execute(run, from_tty=False)\n", "path": "pwndbg/commands/start.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nLooking up addresses for function names / symbols, and\nvice-versa.\n\nUses IDA when available if there isn't sufficient symbol\ninformation available.\n\"\"\"\nimport os\nimport re\nimport shutil\nimport tempfile\n\nimport elftools.common.exceptions\nimport elftools.elf.constants\nimport elftools.elf.elffile\nimport elftools.elf.segments\nimport gdb\n\nimport pwndbg.arch\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.file\nimport pwndbg.ida\nimport pwndbg.memoize\nimport pwndbg.memory\nimport pwndbg.qemu\nimport pwndbg.remote\nimport pwndbg.stack\nimport pwndbg.vmmap\n\n\ndef get_directory():\n \"\"\"\n Retrieve the debug file directory path.\n\n The debug file directory path ('show debug-file-directory') is a comma-\n separated list of directories which GDB will look in to find the binaries\n currently loaded.\n \"\"\"\n result = gdb.execute('show debug-file-directory', to_string=True, from_tty=False)\n expr = r'The directory where separate debug symbols are searched for is \"(.*)\".\\n'\n\n 
match = re.search(expr, result)\n\n if match:\n return match.group(1)\n return ''\n\ndef set_directory(d):\n gdb.execute('set debug-file-directory %s' % d, to_string=True, from_tty=False)\n\ndef add_directory(d):\n current = get_directory()\n if current:\n set_directory('%s:%s' % (current, d))\n else:\n set_directory(d)\n\nremote_files = {}\nremote_files_dir = None\n\[email protected]\ndef reset_remote_files():\n global remote_files\n global remote_files_dir\n remote_files = {}\n if remote_files_dir is not None:\n shutil.rmtree(remote_files_dir)\n remote_files_dir = None\n\[email protected]_objfile\ndef autofetch():\n \"\"\"\n \"\"\"\n global remote_files_dir\n if not pwndbg.remote.is_remote():\n return\n\n if pwndbg.qemu.is_qemu_usermode():\n return\n\n if pwndbg.android.is_android():\n return\n\n if not remote_files_dir:\n remote_files_dir = tempfile.mkdtemp()\n add_directory(remote_files_dir)\n\n searchpath = get_directory()\n\n for mapping in pwndbg.vmmap.get():\n objfile = mapping.objfile\n\n # Don't attempt to download things like '[stack]' and '[heap]'\n if not objfile.startswith('/'):\n continue\n\n # Don't re-download things that we have already downloaded\n if not objfile or objfile in remote_files:\n continue\n\n msg = \"Downloading %r from the remote server\" % objfile\n print(msg, end='')\n\n try:\n data = pwndbg.file.get(objfile)\n print('\\r' + msg + ': OK')\n except OSError:\n # The file could not be downloaded :(\n print('\\r' + msg + ': Failed')\n return\n\n filename = os.path.basename(objfile)\n local_path = os.path.join(remote_files_dir, filename)\n\n with open(local_path, 'wb+') as f:\n f.write(data)\n\n remote_files[objfile] = local_path\n\n base = None\n for mapping in pwndbg.vmmap.get():\n if mapping.objfile != objfile:\n continue\n\n if base is None or mapping.vaddr < base.vaddr:\n base = mapping\n\n if not base:\n continue\n\n base = base.vaddr\n\n try:\n elf = elftools.elf.elffile.ELFFile(open(local_path, 'rb'))\n except elftools.common.exceptions.ELFError:\n continue\n\n gdb_command = ['add-symbol-file', local_path, hex(int(base))]\n for section in elf.iter_sections():\n name = section.name #.decode('latin-1')\n section = section.header\n if not section.sh_flags & elftools.elf.constants.SH_FLAGS.SHF_ALLOC:\n continue\n gdb_command += ['-s', name, hex(int(base + section.sh_addr))]\n\n print(' '.join(gdb_command))\n # gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True)\n\[email protected]_on_objfile\ndef get(address, gdb_only=False):\n \"\"\"\n Retrieve the textual name for a symbol\n \"\"\"\n # Fast path\n if address < pwndbg.memory.MMAP_MIN_ADDR or address >= ((1 << 64)-1):\n return ''\n\n # Don't look up stack addresses\n if pwndbg.stack.find(address):\n return ''\n\n # This sucks, but there's not a GDB API for this.\n result = gdb.execute('info symbol %#x' % int(address), to_string=True, from_tty=False)\n\n if not gdb_only and result.startswith('No symbol'):\n address = int(address)\n exe = pwndbg.elf.exe()\n if exe:\n exe_map = pwndbg.vmmap.find(exe.address)\n if exe_map and address in exe_map:\n res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)\n return res or ''\n\n # Expected format looks like this:\n # main in section .text of /bin/bash\n # main + 3 in section .text of /bin/bash\n # system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6\n # No symbol matches system-1.\n a, b, c, _ = result.split(None, 3)\n\n\n if b == '+':\n return \"%s+%s\" % (a, c)\n if b == 'in':\n return a\n\n return ''\n\[email 
protected]_on_objfile\ndef address(symbol):\n if isinstance(symbol, int):\n return symbol\n\n try:\n return int(symbol, 0)\n except:\n pass\n\n try:\n symbol_obj = gdb.lookup_symbol(symbol)[0]\n if symbol_obj:\n return int(symbol_obj.value().address)\n except Exception:\n pass\n\n try:\n result = gdb.execute('info address %s' % symbol, to_string=True, from_tty=False)\n address = int(re.search('0x[0-9a-fA-F]+', result).group(), 0)\n\n # The address found should lie in one of the memory maps\n # There are cases when GDB shows offsets e.g.:\n # pwndbg> info address tcache\n # Symbol \"tcache\" is a thread-local variable at offset 0x40\n # in the thread-local storage for `/lib/x86_64-linux-gnu/libc.so.6'.\n if not pwndbg.vmmap.find(address):\n return None\n\n return address\n\n except gdb.error:\n return None\n\n try:\n address = pwndbg.ida.LocByName(symbol)\n if address:\n return address\n except Exception:\n pass\n\[email protected]\[email protected]_on_start\ndef add_main_exe_to_symbols():\n if not pwndbg.remote.is_remote():\n return\n\n if pwndbg.android.is_android():\n return\n\n exe = pwndbg.elf.exe()\n\n if not exe:\n return\n\n addr = exe.address\n\n if not addr:\n return\n\n addr = int(addr)\n\n mmap = pwndbg.vmmap.find(addr)\n if not mmap:\n return\n\n path = mmap.objfile\n if path and (pwndbg.arch.endian == pwndbg.arch.native_endian):\n try:\n gdb.execute('add-symbol-file %s %#x' % (path, addr), from_tty=False, to_string=True)\n except gdb.error:\n pass\n\nif '/usr/lib/debug' not in get_directory():\n set_directory(get_directory() + ':/usr/lib/debug')\n", "path": "pwndbg/symbol.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nLaunches the target process after setting a breakpoint at a convenient\nentry point.\n\"\"\"\nimport argparse\nimport shlex\nfrom shlex import quote\n\nimport gdb\n\nimport pwndbg.commands\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.symbol\n\nbreak_on_first_instruction = False\n\n\[email protected]\ndef on_start():\n global break_on_first_instruction\n if break_on_first_instruction:\n spec = \"*%#x\" % (int(pwndbg.elf.entry()))\n gdb.Breakpoint(spec, temporary=True)\n break_on_first_instruction = False\n\n\nparser = argparse.ArgumentParser(description=\"\"\"\n Set a breakpoint at a convenient location in the binary,\n generally 'main', 'init', or the entry point.\"\"\")\nparser.add_argument(\"args\", nargs=\"*\", type=str, default=None, help=\"The arguments to run the binary with.\")\[email protected](parser)\ndef start(args=None):\n if args is None:\n args = []\n \"\"\"\n Set a breakpoint at a convenient location in the binary,\n generally 'main', 'init', or the entry point.\n \"\"\"\n run = 'run ' + ' '.join(args)\n\n symbols = [\"main\",\n \"_main\",\n \"start\",\n \"_start\",\n \"init\",\n \"_init\"]\n\n for symbol in symbols:\n address = pwndbg.symbol.address(symbol, allow_unmapped=True)\n\n if not address:\n continue\n\n b = gdb.Breakpoint(symbol, temporary=True)\n gdb.execute(run, from_tty=False, to_string=True)\n return\n\n # Try a breakpoint at the binary entry\n entry(args)\n\n\nparser = argparse.ArgumentParser(description=\"\"\"\n Set a breakpoint at the first instruction executed in\n the target binary.\n \"\"\")\nparser.add_argument(\"args\", nargs=\"*\", type=str, default=None, help=\"The arguments to run the binary with.\")\[email protected](parser)\[email protected]\ndef entry(args=None):\n if args is None:\n arg = []\n \"\"\"\n Set a breakpoint at the first instruction executed in\n the 
target binary.\n \"\"\"\n global break_on_first_instruction\n break_on_first_instruction = True\n run = 'run ' + ' '.join(map(quote, args))\n gdb.execute(run, from_tty=False)\n", "path": "pwndbg/commands/start.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nLooking up addresses for function names / symbols, and\nvice-versa.\n\nUses IDA when available if there isn't sufficient symbol\ninformation available.\n\"\"\"\nimport os\nimport re\nimport shutil\nimport tempfile\n\nimport elftools.common.exceptions\nimport elftools.elf.constants\nimport elftools.elf.elffile\nimport elftools.elf.segments\nimport gdb\n\nimport pwndbg.arch\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.file\nimport pwndbg.ida\nimport pwndbg.memoize\nimport pwndbg.memory\nimport pwndbg.qemu\nimport pwndbg.remote\nimport pwndbg.stack\nimport pwndbg.vmmap\n\n\ndef get_directory():\n \"\"\"\n Retrieve the debug file directory path.\n\n The debug file directory path ('show debug-file-directory') is a comma-\n separated list of directories which GDB will look in to find the binaries\n currently loaded.\n \"\"\"\n result = gdb.execute('show debug-file-directory', to_string=True, from_tty=False)\n expr = r'The directory where separate debug symbols are searched for is \"(.*)\".\\n'\n\n match = re.search(expr, result)\n\n if match:\n return match.group(1)\n return ''\n\ndef set_directory(d):\n gdb.execute('set debug-file-directory %s' % d, to_string=True, from_tty=False)\n\ndef add_directory(d):\n current = get_directory()\n if current:\n set_directory('%s:%s' % (current, d))\n else:\n set_directory(d)\n\nremote_files = {}\nremote_files_dir = None\n\[email protected]\ndef reset_remote_files():\n global remote_files\n global remote_files_dir\n remote_files = {}\n if remote_files_dir is not None:\n shutil.rmtree(remote_files_dir)\n remote_files_dir = None\n\[email protected]_objfile\ndef autofetch():\n \"\"\"\n \"\"\"\n global remote_files_dir\n if not pwndbg.remote.is_remote():\n return\n\n if pwndbg.qemu.is_qemu_usermode():\n return\n\n if pwndbg.android.is_android():\n return\n\n if not remote_files_dir:\n remote_files_dir = tempfile.mkdtemp()\n add_directory(remote_files_dir)\n\n searchpath = get_directory()\n\n for mapping in pwndbg.vmmap.get():\n objfile = mapping.objfile\n\n # Don't attempt to download things like '[stack]' and '[heap]'\n if not objfile.startswith('/'):\n continue\n\n # Don't re-download things that we have already downloaded\n if not objfile or objfile in remote_files:\n continue\n\n msg = \"Downloading %r from the remote server\" % objfile\n print(msg, end='')\n\n try:\n data = pwndbg.file.get(objfile)\n print('\\r' + msg + ': OK')\n except OSError:\n # The file could not be downloaded :(\n print('\\r' + msg + ': Failed')\n return\n\n filename = os.path.basename(objfile)\n local_path = os.path.join(remote_files_dir, filename)\n\n with open(local_path, 'wb+') as f:\n f.write(data)\n\n remote_files[objfile] = local_path\n\n base = None\n for mapping in pwndbg.vmmap.get():\n if mapping.objfile != objfile:\n continue\n\n if base is None or mapping.vaddr < base.vaddr:\n base = mapping\n\n if not base:\n continue\n\n base = base.vaddr\n\n try:\n elf = elftools.elf.elffile.ELFFile(open(local_path, 'rb'))\n except elftools.common.exceptions.ELFError:\n continue\n\n gdb_command = ['add-symbol-file', local_path, hex(int(base))]\n for section in elf.iter_sections():\n name = section.name #.decode('latin-1')\n section = section.header\n if not section.sh_flags & 
elftools.elf.constants.SH_FLAGS.SHF_ALLOC:\n continue\n gdb_command += ['-s', name, hex(int(base + section.sh_addr))]\n\n print(' '.join(gdb_command))\n # gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True)\n\[email protected]_on_objfile\ndef get(address, gdb_only=False):\n \"\"\"\n Retrieve the textual name for a symbol\n \"\"\"\n # Fast path\n if address < pwndbg.memory.MMAP_MIN_ADDR or address >= ((1 << 64)-1):\n return ''\n\n # Don't look up stack addresses\n if pwndbg.stack.find(address):\n return ''\n\n # This sucks, but there's not a GDB API for this.\n result = gdb.execute('info symbol %#x' % int(address), to_string=True, from_tty=False)\n\n if not gdb_only and result.startswith('No symbol'):\n address = int(address)\n exe = pwndbg.elf.exe()\n if exe:\n exe_map = pwndbg.vmmap.find(exe.address)\n if exe_map and address in exe_map:\n res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)\n return res or ''\n\n # Expected format looks like this:\n # main in section .text of /bin/bash\n # main + 3 in section .text of /bin/bash\n # system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6\n # No symbol matches system-1.\n a, b, c, _ = result.split(None, 3)\n\n\n if b == '+':\n return \"%s+%s\" % (a, c)\n if b == 'in':\n return a\n\n return ''\n\[email protected]_on_objfile\ndef address(symbol, allow_unmapped=False):\n if isinstance(symbol, int):\n return symbol\n\n try:\n return int(symbol, 0)\n except:\n pass\n\n try:\n symbol_obj = gdb.lookup_symbol(symbol)[0]\n if symbol_obj:\n return int(symbol_obj.value().address)\n except Exception:\n pass\n\n try:\n result = gdb.execute('info address %s' % symbol, to_string=True, from_tty=False)\n address = int(re.search('0x[0-9a-fA-F]+', result).group(), 0)\n\n # The address found should lie in one of the memory maps\n # There are cases when GDB shows offsets e.g.:\n # pwndbg> info address tcache\n # Symbol \"tcache\" is a thread-local variable at offset 0x40\n # in the thread-local storage for `/lib/x86_64-linux-gnu/libc.so.6'.\n if not allow_unmapped and not pwndbg.vmmap.find(address):\n return None\n\n return address\n\n except gdb.error:\n return None\n\n try:\n address = pwndbg.ida.LocByName(symbol)\n if address:\n return address\n except Exception:\n pass\n\[email protected]\[email protected]_on_start\ndef add_main_exe_to_symbols():\n if not pwndbg.remote.is_remote():\n return\n\n if pwndbg.android.is_android():\n return\n\n exe = pwndbg.elf.exe()\n\n if not exe:\n return\n\n addr = exe.address\n\n if not addr:\n return\n\n addr = int(addr)\n\n mmap = pwndbg.vmmap.find(addr)\n if not mmap:\n return\n\n path = mmap.objfile\n if path and (pwndbg.arch.endian == pwndbg.arch.native_endian):\n try:\n gdb.execute('add-symbol-file %s %#x' % (path, addr), from_tty=False, to_string=True)\n except gdb.error:\n pass\n\nif '/usr/lib/debug' not in get_directory():\n set_directory(get_directory() + ':/usr/lib/debug')\n", "path": "pwndbg/symbol.py"}]} | 3,970 | 295 |
gh_patches_debug_44697 | rasdani/github-patches | git_diff | ephios-dev__ephios-1264 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Filter working hours by type
As a planner, I want to filter the working hours by event type or by manually added working hours.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ephios/core/views/workinghour.py`
Content:
```
1 import datetime
2 from collections import Counter
3 from datetime import date
4 from itertools import chain
5 from typing import Optional
6
7 from django import forms
8 from django.contrib import messages
9 from django.contrib.auth.mixins import LoginRequiredMixin
10 from django.contrib.auth.models import Group
11 from django.contrib.messages.views import SuccessMessageMixin
12 from django.db.models import DurationField, ExpressionWrapper, F, Sum
13 from django.shortcuts import get_object_or_404, redirect
14 from django.urls import reverse
15 from django.utils.functional import cached_property
16 from django.utils.translation import gettext as _
17 from django.views.generic import DeleteView, DetailView, FormView, TemplateView, UpdateView
18 from guardian.shortcuts import get_objects_for_user
19
20 from ephios.core.forms.users import WorkingHourRequestForm
21 from ephios.core.models import LocalParticipation, UserProfile, WorkingHours
22 from ephios.extra.mixins import CustomPermissionRequiredMixin
23 from ephios.extra.widgets import CustomDateInput
24
25
26 class CanGrantMixin:
27 @cached_property
28 def can_grant(self):
29 """
30 Return whether the current request user can grant
31 working hours to the target user (which might be themselves).
32 """
33 if self.request.user.is_anonymous:
34 return False
35 can_grant_for_groups = get_objects_for_user(
36 self.request.user, "decide_workinghours_for_group", klass=Group
37 )
38 return self._get_target_user().groups.filter(id__in=can_grant_for_groups).exists()
39
40 def _get_target_user(self):
41 """Return the user whose working hours are being managed."""
42 return get_object_or_404(UserProfile, pk=self.kwargs["pk"])
43
44
45 class DateFilterForm(forms.Form):
46 start = forms.DateField(required=False, label=_("From"), widget=CustomDateInput)
47 end = forms.DateField(required=False, label=_("To"), widget=CustomDateInput)
48
49
50 class WorkingHourOverview(CustomPermissionRequiredMixin, TemplateView):
51 template_name = "core/workinghours_list.html"
52 permission_required = "core.view_userprofile"
53
54 def _get_working_hours_stats(self, start: Optional[date], end: Optional[date]):
55 participations = (
56 LocalParticipation.objects.filter(
57 state=LocalParticipation.States.CONFIRMED,
58 start_time__date__gte=start,
59 end_time__date__lte=end,
60 )
61 .annotate(
62 duration=ExpressionWrapper(
63 (F("end_time") - F("start_time")),
64 output_field=DurationField(),
65 ),
66 )
67 .annotate(hour_sum=Sum("duration"))
68 .values_list("user__pk", "user__display_name", "hour_sum")
69 )
70 workinghours = (
71 WorkingHours.objects.filter(date__gte=start, date__lte=end)
72 .annotate(hour_sum=Sum("hours"))
73 .values_list("user__pk", "user__display_name", "hour_sum")
74 )
75 result = {}
76 c = Counter()
77 for user_pk, display_name, hours in chain(participations, workinghours):
78 current_sum = (
79 hours.total_seconds() / (60 * 60)
80 if isinstance(hours, datetime.timedelta)
81 else float(hours)
82 )
83 c[user_pk] += current_sum
84 result[user_pk] = {
85 "pk": user_pk,
86 "display_name": display_name,
87 "hours": c[user_pk],
88 }
89 return sorted(result.values(), key=lambda x: x["hours"], reverse=True)
90
91 def get_context_data(self, **kwargs):
92 filter_form = DateFilterForm(
93 self.request.GET
94 or {
95                 # initial data for initial page load
96 # (does not use `initial` cause that only works with unbound forms)
97 "start": date.today().replace(month=1, day=1),
98 "end": date.today().replace(month=12, day=31),
99 }
100 )
101 filter_form.is_valid()
102 kwargs["filter_form"] = filter_form
103 kwargs["users"] = self._get_working_hours_stats(
104 start=filter_form.cleaned_data.get("start") or date.min, # start/end are not required
105 end=filter_form.cleaned_data.get("end") or date.max,
106 )
107 kwargs["can_grant_for"] = set(
108 get_objects_for_user(self.request.user, "decide_workinghours_for_group", klass=Group)
109 )
110 kwargs["groups_by_user"] = {
111 profile.pk: set(profile.groups.all())
112 for profile in UserProfile.objects.all().prefetch_related("groups")
113 }
114 return super().get_context_data(**kwargs)
115
116
117 class OwnWorkingHourView(LoginRequiredMixin, DetailView):
118 template_name = "core/userprofile_workinghours.html"
119
120 def get_object(self, queryset=None):
121 return self.request.user
122
123 def get_context_data(self, **kwargs):
124 kwargs["own_profile"] = True
125 grant_ids = get_objects_for_user(
126 self.request.user, "decide_workinghours_for_group", klass=Group
127 ).values_list("id", flat=True)
128 kwargs["can_grant"] = self.request.user.groups.filter(id__in=grant_ids).exists()
129 return super().get_context_data(**kwargs)
130
131
132 class UserProfileWorkingHourView(CanGrantMixin, CustomPermissionRequiredMixin, DetailView):
133 model = UserProfile
134 permission_required = "core.view_userprofile"
135 template_name = "core/userprofile_workinghours.html"
136
137 def get_context_data(self, **kwargs):
138 kwargs["can_grant"] = self.can_grant
139 return super().get_context_data(**kwargs)
140
141
142 class WorkingHourRequestView(LoginRequiredMixin, FormView):
143 form_class = WorkingHourRequestForm
144 template_name = "core/workinghours_form.html"
145
146 def get_form_kwargs(self):
147 kwargs = super().get_form_kwargs()
148 kwargs["user"] = self.request.user
149 return kwargs
150
151 def form_valid(self, form):
152 form.create_consequence()
153 messages.success(self.request, _("Your request has been submitted."))
154 return redirect(reverse("core:workinghours_own"))
155
156
157 class WorkingHourCreateView(CanGrantMixin, CustomPermissionRequiredMixin, WorkingHourRequestView):
158 def has_permission(self):
159 return self.can_grant
160
161 def get_form_kwargs(self):
162 kwargs = super().get_form_kwargs()
163 kwargs["can_grant"] = True
164 kwargs["user"] = self._get_target_user()
165 return kwargs
166
167 def form_valid(self, form):
168 workinghour = form.save(commit=False)
169 workinghour.user = form.user
170 workinghour.save()
171 messages.success(self.request, _("Working hours have been added."))
172 return redirect(reverse("core:workinghours_list"))
173
174
175 class WorkingHourUpdateView(CanGrantMixin, CustomPermissionRequiredMixin, UpdateView):
176 model = WorkingHours
177 form_class = WorkingHourRequestForm
178
179 def has_permission(self):
180 return self.can_grant
181
182 def get_success_url(self):
183 return reverse("core:workinghours_detail", kwargs={"pk": self.object.user.pk})
184
185 def get_form_kwargs(self):
186 kwargs = super().get_form_kwargs()
187 kwargs["user"] = self.object.user
188 kwargs["can_grant"] = True
189 return kwargs
190
191 def _get_target_user(self):
192 return get_object_or_404(WorkingHours, pk=self.kwargs["pk"]).user
193
194
195 class WorkingHourDeleteView(
196 CanGrantMixin, CustomPermissionRequiredMixin, SuccessMessageMixin, DeleteView
197 ):
198 permission_required = "core.decide_workinghours_for_group"
199 model = WorkingHours
200 success_message = _("Working hours have been deleted.")
201
202 def _get_target_user(self):
203 return get_object_or_404(WorkingHours, pk=self.kwargs["pk"]).user
204
205 def has_permission(self):
206 return self.can_grant
207
208 def get_success_url(self):
209 return reverse("core:workinghours_detail", kwargs={"pk": self.object.user.pk})
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ephios/core/views/workinghour.py b/ephios/core/views/workinghour.py
--- a/ephios/core/views/workinghour.py
+++ b/ephios/core/views/workinghour.py
@@ -15,10 +15,11 @@
from django.utils.functional import cached_property
from django.utils.translation import gettext as _
from django.views.generic import DeleteView, DetailView, FormView, TemplateView, UpdateView
+from django_select2.forms import Select2Widget
from guardian.shortcuts import get_objects_for_user
from ephios.core.forms.users import WorkingHourRequestForm
-from ephios.core.models import LocalParticipation, UserProfile, WorkingHours
+from ephios.core.models import EventType, LocalParticipation, UserProfile, WorkingHours
from ephios.extra.mixins import CustomPermissionRequiredMixin
from ephios.extra.widgets import CustomDateInput
@@ -42,23 +43,43 @@
return get_object_or_404(UserProfile, pk=self.kwargs["pk"])
-class DateFilterForm(forms.Form):
+class WorkingHourFilterForm(forms.Form):
start = forms.DateField(required=False, label=_("From"), widget=CustomDateInput)
end = forms.DateField(required=False, label=_("To"), widget=CustomDateInput)
+ type = forms.ModelChoiceField(
+ label=_("Event type"),
+ queryset=EventType.objects.all(),
+ required=False,
+ widget=Select2Widget(
+ attrs={
+ "data-placeholder": _("Event type"),
+ "classes": "w-auto",
+ }
+ ),
+ )
class WorkingHourOverview(CustomPermissionRequiredMixin, TemplateView):
template_name = "core/workinghours_list.html"
permission_required = "core.view_userprofile"
- def _get_working_hours_stats(self, start: Optional[date], end: Optional[date]):
- participations = (
- LocalParticipation.objects.filter(
- state=LocalParticipation.States.CONFIRMED,
- start_time__date__gte=start,
- end_time__date__lte=end,
+ def _get_working_hours_stats(self, start: date, end: date, eventtype: Optional[EventType]):
+ participations = LocalParticipation.objects.filter(
+ state=LocalParticipation.States.CONFIRMED,
+ start_time__date__gte=start,
+ end_time__date__lte=end,
+ )
+ workinghours = {}
+ if eventtype is not None:
+ participations = participations.filter(shift__event__type=eventtype)
+ else:
+ workinghours = (
+ WorkingHours.objects.filter(date__gte=start, date__lte=end)
+ .annotate(hour_sum=Sum("hours"))
+ .values_list("user__pk", "user__display_name", "hour_sum")
)
- .annotate(
+ participations = (
+ participations.annotate(
duration=ExpressionWrapper(
(F("end_time") - F("start_time")),
output_field=DurationField(),
@@ -67,11 +88,7 @@
.annotate(hour_sum=Sum("duration"))
.values_list("user__pk", "user__display_name", "hour_sum")
)
- workinghours = (
- WorkingHours.objects.filter(date__gte=start, date__lte=end)
- .annotate(hour_sum=Sum("hours"))
- .values_list("user__pk", "user__display_name", "hour_sum")
- )
+
result = {}
c = Counter()
for user_pk, display_name, hours in chain(participations, workinghours):
@@ -89,7 +106,7 @@
return sorted(result.values(), key=lambda x: x["hours"], reverse=True)
def get_context_data(self, **kwargs):
- filter_form = DateFilterForm(
+ filter_form = WorkingHourFilterForm(
self.request.GET
or {
                 # initial data for initial page load
@@ -103,6 +120,7 @@
kwargs["users"] = self._get_working_hours_stats(
start=filter_form.cleaned_data.get("start") or date.min, # start/end are not required
end=filter_form.cleaned_data.get("end") or date.max,
+ eventtype=filter_form.cleaned_data.get("type"),
)
kwargs["can_grant_for"] = set(
get_objects_for_user(self.request.user, "decide_workinghours_for_group", klass=Group)
| {"golden_diff": "diff --git a/ephios/core/views/workinghour.py b/ephios/core/views/workinghour.py\n--- a/ephios/core/views/workinghour.py\n+++ b/ephios/core/views/workinghour.py\n@@ -15,10 +15,11 @@\n from django.utils.functional import cached_property\n from django.utils.translation import gettext as _\n from django.views.generic import DeleteView, DetailView, FormView, TemplateView, UpdateView\n+from django_select2.forms import Select2Widget\n from guardian.shortcuts import get_objects_for_user\n \n from ephios.core.forms.users import WorkingHourRequestForm\n-from ephios.core.models import LocalParticipation, UserProfile, WorkingHours\n+from ephios.core.models import EventType, LocalParticipation, UserProfile, WorkingHours\n from ephios.extra.mixins import CustomPermissionRequiredMixin\n from ephios.extra.widgets import CustomDateInput\n \n@@ -42,23 +43,43 @@\n return get_object_or_404(UserProfile, pk=self.kwargs[\"pk\"])\n \n \n-class DateFilterForm(forms.Form):\n+class WorkingHourFilterForm(forms.Form):\n start = forms.DateField(required=False, label=_(\"From\"), widget=CustomDateInput)\n end = forms.DateField(required=False, label=_(\"To\"), widget=CustomDateInput)\n+ type = forms.ModelChoiceField(\n+ label=_(\"Event type\"),\n+ queryset=EventType.objects.all(),\n+ required=False,\n+ widget=Select2Widget(\n+ attrs={\n+ \"data-placeholder\": _(\"Event type\"),\n+ \"classes\": \"w-auto\",\n+ }\n+ ),\n+ )\n \n \n class WorkingHourOverview(CustomPermissionRequiredMixin, TemplateView):\n template_name = \"core/workinghours_list.html\"\n permission_required = \"core.view_userprofile\"\n \n- def _get_working_hours_stats(self, start: Optional[date], end: Optional[date]):\n- participations = (\n- LocalParticipation.objects.filter(\n- state=LocalParticipation.States.CONFIRMED,\n- start_time__date__gte=start,\n- end_time__date__lte=end,\n+ def _get_working_hours_stats(self, start: date, end: date, eventtype: Optional[EventType]):\n+ participations = LocalParticipation.objects.filter(\n+ state=LocalParticipation.States.CONFIRMED,\n+ start_time__date__gte=start,\n+ end_time__date__lte=end,\n+ )\n+ workinghours = {}\n+ if eventtype is not None:\n+ participations = participations.filter(shift__event__type=eventtype)\n+ else:\n+ workinghours = (\n+ WorkingHours.objects.filter(date__gte=start, date__lte=end)\n+ .annotate(hour_sum=Sum(\"hours\"))\n+ .values_list(\"user__pk\", \"user__display_name\", \"hour_sum\")\n )\n- .annotate(\n+ participations = (\n+ participations.annotate(\n duration=ExpressionWrapper(\n (F(\"end_time\") - F(\"start_time\")),\n output_field=DurationField(),\n@@ -67,11 +88,7 @@\n .annotate(hour_sum=Sum(\"duration\"))\n .values_list(\"user__pk\", \"user__display_name\", \"hour_sum\")\n )\n- workinghours = (\n- WorkingHours.objects.filter(date__gte=start, date__lte=end)\n- .annotate(hour_sum=Sum(\"hours\"))\n- .values_list(\"user__pk\", \"user__display_name\", \"hour_sum\")\n- )\n+\n result = {}\n c = Counter()\n for user_pk, display_name, hours in chain(participations, workinghours):\n@@ -89,7 +106,7 @@\n return sorted(result.values(), key=lambda x: x[\"hours\"], reverse=True)\n \n def get_context_data(self, **kwargs):\n- filter_form = DateFilterForm(\n+ filter_form = WorkingHourFilterForm(\n self.request.GET\n or {\n # intitial data for initial page laod\n@@ -103,6 +120,7 @@\n kwargs[\"users\"] = self._get_working_hours_stats(\n start=filter_form.cleaned_data.get(\"start\") or date.min, # start/end are not required\n end=filter_form.cleaned_data.get(\"end\") or date.max,\n+ 
eventtype=filter_form.cleaned_data.get(\"type\"),\n )\n kwargs[\"can_grant_for\"] = set(\n get_objects_for_user(self.request.user, \"decide_workinghours_for_group\", klass=Group)\n", "issue": "Filter working hours by type\nAs a planner, I want to filter the working hours by event type or manual working hours.\n", "before_files": [{"content": "import datetime\nfrom collections import Counter\nfrom datetime import date\nfrom itertools import chain\nfrom typing import Optional\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import Group\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.db.models import DurationField, ExpressionWrapper, F, Sum\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext as _\nfrom django.views.generic import DeleteView, DetailView, FormView, TemplateView, UpdateView\nfrom guardian.shortcuts import get_objects_for_user\n\nfrom ephios.core.forms.users import WorkingHourRequestForm\nfrom ephios.core.models import LocalParticipation, UserProfile, WorkingHours\nfrom ephios.extra.mixins import CustomPermissionRequiredMixin\nfrom ephios.extra.widgets import CustomDateInput\n\n\nclass CanGrantMixin:\n @cached_property\n def can_grant(self):\n \"\"\"\n Return whether the current request user can grant\n working hours to the target user (which might be themselves).\n \"\"\"\n if self.request.user.is_anonymous:\n return False\n can_grant_for_groups = get_objects_for_user(\n self.request.user, \"decide_workinghours_for_group\", klass=Group\n )\n return self._get_target_user().groups.filter(id__in=can_grant_for_groups).exists()\n\n def _get_target_user(self):\n \"\"\"Return the user whose working hours are being managed.\"\"\"\n return get_object_or_404(UserProfile, pk=self.kwargs[\"pk\"])\n\n\nclass DateFilterForm(forms.Form):\n start = forms.DateField(required=False, label=_(\"From\"), widget=CustomDateInput)\n end = forms.DateField(required=False, label=_(\"To\"), widget=CustomDateInput)\n\n\nclass WorkingHourOverview(CustomPermissionRequiredMixin, TemplateView):\n template_name = \"core/workinghours_list.html\"\n permission_required = \"core.view_userprofile\"\n\n def _get_working_hours_stats(self, start: Optional[date], end: Optional[date]):\n participations = (\n LocalParticipation.objects.filter(\n state=LocalParticipation.States.CONFIRMED,\n start_time__date__gte=start,\n end_time__date__lte=end,\n )\n .annotate(\n duration=ExpressionWrapper(\n (F(\"end_time\") - F(\"start_time\")),\n output_field=DurationField(),\n ),\n )\n .annotate(hour_sum=Sum(\"duration\"))\n .values_list(\"user__pk\", \"user__display_name\", \"hour_sum\")\n )\n workinghours = (\n WorkingHours.objects.filter(date__gte=start, date__lte=end)\n .annotate(hour_sum=Sum(\"hours\"))\n .values_list(\"user__pk\", \"user__display_name\", \"hour_sum\")\n )\n result = {}\n c = Counter()\n for user_pk, display_name, hours in chain(participations, workinghours):\n current_sum = (\n hours.total_seconds() / (60 * 60)\n if isinstance(hours, datetime.timedelta)\n else float(hours)\n )\n c[user_pk] += current_sum\n result[user_pk] = {\n \"pk\": user_pk,\n \"display_name\": display_name,\n \"hours\": c[user_pk],\n }\n return sorted(result.values(), key=lambda x: x[\"hours\"], reverse=True)\n\n def get_context_data(self, **kwargs):\n filter_form = 
DateFilterForm(\n self.request.GET\n or {\n # intitial data for initial page laod\n # (does not use `initial` cause that only works with unbound forms)\n \"start\": date.today().replace(month=1, day=1),\n \"end\": date.today().replace(month=12, day=31),\n }\n )\n filter_form.is_valid()\n kwargs[\"filter_form\"] = filter_form\n kwargs[\"users\"] = self._get_working_hours_stats(\n start=filter_form.cleaned_data.get(\"start\") or date.min, # start/end are not required\n end=filter_form.cleaned_data.get(\"end\") or date.max,\n )\n kwargs[\"can_grant_for\"] = set(\n get_objects_for_user(self.request.user, \"decide_workinghours_for_group\", klass=Group)\n )\n kwargs[\"groups_by_user\"] = {\n profile.pk: set(profile.groups.all())\n for profile in UserProfile.objects.all().prefetch_related(\"groups\")\n }\n return super().get_context_data(**kwargs)\n\n\nclass OwnWorkingHourView(LoginRequiredMixin, DetailView):\n template_name = \"core/userprofile_workinghours.html\"\n\n def get_object(self, queryset=None):\n return self.request.user\n\n def get_context_data(self, **kwargs):\n kwargs[\"own_profile\"] = True\n grant_ids = get_objects_for_user(\n self.request.user, \"decide_workinghours_for_group\", klass=Group\n ).values_list(\"id\", flat=True)\n kwargs[\"can_grant\"] = self.request.user.groups.filter(id__in=grant_ids).exists()\n return super().get_context_data(**kwargs)\n\n\nclass UserProfileWorkingHourView(CanGrantMixin, CustomPermissionRequiredMixin, DetailView):\n model = UserProfile\n permission_required = \"core.view_userprofile\"\n template_name = \"core/userprofile_workinghours.html\"\n\n def get_context_data(self, **kwargs):\n kwargs[\"can_grant\"] = self.can_grant\n return super().get_context_data(**kwargs)\n\n\nclass WorkingHourRequestView(LoginRequiredMixin, FormView):\n form_class = WorkingHourRequestForm\n template_name = \"core/workinghours_form.html\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"user\"] = self.request.user\n return kwargs\n\n def form_valid(self, form):\n form.create_consequence()\n messages.success(self.request, _(\"Your request has been submitted.\"))\n return redirect(reverse(\"core:workinghours_own\"))\n\n\nclass WorkingHourCreateView(CanGrantMixin, CustomPermissionRequiredMixin, WorkingHourRequestView):\n def has_permission(self):\n return self.can_grant\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"can_grant\"] = True\n kwargs[\"user\"] = self._get_target_user()\n return kwargs\n\n def form_valid(self, form):\n workinghour = form.save(commit=False)\n workinghour.user = form.user\n workinghour.save()\n messages.success(self.request, _(\"Working hours have been added.\"))\n return redirect(reverse(\"core:workinghours_list\"))\n\n\nclass WorkingHourUpdateView(CanGrantMixin, CustomPermissionRequiredMixin, UpdateView):\n model = WorkingHours\n form_class = WorkingHourRequestForm\n\n def has_permission(self):\n return self.can_grant\n\n def get_success_url(self):\n return reverse(\"core:workinghours_detail\", kwargs={\"pk\": self.object.user.pk})\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"user\"] = self.object.user\n kwargs[\"can_grant\"] = True\n return kwargs\n\n def _get_target_user(self):\n return get_object_or_404(WorkingHours, pk=self.kwargs[\"pk\"]).user\n\n\nclass WorkingHourDeleteView(\n CanGrantMixin, CustomPermissionRequiredMixin, SuccessMessageMixin, DeleteView\n):\n permission_required = \"core.decide_workinghours_for_group\"\n model = WorkingHours\n 
success_message = _(\"Working hours have been deleted.\")\n\n def _get_target_user(self):\n return get_object_or_404(WorkingHours, pk=self.kwargs[\"pk\"]).user\n\n def has_permission(self):\n return self.can_grant\n\n def get_success_url(self):\n return reverse(\"core:workinghours_detail\", kwargs={\"pk\": self.object.user.pk})\n", "path": "ephios/core/views/workinghour.py"}], "after_files": [{"content": "import datetime\nfrom collections import Counter\nfrom datetime import date\nfrom itertools import chain\nfrom typing import Optional\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import Group\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.db.models import DurationField, ExpressionWrapper, F, Sum\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext as _\nfrom django.views.generic import DeleteView, DetailView, FormView, TemplateView, UpdateView\nfrom django_select2.forms import Select2Widget\nfrom guardian.shortcuts import get_objects_for_user\n\nfrom ephios.core.forms.users import WorkingHourRequestForm\nfrom ephios.core.models import EventType, LocalParticipation, UserProfile, WorkingHours\nfrom ephios.extra.mixins import CustomPermissionRequiredMixin\nfrom ephios.extra.widgets import CustomDateInput\n\n\nclass CanGrantMixin:\n @cached_property\n def can_grant(self):\n \"\"\"\n Return whether the current request user can grant\n working hours to the target user (which might be themselves).\n \"\"\"\n if self.request.user.is_anonymous:\n return False\n can_grant_for_groups = get_objects_for_user(\n self.request.user, \"decide_workinghours_for_group\", klass=Group\n )\n return self._get_target_user().groups.filter(id__in=can_grant_for_groups).exists()\n\n def _get_target_user(self):\n \"\"\"Return the user whose working hours are being managed.\"\"\"\n return get_object_or_404(UserProfile, pk=self.kwargs[\"pk\"])\n\n\nclass WorkingHourFilterForm(forms.Form):\n start = forms.DateField(required=False, label=_(\"From\"), widget=CustomDateInput)\n end = forms.DateField(required=False, label=_(\"To\"), widget=CustomDateInput)\n type = forms.ModelChoiceField(\n label=_(\"Event type\"),\n queryset=EventType.objects.all(),\n required=False,\n widget=Select2Widget(\n attrs={\n \"data-placeholder\": _(\"Event type\"),\n \"classes\": \"w-auto\",\n }\n ),\n )\n\n\nclass WorkingHourOverview(CustomPermissionRequiredMixin, TemplateView):\n template_name = \"core/workinghours_list.html\"\n permission_required = \"core.view_userprofile\"\n\n def _get_working_hours_stats(self, start: date, end: date, eventtype: Optional[EventType]):\n participations = LocalParticipation.objects.filter(\n state=LocalParticipation.States.CONFIRMED,\n start_time__date__gte=start,\n end_time__date__lte=end,\n )\n workinghours = {}\n if eventtype is not None:\n participations = participations.filter(shift__event__type=eventtype)\n else:\n workinghours = (\n WorkingHours.objects.filter(date__gte=start, date__lte=end)\n .annotate(hour_sum=Sum(\"hours\"))\n .values_list(\"user__pk\", \"user__display_name\", \"hour_sum\")\n )\n participations = (\n participations.annotate(\n duration=ExpressionWrapper(\n (F(\"end_time\") - F(\"start_time\")),\n output_field=DurationField(),\n ),\n )\n .annotate(hour_sum=Sum(\"duration\"))\n .values_list(\"user__pk\", 
\"user__display_name\", \"hour_sum\")\n )\n\n result = {}\n c = Counter()\n for user_pk, display_name, hours in chain(participations, workinghours):\n current_sum = (\n hours.total_seconds() / (60 * 60)\n if isinstance(hours, datetime.timedelta)\n else float(hours)\n )\n c[user_pk] += current_sum\n result[user_pk] = {\n \"pk\": user_pk,\n \"display_name\": display_name,\n \"hours\": c[user_pk],\n }\n return sorted(result.values(), key=lambda x: x[\"hours\"], reverse=True)\n\n def get_context_data(self, **kwargs):\n filter_form = WorkingHourFilterForm(\n self.request.GET\n or {\n # intitial data for initial page laod\n # (does not use `initial` cause that only works with unbound forms)\n \"start\": date.today().replace(month=1, day=1),\n \"end\": date.today().replace(month=12, day=31),\n }\n )\n filter_form.is_valid()\n kwargs[\"filter_form\"] = filter_form\n kwargs[\"users\"] = self._get_working_hours_stats(\n start=filter_form.cleaned_data.get(\"start\") or date.min, # start/end are not required\n end=filter_form.cleaned_data.get(\"end\") or date.max,\n eventtype=filter_form.cleaned_data.get(\"type\"),\n )\n kwargs[\"can_grant_for\"] = set(\n get_objects_for_user(self.request.user, \"decide_workinghours_for_group\", klass=Group)\n )\n kwargs[\"groups_by_user\"] = {\n profile.pk: set(profile.groups.all())\n for profile in UserProfile.objects.all().prefetch_related(\"groups\")\n }\n return super().get_context_data(**kwargs)\n\n\nclass OwnWorkingHourView(LoginRequiredMixin, DetailView):\n template_name = \"core/userprofile_workinghours.html\"\n\n def get_object(self, queryset=None):\n return self.request.user\n\n def get_context_data(self, **kwargs):\n kwargs[\"own_profile\"] = True\n grant_ids = get_objects_for_user(\n self.request.user, \"decide_workinghours_for_group\", klass=Group\n ).values_list(\"id\", flat=True)\n kwargs[\"can_grant\"] = self.request.user.groups.filter(id__in=grant_ids).exists()\n return super().get_context_data(**kwargs)\n\n\nclass UserProfileWorkingHourView(CanGrantMixin, CustomPermissionRequiredMixin, DetailView):\n model = UserProfile\n permission_required = \"core.view_userprofile\"\n template_name = \"core/userprofile_workinghours.html\"\n\n def get_context_data(self, **kwargs):\n kwargs[\"can_grant\"] = self.can_grant\n return super().get_context_data(**kwargs)\n\n\nclass WorkingHourRequestView(LoginRequiredMixin, FormView):\n form_class = WorkingHourRequestForm\n template_name = \"core/workinghours_form.html\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"user\"] = self.request.user\n return kwargs\n\n def form_valid(self, form):\n form.create_consequence()\n messages.success(self.request, _(\"Your request has been submitted.\"))\n return redirect(reverse(\"core:workinghours_own\"))\n\n\nclass WorkingHourCreateView(CanGrantMixin, CustomPermissionRequiredMixin, WorkingHourRequestView):\n def has_permission(self):\n return self.can_grant\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"can_grant\"] = True\n kwargs[\"user\"] = self._get_target_user()\n return kwargs\n\n def form_valid(self, form):\n workinghour = form.save(commit=False)\n workinghour.user = form.user\n workinghour.save()\n messages.success(self.request, _(\"Working hours have been added.\"))\n return redirect(reverse(\"core:workinghours_list\"))\n\n\nclass WorkingHourUpdateView(CanGrantMixin, CustomPermissionRequiredMixin, UpdateView):\n model = WorkingHours\n form_class = WorkingHourRequestForm\n\n def has_permission(self):\n return 
self.can_grant\n\n def get_success_url(self):\n return reverse(\"core:workinghours_detail\", kwargs={\"pk\": self.object.user.pk})\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"user\"] = self.object.user\n kwargs[\"can_grant\"] = True\n return kwargs\n\n def _get_target_user(self):\n return get_object_or_404(WorkingHours, pk=self.kwargs[\"pk\"]).user\n\n\nclass WorkingHourDeleteView(\n CanGrantMixin, CustomPermissionRequiredMixin, SuccessMessageMixin, DeleteView\n):\n permission_required = \"core.decide_workinghours_for_group\"\n model = WorkingHours\n success_message = _(\"Working hours have been deleted.\")\n\n def _get_target_user(self):\n return get_object_or_404(WorkingHours, pk=self.kwargs[\"pk\"]).user\n\n def has_permission(self):\n return self.can_grant\n\n def get_success_url(self):\n return reverse(\"core:workinghours_detail\", kwargs={\"pk\": self.object.user.pk})\n", "path": "ephios/core/views/workinghour.py"}]} | 2,497 | 972 |
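The ephios patch in the record above replaces a plain date filter with a `WorkingHourFilterForm` carrying an optional event-type field: confirmed participations are narrowed by `shift__event__type` when a type is chosen, and manually granted `WorkingHours` are only mixed in when no type filter is active. A minimal, framework-free sketch of that rule follows; `participations`, `manual_hours`, and `eventtype` mirror the names in the patch, while the tuples and sample data are invented purely for illustration.

```python
# Sketch only, not ephios code: Django querysets are replaced by plain lists so
# the example runs on its own.
def hours_by_user(participations, manual_hours, eventtype=None):
    """Sum hours per user; manual hours count only when no event-type filter is set."""
    totals = {}
    for user, hours, etype in participations:
        if eventtype is not None and etype != eventtype:
            continue  # patch: participations.filter(shift__event__type=eventtype)
        totals[user] = totals.get(user, 0) + hours
    if eventtype is None:  # patch: WorkingHours queried only in the unfiltered branch
        for user, hours in manual_hours:
            totals[user] = totals.get(user, 0) + hours
    return totals


print(hours_by_user([("ann", 4, "training"), ("bob", 2, "standby")],
                    [("ann", 1)], eventtype="training"))   # {'ann': 4}
print(hours_by_user([("ann", 4, "training")], [("ann", 1)]))  # {'ann': 5}
```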
gh_patches_debug_1086 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-1084 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError thrown when trying to import hypothesis in 3.44.21
hypothesis (3.44.21)
In [4]: from hypothesis import given
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-4-4ce9639ca03b> in <module>()
----> 1 from hypothesis import given
/usr/local/lib/python2.7/dist-packages/hypothesis/__init__.py in <module>()
29 from hypothesis.version import __version_info__, __version__
30 from hypothesis.control import assume, note, reject, event
---> 31 from hypothesis.core import given, find, example, seed, reproduce_failure, \
32 PrintSettings
33 from hypothesis.utils.conventions import infer
/usr/local/lib/python2.7/dist-packages/hypothesis/core.py in <module>()
35 from coverage.collector import Collector
36
---> 37 import hypothesis.strategies as st
38 from hypothesis import __version__
39 from hypothesis.errors import Flaky, Timeout, NoSuchExample, \
/usr/local/lib/python2.7/dist-packages/hypothesis/strategies.py in <module>()
30 from hypothesis.control import assume
31 from hypothesis._settings import note_deprecation
---> 32 from hypothesis.internal.cache import LRUReusedCache
33 from hypothesis.searchstrategy import SearchStrategy
34 from hypothesis.internal.compat import gcd, ceil, floor, hrange, \
/usr/local/lib/python2.7/dist-packages/hypothesis/internal/cache.py in <module>()
21
22
---> 23 @attr.s(slots=True)
24 class Entry(object):
25 key = attr.ib()
TypeError: attributes() got an unexpected keyword argument 'slots'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2018 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 import os
21 import sys
22
23 import setuptools
24
25
26 def local_file(name):
27 return os.path.relpath(os.path.join(os.path.dirname(__file__), name))
28
29
30 SOURCE = local_file('src')
31 README = local_file('README.rst')
32
33
34 # Assignment to placate pyflakes. The actual version is from the exec that
35 # follows.
36 __version__ = None
37
38 with open(local_file('src/hypothesis/version.py')) as o:
39 exec(o.read())
40
41 assert __version__ is not None
42
43
44 extras = {
45 'datetime': ['pytz'],
46 'pytz': ['pytz'],
47 'fakefactory': ['Faker>=0.7'],
48 'numpy': ['numpy>=1.9.0'],
49 'pytest': ['pytest>=2.8.0'],
50 }
51
52 # Django 2 only supports Python 3, but doesn't have any python_requires
53 # markers in its setup.py --- so "pip install django" just fails in
54 # Python 2. So rather than relying on pip, we pin the version of
55 # Django on Python 2 ourselves.
56 #
57 # See https://github.com/HypothesisWorks/hypothesis-python/pull/1008
58 if sys.version_info[0] < 3:
59 django_major_pin = '<2'
60 else:
61 django_major_pin = '<3'
62
63 # We only support the releases of Django that are supported by the Django
64 # core team. See https://www.djangoproject.com/download/#supported-versions
65 #
66 # New versions of setuptools allow us to set very precise pins; older versions
67 # of setuptools are coarser.
68 major_setuptools_version = int(setuptools.__version__.split('.')[0])
69 if major_setuptools_version >= 8:
70 django_minor_pin = '>=1.8,!=1.9.*,!=1.10.*'
71 else:
72 django_minor_pin = '>=1.8'
73
74 django_pin = 'django%s,%s' % (django_minor_pin, django_major_pin)
75 extras['django'] = ['pytz', django_pin]
76
77 extras['faker'] = extras['fakefactory']
78
79 extras['all'] = sorted(sum(extras.values(), []))
80
81 extras[":python_version == '2.7'"] = ['enum34']
82
83 install_requires = ['attrs', 'coverage']
84
85 if sys.version_info[0] < 3:
86 install_requires.append('enum34')
87
88 setuptools.setup(
89 name='hypothesis',
90 version=__version__,
91 author='David R. MacIver',
92 author_email='[email protected]',
93 packages=setuptools.find_packages(SOURCE),
94 package_dir={'': SOURCE},
95 url='https://github.com/HypothesisWorks/hypothesis-python',
96 license='MPL v2',
97 description='A library for property based testing',
98 zip_safe=False,
99 extras_require=extras,
100 install_requires=install_requires,
101 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
102 classifiers=[
103 'Development Status :: 5 - Production/Stable',
104 'Intended Audience :: Developers',
105 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
106 'Operating System :: Unix',
107 'Operating System :: POSIX',
108 'Operating System :: Microsoft :: Windows',
109 'Programming Language :: Python',
110 'Programming Language :: Python :: 2.7',
111 'Programming Language :: Python :: 3',
112 'Programming Language :: Python :: 3.4',
113 'Programming Language :: Python :: 3.5',
114 'Programming Language :: Python :: 3.6',
115 'Programming Language :: Python :: Implementation :: CPython',
116 'Programming Language :: Python :: Implementation :: PyPy',
117 'Topic :: Software Development :: Testing',
118 ],
119 entry_points={
120 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],
121 },
122 long_description=open(README).read(),
123 )
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -80,7 +80,7 @@
extras[":python_version == '2.7'"] = ['enum34']
-install_requires = ['attrs', 'coverage']
+install_requires = ['attrs>=16.0.0', 'coverage']
if sys.version_info[0] < 3:
install_requires.append('enum34')
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -80,7 +80,7 @@\n \n extras[\":python_version == '2.7'\"] = ['enum34']\n \n-install_requires = ['attrs', 'coverage']\n+install_requires = ['attrs>=16.0.0', 'coverage']\n \n if sys.version_info[0] < 3:\n install_requires.append('enum34')\n", "issue": "TypeError thrown when trying to import hypothesis in 3.44.21\nhypothesis (3.44.21)\r\n\r\nIn [4]: from hypothesis import given\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-4-4ce9639ca03b> in <module>()\r\n----> 1 from hypothesis import given\r\n\r\n/usr/local/lib/python2.7/dist-packages/hypothesis/__init__.py in <module>()\r\n 29 from hypothesis.version import __version_info__, __version__\r\n 30 from hypothesis.control import assume, note, reject, event\r\n---> 31 from hypothesis.core import given, find, example, seed, reproduce_failure, \\\r\n 32 PrintSettings\r\n 33 from hypothesis.utils.conventions import infer\r\n\r\n/usr/local/lib/python2.7/dist-packages/hypothesis/core.py in <module>()\r\n 35 from coverage.collector import Collector\r\n 36\r\n---> 37 import hypothesis.strategies as st\r\n 38 from hypothesis import __version__\r\n 39 from hypothesis.errors import Flaky, Timeout, NoSuchExample, \\\r\n\r\n/usr/local/lib/python2.7/dist-packages/hypothesis/strategies.py in <module>()\r\n 30 from hypothesis.control import assume\r\n 31 from hypothesis._settings import note_deprecation\r\n---> 32 from hypothesis.internal.cache import LRUReusedCache\r\n 33 from hypothesis.searchstrategy import SearchStrategy\r\n 34 from hypothesis.internal.compat import gcd, ceil, floor, hrange, \\\r\n\r\n/usr/local/lib/python2.7/dist-packages/hypothesis/internal/cache.py in <module>()\r\n 21\r\n 22\r\n---> 23 @attr.s(slots=True)\r\n 24 class Entry(object):\r\n 25 key = attr.ib()\r\n\r\nTypeError: attributes() got an unexpected keyword argument 'slots'\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\n\nimport setuptools\n\n\ndef local_file(name):\n return os.path.relpath(os.path.join(os.path.dirname(__file__), name))\n\n\nSOURCE = local_file('src')\nREADME = local_file('README.rst')\n\n\n# Assignment to placate pyflakes. The actual version is from the exec that\n# follows.\n__version__ = None\n\nwith open(local_file('src/hypothesis/version.py')) as o:\n exec(o.read())\n\nassert __version__ is not None\n\n\nextras = {\n 'datetime': ['pytz'],\n 'pytz': ['pytz'],\n 'fakefactory': ['Faker>=0.7'],\n 'numpy': ['numpy>=1.9.0'],\n 'pytest': ['pytest>=2.8.0'],\n}\n\n# Django 2 only supports Python 3, but doesn't have any python_requires\n# markers in its setup.py --- so \"pip install django\" just fails in\n# Python 2. 
So rather than relying on pip, we pin the version of\n# Django on Python 2 ourselves.\n#\n# See https://github.com/HypothesisWorks/hypothesis-python/pull/1008\nif sys.version_info[0] < 3:\n django_major_pin = '<2'\nelse:\n django_major_pin = '<3'\n\n# We only support the releases of Django that are supported by the Django\n# core team. See https://www.djangoproject.com/download/#supported-versions\n#\n# New versions of setuptools allow us to set very precise pins; older versions\n# of setuptools are coarser.\nmajor_setuptools_version = int(setuptools.__version__.split('.')[0])\nif major_setuptools_version >= 8:\n django_minor_pin = '>=1.8,!=1.9.*,!=1.10.*'\nelse:\n django_minor_pin = '>=1.8'\n\ndjango_pin = 'django%s,%s' % (django_minor_pin, django_major_pin)\nextras['django'] = ['pytz', django_pin]\n\nextras['faker'] = extras['fakefactory']\n\nextras['all'] = sorted(sum(extras.values(), []))\n\nextras[\":python_version == '2.7'\"] = ['enum34']\n\ninstall_requires = ['attrs', 'coverage']\n\nif sys.version_info[0] < 3:\n install_requires.append('enum34')\n\nsetuptools.setup(\n name='hypothesis',\n version=__version__,\n author='David R. MacIver',\n author_email='[email protected]',\n packages=setuptools.find_packages(SOURCE),\n package_dir={'': SOURCE},\n url='https://github.com/HypothesisWorks/hypothesis-python',\n license='MPL v2',\n description='A library for property based testing',\n zip_safe=False,\n extras_require=extras,\n install_requires=install_requires,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',\n 'Operating System :: Unix',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Testing',\n ],\n entry_points={\n 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],\n },\n long_description=open(README).read(),\n)\n", "path": "setup.py"}], "after_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\n\nimport setuptools\n\n\ndef local_file(name):\n return os.path.relpath(os.path.join(os.path.dirname(__file__), name))\n\n\nSOURCE = local_file('src')\nREADME = local_file('README.rst')\n\n\n# Assignment to placate pyflakes. 
The actual version is from the exec that\n# follows.\n__version__ = None\n\nwith open(local_file('src/hypothesis/version.py')) as o:\n exec(o.read())\n\nassert __version__ is not None\n\n\nextras = {\n 'datetime': ['pytz'],\n 'pytz': ['pytz'],\n 'fakefactory': ['Faker>=0.7'],\n 'numpy': ['numpy>=1.9.0'],\n 'pytest': ['pytest>=2.8.0'],\n}\n\n# Django 2 only supports Python 3, but doesn't have any python_requires\n# markers in its setup.py --- so \"pip install django\" just fails in\n# Python 2. So rather than relying on pip, we pin the version of\n# Django on Python 2 ourselves.\n#\n# See https://github.com/HypothesisWorks/hypothesis-python/pull/1008\nif sys.version_info[0] < 3:\n django_major_pin = '<2'\nelse:\n django_major_pin = '<3'\n\n# We only support the releases of Django that are supported by the Django\n# core team. See https://www.djangoproject.com/download/#supported-versions\n#\n# New versions of setuptools allow us to set very precise pins; older versions\n# of setuptools are coarser.\nmajor_setuptools_version = int(setuptools.__version__.split('.')[0])\nif major_setuptools_version >= 8:\n django_minor_pin = '>=1.8,!=1.9.*,!=1.10.*'\nelse:\n django_minor_pin = '>=1.8'\n\ndjango_pin = 'django%s,%s' % (django_minor_pin, django_major_pin)\nextras['django'] = ['pytz', django_pin]\n\nextras['faker'] = extras['fakefactory']\n\nextras['all'] = sorted(sum(extras.values(), []))\n\nextras[\":python_version == '2.7'\"] = ['enum34']\n\ninstall_requires = ['attrs>=16.0.0', 'coverage']\n\nif sys.version_info[0] < 3:\n install_requires.append('enum34')\n\nsetuptools.setup(\n name='hypothesis',\n version=__version__,\n author='David R. MacIver',\n author_email='[email protected]',\n packages=setuptools.find_packages(SOURCE),\n package_dir={'': SOURCE},\n url='https://github.com/HypothesisWorks/hypothesis-python',\n license='MPL v2',\n description='A library for property based testing',\n zip_safe=False,\n extras_require=extras,\n install_requires=install_requires,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',\n 'Operating System :: Unix',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Testing',\n ],\n entry_points={\n 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],\n },\n long_description=open(README).read(),\n)\n", "path": "setup.py"}]} | 1,981 | 99 |
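The hypothesis failure above is an API-version mismatch rather than a bug in the cache module itself: `attr.s(slots=True)` is valid on reasonably recent attrs releases but raises the quoted `TypeError` on old ones, which is why the accepted patch only tightens `install_requires`. The snippet below restates both sides; the exact version at which `slots` appeared is taken from the `attrs>=16.0.0` pin in the diff rather than independently verified.

```python
# The call site that triggers the traceback when an outdated attrs is installed:
import attr

@attr.s(slots=True)  # raises the quoted TypeError on attrs releases older than the pin
class Entry(object):
    key = attr.ib()

# The corresponding setup.py change from the golden diff:
install_requires = ['attrs>=16.0.0', 'coverage']
```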
gh_patches_debug_22830 | rasdani/github-patches | git_diff | wright-group__WrightTools-201 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
distribute client_secrets
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # !/usr/bin/env python
2
3 import os
4 from setuptools import setup, find_packages
5
6 def package_files(directory):
7 paths = []
8 for (path, directories, filenames) in os.walk(directory):
9 for filename in filenames:
10 paths.append(os.path.join('..', path, filename))
11 return paths
12
13 here = os.path.abspath(os.path.dirname(__file__))
14
15 extra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))
16 extra_files.append(os.path.join(here, 'CONTRIBUTORS'))
17 extra_files.append(os.path.join(here, 'LICENSE'))
18 extra_files.append(os.path.join(here, 'README.rst'))
19 extra_files.append(os.path.join(here, 'requirements.txt'))
20 extra_files.append(os.path.join(here, 'VERSION'))
21
22 with open(os.path.join(here, 'requirements.txt')) as f:
23 required = f.read().splitlines()
24
25 with open(os.path.join(here, 'VERSION')) as version_file:
26 version = version_file.read().strip()
27
28 setup(
29 name='WrightTools',
30 packages=find_packages(),
31 package_data={'': extra_files},
32 setup_requires=['pytest-runner'],
33 tests_require=['pytest'],
34 install_requires=required,
35 extras_require={'docs': ['sphinx-gallery>=0.1.9']},
36 version=version,
37 description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',
38 author='Blaise Thompson',
39 author_email='[email protected]',
40 license='MIT',
41 url='http://wright.tools',
42 keywords='spectroscopy science multidimensional visualization',
43 classifiers=['Development Status :: 5 - Production/Stable',
44 'Intended Audience :: Science/Research',
45 'License :: OSI Approved :: MIT License',
46 'Natural Language :: English',
47 'Programming Language :: Python :: 2',
48 'Programming Language :: Python :: 2.7',
49 'Programming Language :: Python :: 3',
50 'Programming Language :: Python :: 3.3',
51 'Programming Language :: Python :: 3.4',
52 'Programming Language :: Python :: 3.5',
53 'Topic :: Scientific/Engineering']
54 )
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,6 +3,7 @@
import os
from setuptools import setup, find_packages
+
def package_files(directory):
paths = []
for (path, directories, filenames) in os.walk(directory):
@@ -10,6 +11,7 @@
paths.append(os.path.join('..', path, filename))
return paths
+
here = os.path.abspath(os.path.dirname(__file__))
extra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))
@@ -18,6 +20,7 @@
extra_files.append(os.path.join(here, 'README.rst'))
extra_files.append(os.path.join(here, 'requirements.txt'))
extra_files.append(os.path.join(here, 'VERSION'))
+extra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))
with open(os.path.join(here, 'requirements.txt')) as f:
required = f.read().splitlines()
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,6 +3,7 @@\n import os\n from setuptools import setup, find_packages\n \n+\n def package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n@@ -10,6 +11,7 @@\n paths.append(os.path.join('..', path, filename))\n return paths\n \n+\n here = os.path.abspath(os.path.dirname(__file__))\n \n extra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\n@@ -18,6 +20,7 @@\n extra_files.append(os.path.join(here, 'README.rst'))\n extra_files.append(os.path.join(here, 'requirements.txt'))\n extra_files.append(os.path.join(here, 'VERSION'))\n+extra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))\n \n with open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n", "issue": "distribute client_secrets\n\n", "before_files": [{"content": "# !/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nextra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\nextra_files.append(os.path.join(here, 'CONTRIBUTORS'))\nextra_files.append(os.path.join(here, 'LICENSE'))\nextra_files.append(os.path.join(here, 'README.rst'))\nextra_files.append(os.path.join(here, 'requirements.txt'))\nextra_files.append(os.path.join(here, 'VERSION'))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n\nwith open(os.path.join(here, 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='WrightTools',\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',\n author='Blaise Thompson',\n author_email='[email protected]',\n license='MIT',\n url='http://wright.tools',\n keywords='spectroscopy science multidimensional visualization',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering']\n)\n", "path": "setup.py"}], "after_files": [{"content": "# !/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nextra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\nextra_files.append(os.path.join(here, 'CONTRIBUTORS'))\nextra_files.append(os.path.join(here, 'LICENSE'))\nextra_files.append(os.path.join(here, 'README.rst'))\nextra_files.append(os.path.join(here, 'requirements.txt'))\nextra_files.append(os.path.join(here, 
'VERSION'))\nextra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n\nwith open(os.path.join(here, 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='WrightTools',\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',\n author='Blaise Thompson',\n author_email='[email protected]',\n license='MIT',\n url='http://wright.tools',\n keywords='spectroscopy science multidimensional visualization',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering']\n)\n", "path": "setup.py"}]} | 818 | 223 |
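The WrightTools fix above works because setuptools ships only the non-Python files it is told about: `package_data={'': extra_files}` is the single source of truth in this setup.py, so a `client_secrets.json` that never gets appended to `extra_files` is silently left out of wheels and sdists even though it sits in the source tree. A stripped-down sketch of that mechanism (paths shortened, the real `package_files()` walk omitted):

```python
import os

here = os.path.abspath(os.path.dirname(__file__))
extra_files = []  # normally filled by walking WrightTools/datasets and the top-level metadata files
extra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))  # the one-line fix

# extra_files is passed to setuptools.setup(..., package_data={'': extra_files});
# any file not listed there is omitted from built distributions even if it
# exists in the repository.
print(extra_files)
```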
gh_patches_debug_5384 | rasdani/github-patches | git_diff | dynaconf__dynaconf-457 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow core_loaders to be case insensitive.
**Describe the bug**
When using the `core_loaders` attribute with the value `["yaml"]`, Dynaconf fails to read settings from the corresponding YAML configuration file.
**To Reproduce**
1. Go into an empty dir.
2. Setup environment.
* Python 3.9
* `pip install dynaconf[yaml]` or `poetry add "dynaconf[yaml]"`
* dynaconf 3.1.2
* ruaml.yml 0.16.2
3. Create `settings.yml`
```yml
test: hallo
logging:
json: true
```
4. Create `test.py` and execute it. It will succeed.
```python
from dynaconf import Dynaconf
settings = Dynaconf(
merge_enabled=True,
settings_files=["settings.yml"],
)
print(settings.TEST)
print(settings.LOGGING.JSON)
```
5. Add attribute `core_loaders` as recommended in the Dynaconf doc. Execute. This will fail.
```python
from dynaconf import Dynaconf
settings = Dynaconf(
merge_enabled=True,
core_loaders=["yaml"],
settings_files=["settings.yml"],
)
print(settings.TEST)
print(settings.LOGGING.JSON)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynaconf/loaders/__init__.py`
Content:
```
1 import os
2
3 from dynaconf import constants as ct
4 from dynaconf import default_settings
5 from dynaconf.loaders import ini_loader
6 from dynaconf.loaders import json_loader
7 from dynaconf.loaders import py_loader
8 from dynaconf.loaders import toml_loader
9 from dynaconf.loaders import yaml_loader
10 from dynaconf.utils import deduplicate
11 from dynaconf.utils import ensure_a_list
12 from dynaconf.utils.boxing import DynaBox
13 from dynaconf.utils.files import get_local_filename
14 from dynaconf.utils.parse_conf import false_values
15
16
17 def default_loader(obj, defaults=None):
18 """Loads default settings and check if there are overridings
19 exported as environment variables"""
20 defaults = defaults or {}
21 default_settings_values = {
22 key: value
23 for key, value in default_settings.__dict__.items() # noqa
24 if key.isupper()
25 }
26
27 all_keys = deduplicate(
28 list(defaults.keys()) + list(default_settings_values.keys())
29 )
30
31 for key in all_keys:
32 if not obj.exists(key):
33 value = defaults.get(key, default_settings_values.get(key))
34 obj.set(key, value)
35
36 # start dotenv to get default env vars from there
37 # check overrides in env vars
38 if obj.get("load_dotenv") is True:
39 default_settings.start_dotenv(obj)
40
41 # Deal with cases where a custom ENV_SWITCHER_IS_PROVIDED
42 # Example: Flask and Django Extensions
43 env_switcher = defaults.get(
44 "ENV_SWITCHER_FOR_DYNACONF", "ENV_FOR_DYNACONF"
45 )
46
47 for key in all_keys:
48 if key not in default_settings_values.keys():
49 continue
50
51 env_value = obj.get_environ(
52 env_switcher if key == "ENV_FOR_DYNACONF" else key,
53 default="_not_found",
54 )
55
56 if env_value != "_not_found":
57 obj.set(key, env_value, tomlfy=True)
58
59
60 def settings_loader(
61 obj, settings_module=None, env=None, silent=True, key=None, filename=None
62 ):
63 """Loads from defined settings module
64
65 :param obj: A dynaconf instance
66 :param settings_module: A path or a list of paths e.g settings.toml
67 :param env: Env to look for data defaults: development
68 :param silent: Boolean to raise loading errors
69 :param key: Load a single key if provided
70 :param filename: optional filename to override the settings_module
71 """
72 if filename is None:
73 settings_module = settings_module or obj.settings_module
74 if not settings_module: # pragma: no cover
75 return
76 files = ensure_a_list(settings_module)
77 else:
78 files = ensure_a_list(filename)
79
80 files.extend(ensure_a_list(obj.get("SECRETS_FOR_DYNACONF", None)))
81
82 found_files = []
83 modules_names = []
84 for item in files:
85 item = str(item) # Ensure str in case of LocalPath/Path is passed.
86 if item.endswith(ct.ALL_EXTENSIONS + (".py",)):
87 p_root = obj._root_path or (
88 os.path.dirname(found_files[0]) if found_files else None
89 )
90 found = obj.find_file(item, project_root=p_root)
91 if found:
92 found_files.append(found)
93 else:
94 # a bare python module name w/o extension
95 modules_names.append(item)
96
97 enabled_core_loaders = obj.get("CORE_LOADERS_FOR_DYNACONF")
98
99 # add `.local.` to found_files list to search for local files.
100 found_files.extend(
101 [
102 get_local_filename(item)
103 for item in found_files
104 if ".local." not in str(item)
105 ]
106 )
107
108 for mod_file in modules_names + found_files:
109 # can be set to multiple files settings.py,settings.yaml,...
110
111 # Cascade all loaders
112 loaders = [
113 {"ext": ct.YAML_EXTENSIONS, "name": "YAML", "loader": yaml_loader},
114 {"ext": ct.TOML_EXTENSIONS, "name": "TOML", "loader": toml_loader},
115 {"ext": ct.INI_EXTENSIONS, "name": "INI", "loader": ini_loader},
116 {"ext": ct.JSON_EXTENSIONS, "name": "JSON", "loader": json_loader},
117 ]
118
119 for loader in loaders:
120 if loader["name"] not in enabled_core_loaders:
121 continue
122
123 if mod_file.endswith(loader["ext"]):
124 loader["loader"].load(
125 obj, filename=mod_file, env=env, silent=silent, key=key
126 )
127 continue
128
129 if mod_file.endswith(ct.ALL_EXTENSIONS):
130 continue
131
132 if "PY" not in enabled_core_loaders:
133 # pyloader is disabled
134 continue
135
136 # must be Python file or module
137 # load from default defined module settings.py or .secrets.py if exists
138 py_loader.load(obj, mod_file, key=key)
139
140 # load from the current env e.g: development_settings.py
141 env = env or obj.current_env
142 if mod_file.endswith(".py"):
143 if ".secrets.py" == mod_file:
144 tmpl = ".{0}_{1}{2}"
145 mod_file = "secrets.py"
146 else:
147 tmpl = "{0}_{1}{2}"
148
149 dirname = os.path.dirname(mod_file)
150 filename, extension = os.path.splitext(os.path.basename(mod_file))
151 new_filename = tmpl.format(env.lower(), filename, extension)
152 env_mod_file = os.path.join(dirname, new_filename)
153 global_filename = tmpl.format("global", filename, extension)
154 global_mod_file = os.path.join(dirname, global_filename)
155 else:
156 env_mod_file = f"{env.lower()}_{mod_file}"
157 global_mod_file = f"global_{mod_file}"
158
159 py_loader.load(
160 obj,
161 env_mod_file,
162 identifier=f"py_{env.upper()}",
163 silent=True,
164 key=key,
165 )
166
167 # load from global_settings.py
168 py_loader.load(
169 obj, global_mod_file, identifier="py_global", silent=True, key=key
170 )
171
172
173 def enable_external_loaders(obj):
174 """Enable external service loaders like `VAULT_` and `REDIS_`
175 looks forenv variables like `REDIS_ENABLED_FOR_DYNACONF`
176 """
177 for name, loader in ct.EXTERNAL_LOADERS.items():
178 enabled = getattr(obj, f"{name.upper()}_ENABLED_FOR_DYNACONF", False)
179 if (
180 enabled
181 and enabled not in false_values
182 and loader not in obj.LOADERS_FOR_DYNACONF
183 ): # noqa
184 obj.LOADERS_FOR_DYNACONF.insert(0, loader)
185
186
187 def write(filename, data, env=None):
188 """Writes `data` to `filename` infers format by file extension."""
189 loader_name = f"{filename.rpartition('.')[-1]}_loader"
190 loader = globals().get(loader_name)
191 if not loader:
192 raise IOError(f"{loader_name} cannot be found.")
193
194 data = DynaBox(data, box_settings={}).to_dict()
195 if loader is not py_loader and env and env not in data:
196 data = {env: data}
197
198 loader.write(filename, data, merge=False)
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dynaconf/loaders/__init__.py b/dynaconf/loaders/__init__.py
--- a/dynaconf/loaders/__init__.py
+++ b/dynaconf/loaders/__init__.py
@@ -94,7 +94,9 @@
# a bare python module name w/o extension
modules_names.append(item)
- enabled_core_loaders = obj.get("CORE_LOADERS_FOR_DYNACONF")
+ enabled_core_loaders = [
+ item.upper() for item in obj.get("CORE_LOADERS_FOR_DYNACONF") or []
+ ]
# add `.local.` to found_files list to search for local files.
found_files.extend(
| {"golden_diff": "diff --git a/dynaconf/loaders/__init__.py b/dynaconf/loaders/__init__.py\n--- a/dynaconf/loaders/__init__.py\n+++ b/dynaconf/loaders/__init__.py\n@@ -94,7 +94,9 @@\n # a bare python module name w/o extension\n modules_names.append(item)\n \n- enabled_core_loaders = obj.get(\"CORE_LOADERS_FOR_DYNACONF\")\n+ enabled_core_loaders = [\n+ item.upper() for item in obj.get(\"CORE_LOADERS_FOR_DYNACONF\") or []\n+ ]\n \n # add `.local.` to found_files list to search for local files.\n found_files.extend(\n", "issue": "Allow core_loaders to be case insensitive.\n**Describe the bug**\r\n\r\nWhen using the `core_loaders` attribute with the value `[\"yaml\"]` Dynaconf fails reading settings from the respective YML configuration file.\r\n\r\n**To Reproduce**\r\n\r\n1. Go into an empty dir.\r\n\r\n2. Setup environment.\r\n\r\n * Python 3.9\r\n * `pip install dynaconf[yaml]` or `poetry add \"dynaconf[yaml]\"`\r\n * dynaconf 3.1.2\r\n * ruaml.yml 0.16.2\r\n\r\n3. Create `settings.yml`\r\n\r\n```yml\r\ntest: hallo\r\nlogging:\r\n json: true\r\n```\r\n4. Create `test.py` and execute it. It will succeed.\r\n\r\n```python\r\nfrom dynaconf import Dynaconf\r\n\r\nsettings = Dynaconf(\r\n merge_enabled=True,\r\n settings_files=[\"settings.yml\"],\r\n)\r\n\r\nprint(settings.TEST)\r\nprint(settings.LOGGING.JSON)\r\n```\r\n\r\n5. Add attribute `core_loaders` as recommended in the Dynaconf doc. Execute. This will fail.\r\n\r\n```python\r\nfrom dynaconf import Dynaconf\r\n\r\nsettings = Dynaconf(\r\n merge_enabled=True,\r\n core_loaders=[\"yaml\"],\r\n settings_files=[\"settings.yml\"],\r\n)\r\n\r\nprint(settings.TEST)\r\nprint(settings.LOGGING.JSON)\r\n```\r\n\n", "before_files": [{"content": "import os\n\nfrom dynaconf import constants as ct\nfrom dynaconf import default_settings\nfrom dynaconf.loaders import ini_loader\nfrom dynaconf.loaders import json_loader\nfrom dynaconf.loaders import py_loader\nfrom dynaconf.loaders import toml_loader\nfrom dynaconf.loaders import yaml_loader\nfrom dynaconf.utils import deduplicate\nfrom dynaconf.utils import ensure_a_list\nfrom dynaconf.utils.boxing import DynaBox\nfrom dynaconf.utils.files import get_local_filename\nfrom dynaconf.utils.parse_conf import false_values\n\n\ndef default_loader(obj, defaults=None):\n \"\"\"Loads default settings and check if there are overridings\n exported as environment variables\"\"\"\n defaults = defaults or {}\n default_settings_values = {\n key: value\n for key, value in default_settings.__dict__.items() # noqa\n if key.isupper()\n }\n\n all_keys = deduplicate(\n list(defaults.keys()) + list(default_settings_values.keys())\n )\n\n for key in all_keys:\n if not obj.exists(key):\n value = defaults.get(key, default_settings_values.get(key))\n obj.set(key, value)\n\n # start dotenv to get default env vars from there\n # check overrides in env vars\n if obj.get(\"load_dotenv\") is True:\n default_settings.start_dotenv(obj)\n\n # Deal with cases where a custom ENV_SWITCHER_IS_PROVIDED\n # Example: Flask and Django Extensions\n env_switcher = defaults.get(\n \"ENV_SWITCHER_FOR_DYNACONF\", \"ENV_FOR_DYNACONF\"\n )\n\n for key in all_keys:\n if key not in default_settings_values.keys():\n continue\n\n env_value = obj.get_environ(\n env_switcher if key == \"ENV_FOR_DYNACONF\" else key,\n default=\"_not_found\",\n )\n\n if env_value != \"_not_found\":\n obj.set(key, env_value, tomlfy=True)\n\n\ndef settings_loader(\n obj, settings_module=None, env=None, silent=True, key=None, filename=None\n):\n \"\"\"Loads from defined settings 
module\n\n :param obj: A dynaconf instance\n :param settings_module: A path or a list of paths e.g settings.toml\n :param env: Env to look for data defaults: development\n :param silent: Boolean to raise loading errors\n :param key: Load a single key if provided\n :param filename: optional filename to override the settings_module\n \"\"\"\n if filename is None:\n settings_module = settings_module or obj.settings_module\n if not settings_module: # pragma: no cover\n return\n files = ensure_a_list(settings_module)\n else:\n files = ensure_a_list(filename)\n\n files.extend(ensure_a_list(obj.get(\"SECRETS_FOR_DYNACONF\", None)))\n\n found_files = []\n modules_names = []\n for item in files:\n item = str(item) # Ensure str in case of LocalPath/Path is passed.\n if item.endswith(ct.ALL_EXTENSIONS + (\".py\",)):\n p_root = obj._root_path or (\n os.path.dirname(found_files[0]) if found_files else None\n )\n found = obj.find_file(item, project_root=p_root)\n if found:\n found_files.append(found)\n else:\n # a bare python module name w/o extension\n modules_names.append(item)\n\n enabled_core_loaders = obj.get(\"CORE_LOADERS_FOR_DYNACONF\")\n\n # add `.local.` to found_files list to search for local files.\n found_files.extend(\n [\n get_local_filename(item)\n for item in found_files\n if \".local.\" not in str(item)\n ]\n )\n\n for mod_file in modules_names + found_files:\n # can be set to multiple files settings.py,settings.yaml,...\n\n # Cascade all loaders\n loaders = [\n {\"ext\": ct.YAML_EXTENSIONS, \"name\": \"YAML\", \"loader\": yaml_loader},\n {\"ext\": ct.TOML_EXTENSIONS, \"name\": \"TOML\", \"loader\": toml_loader},\n {\"ext\": ct.INI_EXTENSIONS, \"name\": \"INI\", \"loader\": ini_loader},\n {\"ext\": ct.JSON_EXTENSIONS, \"name\": \"JSON\", \"loader\": json_loader},\n ]\n\n for loader in loaders:\n if loader[\"name\"] not in enabled_core_loaders:\n continue\n\n if mod_file.endswith(loader[\"ext\"]):\n loader[\"loader\"].load(\n obj, filename=mod_file, env=env, silent=silent, key=key\n )\n continue\n\n if mod_file.endswith(ct.ALL_EXTENSIONS):\n continue\n\n if \"PY\" not in enabled_core_loaders:\n # pyloader is disabled\n continue\n\n # must be Python file or module\n # load from default defined module settings.py or .secrets.py if exists\n py_loader.load(obj, mod_file, key=key)\n\n # load from the current env e.g: development_settings.py\n env = env or obj.current_env\n if mod_file.endswith(\".py\"):\n if \".secrets.py\" == mod_file:\n tmpl = \".{0}_{1}{2}\"\n mod_file = \"secrets.py\"\n else:\n tmpl = \"{0}_{1}{2}\"\n\n dirname = os.path.dirname(mod_file)\n filename, extension = os.path.splitext(os.path.basename(mod_file))\n new_filename = tmpl.format(env.lower(), filename, extension)\n env_mod_file = os.path.join(dirname, new_filename)\n global_filename = tmpl.format(\"global\", filename, extension)\n global_mod_file = os.path.join(dirname, global_filename)\n else:\n env_mod_file = f\"{env.lower()}_{mod_file}\"\n global_mod_file = f\"global_{mod_file}\"\n\n py_loader.load(\n obj,\n env_mod_file,\n identifier=f\"py_{env.upper()}\",\n silent=True,\n key=key,\n )\n\n # load from global_settings.py\n py_loader.load(\n obj, global_mod_file, identifier=\"py_global\", silent=True, key=key\n )\n\n\ndef enable_external_loaders(obj):\n \"\"\"Enable external service loaders like `VAULT_` and `REDIS_`\n looks forenv variables like `REDIS_ENABLED_FOR_DYNACONF`\n \"\"\"\n for name, loader in ct.EXTERNAL_LOADERS.items():\n enabled = getattr(obj, f\"{name.upper()}_ENABLED_FOR_DYNACONF\", False)\n if 
(\n enabled\n and enabled not in false_values\n and loader not in obj.LOADERS_FOR_DYNACONF\n ): # noqa\n obj.LOADERS_FOR_DYNACONF.insert(0, loader)\n\n\ndef write(filename, data, env=None):\n \"\"\"Writes `data` to `filename` infers format by file extension.\"\"\"\n loader_name = f\"{filename.rpartition('.')[-1]}_loader\"\n loader = globals().get(loader_name)\n if not loader:\n raise IOError(f\"{loader_name} cannot be found.\")\n\n data = DynaBox(data, box_settings={}).to_dict()\n if loader is not py_loader and env and env not in data:\n data = {env: data}\n\n loader.write(filename, data, merge=False)\n", "path": "dynaconf/loaders/__init__.py"}], "after_files": [{"content": "import os\n\nfrom dynaconf import constants as ct\nfrom dynaconf import default_settings\nfrom dynaconf.loaders import ini_loader\nfrom dynaconf.loaders import json_loader\nfrom dynaconf.loaders import py_loader\nfrom dynaconf.loaders import toml_loader\nfrom dynaconf.loaders import yaml_loader\nfrom dynaconf.utils import deduplicate\nfrom dynaconf.utils import ensure_a_list\nfrom dynaconf.utils.boxing import DynaBox\nfrom dynaconf.utils.files import get_local_filename\nfrom dynaconf.utils.parse_conf import false_values\n\n\ndef default_loader(obj, defaults=None):\n \"\"\"Loads default settings and check if there are overridings\n exported as environment variables\"\"\"\n defaults = defaults or {}\n default_settings_values = {\n key: value\n for key, value in default_settings.__dict__.items() # noqa\n if key.isupper()\n }\n\n all_keys = deduplicate(\n list(defaults.keys()) + list(default_settings_values.keys())\n )\n\n for key in all_keys:\n if not obj.exists(key):\n value = defaults.get(key, default_settings_values.get(key))\n obj.set(key, value)\n\n # start dotenv to get default env vars from there\n # check overrides in env vars\n if obj.get(\"load_dotenv\") is True:\n default_settings.start_dotenv(obj)\n\n # Deal with cases where a custom ENV_SWITCHER_IS_PROVIDED\n # Example: Flask and Django Extensions\n env_switcher = defaults.get(\n \"ENV_SWITCHER_FOR_DYNACONF\", \"ENV_FOR_DYNACONF\"\n )\n\n for key in all_keys:\n if key not in default_settings_values.keys():\n continue\n\n env_value = obj.get_environ(\n env_switcher if key == \"ENV_FOR_DYNACONF\" else key,\n default=\"_not_found\",\n )\n\n if env_value != \"_not_found\":\n obj.set(key, env_value, tomlfy=True)\n\n\ndef settings_loader(\n obj, settings_module=None, env=None, silent=True, key=None, filename=None\n):\n \"\"\"Loads from defined settings module\n\n :param obj: A dynaconf instance\n :param settings_module: A path or a list of paths e.g settings.toml\n :param env: Env to look for data defaults: development\n :param silent: Boolean to raise loading errors\n :param key: Load a single key if provided\n :param filename: optional filename to override the settings_module\n \"\"\"\n if filename is None:\n settings_module = settings_module or obj.settings_module\n if not settings_module: # pragma: no cover\n return\n files = ensure_a_list(settings_module)\n else:\n files = ensure_a_list(filename)\n\n files.extend(ensure_a_list(obj.get(\"SECRETS_FOR_DYNACONF\", None)))\n\n found_files = []\n modules_names = []\n for item in files:\n item = str(item) # Ensure str in case of LocalPath/Path is passed.\n if item.endswith(ct.ALL_EXTENSIONS + (\".py\",)):\n p_root = obj._root_path or (\n os.path.dirname(found_files[0]) if found_files else None\n )\n found = obj.find_file(item, project_root=p_root)\n if found:\n found_files.append(found)\n else:\n # a bare python 
module name w/o extension\n modules_names.append(item)\n\n enabled_core_loaders = [\n item.upper() for item in obj.get(\"CORE_LOADERS_FOR_DYNACONF\") or []\n ]\n\n # add `.local.` to found_files list to search for local files.\n found_files.extend(\n [\n get_local_filename(item)\n for item in found_files\n if \".local.\" not in str(item)\n ]\n )\n\n for mod_file in modules_names + found_files:\n # can be set to multiple files settings.py,settings.yaml,...\n\n # Cascade all loaders\n loaders = [\n {\"ext\": ct.YAML_EXTENSIONS, \"name\": \"YAML\", \"loader\": yaml_loader},\n {\"ext\": ct.TOML_EXTENSIONS, \"name\": \"TOML\", \"loader\": toml_loader},\n {\"ext\": ct.INI_EXTENSIONS, \"name\": \"INI\", \"loader\": ini_loader},\n {\"ext\": ct.JSON_EXTENSIONS, \"name\": \"JSON\", \"loader\": json_loader},\n ]\n\n for loader in loaders:\n if loader[\"name\"] not in enabled_core_loaders:\n continue\n\n if mod_file.endswith(loader[\"ext\"]):\n loader[\"loader\"].load(\n obj, filename=mod_file, env=env, silent=silent, key=key\n )\n continue\n\n if mod_file.endswith(ct.ALL_EXTENSIONS):\n continue\n\n if \"PY\" not in enabled_core_loaders:\n # pyloader is disabled\n continue\n\n # must be Python file or module\n # load from default defined module settings.py or .secrets.py if exists\n py_loader.load(obj, mod_file, key=key)\n\n # load from the current env e.g: development_settings.py\n env = env or obj.current_env\n if mod_file.endswith(\".py\"):\n if \".secrets.py\" == mod_file:\n tmpl = \".{0}_{1}{2}\"\n mod_file = \"secrets.py\"\n else:\n tmpl = \"{0}_{1}{2}\"\n\n dirname = os.path.dirname(mod_file)\n filename, extension = os.path.splitext(os.path.basename(mod_file))\n new_filename = tmpl.format(env.lower(), filename, extension)\n env_mod_file = os.path.join(dirname, new_filename)\n global_filename = tmpl.format(\"global\", filename, extension)\n global_mod_file = os.path.join(dirname, global_filename)\n else:\n env_mod_file = f\"{env.lower()}_{mod_file}\"\n global_mod_file = f\"global_{mod_file}\"\n\n py_loader.load(\n obj,\n env_mod_file,\n identifier=f\"py_{env.upper()}\",\n silent=True,\n key=key,\n )\n\n # load from global_settings.py\n py_loader.load(\n obj, global_mod_file, identifier=\"py_global\", silent=True, key=key\n )\n\n\ndef enable_external_loaders(obj):\n \"\"\"Enable external service loaders like `VAULT_` and `REDIS_`\n looks forenv variables like `REDIS_ENABLED_FOR_DYNACONF`\n \"\"\"\n for name, loader in ct.EXTERNAL_LOADERS.items():\n enabled = getattr(obj, f\"{name.upper()}_ENABLED_FOR_DYNACONF\", False)\n if (\n enabled\n and enabled not in false_values\n and loader not in obj.LOADERS_FOR_DYNACONF\n ): # noqa\n obj.LOADERS_FOR_DYNACONF.insert(0, loader)\n\n\ndef write(filename, data, env=None):\n \"\"\"Writes `data` to `filename` infers format by file extension.\"\"\"\n loader_name = f\"{filename.rpartition('.')[-1]}_loader\"\n loader = globals().get(loader_name)\n if not loader:\n raise IOError(f\"{loader_name} cannot be found.\")\n\n data = DynaBox(data, box_settings={}).to_dict()\n if loader is not py_loader and env and env not in data:\n data = {env: data}\n\n loader.write(filename, data, merge=False)\n", "path": "dynaconf/loaders/__init__.py"}]} | 2,620 | 154 |
gh_patches_debug_18266 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-949 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E2507 False positive for IAM policy Effect conditionals
*cfn-lint version: (`cfn-lint --version`)*
cfn-lint 0.21.4
*Description of issue.*
cfn-lint reports "E2507 IAM Policy Effect should be Allow or Deny" when an IAM policy statement's `Effect` contains an intrinsic function, even if that intrinsic resolves to Allow or Deny, e.g.:
`Effect: !If [ IsUsEast1, 'Allow', 'Deny' ]`
I would guess this probably affects all IAM policy statements and intrinsics; I have observed it for the following policy types:
- AWS::S3::BucketPolicy
- AWS::IAM::ManagedPolicy
- AWS::IAM::User policies
And the following intrinsics
- !If
- !Sub
- !Ref
A simple reproduction example template is attached: [E2507-false-positive.yml.txt](https://github.com/aws-cloudformation/cfn-python-lint/files/3269255/E2507-false-positive.yml.txt)
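
For illustration, here is a minimal standalone sketch (not cfn-lint's actual rule code; the statement below is hypothetical) of why an intrinsic in `Effect` trips the check, and how limiting the comparison to plain string values would avoid the false positive:

```python
# Parsed from YAML, `Effect: !If [IsUsEast1, 'Allow', 'Deny']` becomes a dict.
statement = {'Effect': {'Fn::If': ['IsUsEast1', 'Allow', 'Deny']}}

def naive_check(stmt):
    # Mirrors the current rule: anything that is not 'Allow'/'Deny' is flagged.
    return stmt.get('Effect') not in ('Allow', 'Deny')

def string_only_check(stmt):
    # Only validate plain string values; intrinsic dicts are left for other rules.
    effect = stmt.get('Effect')
    return isinstance(effect, str) and effect not in ('Allow', 'Deny')

print(naive_check(statement))                    # True  -> false positive
print(string_only_check(statement))              # False -> intrinsic not flagged
print(string_only_check({'Effect': 'Permit'}))   # True  -> real typo still caught
```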
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/iam/Policy.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import json
18 from datetime import date
19 import six
20 from cfnlint.helpers import convert_dict
21 from cfnlint import CloudFormationLintRule
22 from cfnlint import RuleMatch
23
24
25 class Policy(CloudFormationLintRule):
26 """Check if IAM Policy JSON is correct"""
27 id = 'E2507'
28 shortdesc = 'Check if IAM Policies are properly configured'
29 description = 'See if there elements inside an IAM policy ' + \
30 'are correct'
31 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html'
32 tags = ['properties', 'iam']
33
34 def __init__(self):
35 """Init"""
36 super(Policy, self).__init__()
37 self.resource_exceptions = {
38 'AWS::ECR::Repository': 'RepositoryPolicyText',
39 }
40 self.resources_and_keys = {
41 'AWS::SNS::TopicPolicy': 'PolicyDocument',
42 'AWS::S3::BucketPolicy': 'PolicyDocument',
43 'AWS::KMS::Key': 'KeyPolicy',
44 'AWS::SQS::QueuePolicy': 'PolicyDocument',
45 'AWS::ECR::Repository': 'RepositoryPolicyText',
46 'AWS::Elasticsearch::Domain': 'AccessPolicies',
47 }
48 self.idp_and_keys = {
49 'AWS::IAM::Group': 'Policies',
50 'AWS::IAM::ManagedPolicy': 'PolicyDocument',
51 'AWS::IAM::Policy': 'PolicyDocument',
52 'AWS::IAM::Role': 'Policies',
53 'AWS::IAM::User': 'Policies',
54 }
55 for resource_type in self.resources_and_keys:
56 self.resource_property_types.append(resource_type)
57 for resource_type in self.idp_and_keys:
58 self.resource_property_types.append(resource_type)
59
60 def check_policy_document(self, value, path, is_identity_policy, resource_exceptions, start_mark, end_mark):
61 """Check policy document"""
62 matches = []
63
64 valid_keys = [
65 'Version',
66 'Id',
67 'Statement',
68 ]
69 valid_versions = ['2012-10-17', '2008-10-17', date(2012, 10, 17), date(2008, 10, 17)]
70
71 if isinstance(value, six.string_types):
72 try:
73 value = convert_dict(json.loads(value), start_mark, end_mark)
74 except Exception as ex: # pylint: disable=W0703,W0612
75 message = 'IAM Policy Documents need to be JSON'
76 matches.append(RuleMatch(path[:], message))
77 return matches
78
79 if not isinstance(value, dict):
80 message = 'IAM Policy Documents needs to be JSON'
81 matches.append(
82 RuleMatch(path[:], message))
83 return matches
84
85 for p_vs, p_p in value.items_safe(path[:], (dict)):
86 for parent_key, parent_value in p_vs.items():
87 if parent_key not in valid_keys:
88 message = 'IAM Policy key %s doesn\'t exist.' % (parent_key)
89 matches.append(
90 RuleMatch(path[:] + p_p + [parent_key], message))
91 if parent_key == 'Version':
92 if parent_value not in valid_versions:
93 message = 'IAM Policy Version needs to be one of (%s).' % (
94 ', '.join(map(str, ['2012-10-17', '2008-10-17'])))
95 matches.append(
96 RuleMatch(p_p + [parent_key], message))
97 if parent_key == 'Statement':
98 if isinstance(parent_value, list):
99 for i_s_v, i_s_p in parent_value.items_safe(p_p + ['Statement'], (dict)):
100 matches.extend(
101 self._check_policy_statement(
102 i_s_p, i_s_v, is_identity_policy, resource_exceptions
103 )
104 )
105 elif isinstance(parent_value, dict):
106 for i_s_v, i_s_p in parent_value.items_safe(p_p + ['Statement']):
107 matches.extend(
108 self._check_policy_statement(
109 i_s_p, i_s_v, is_identity_policy, resource_exceptions
110 )
111 )
112 else:
113 message = 'IAM Policy statement should be of list.'
114 matches.append(
115 RuleMatch(p_p + [parent_key], message))
116 return matches
117
118 def _check_policy_statement(self, branch, statement, is_identity_policy, resource_exceptions):
119 """Check statements"""
120 matches = []
121 statement_valid_keys = [
122 'Effect',
123 'Principal',
124 'NotPrincipal',
125 'Action',
126 'NotAction',
127 'Resource',
128 'NotResource',
129 'Condition',
130 'Sid',
131 ]
132
133 for key, _ in statement.items():
134 if key not in statement_valid_keys:
135 message = 'IAM Policy statement key %s isn\'t valid' % (key)
136 matches.append(
137 RuleMatch(branch[:] + [key], message))
138 if 'Effect' not in statement:
139 message = 'IAM Policy statement missing Effect'
140 matches.append(
141 RuleMatch(branch[:], message))
142 else:
143 effect = statement.get('Effect')
144 if effect not in ['Allow', 'Deny']:
145 message = 'IAM Policy Effect should be Allow or Deny'
146 matches.append(
147 RuleMatch(branch[:] + ['Effect'], message))
148 if 'Action' not in statement and 'NotAction' not in statement:
149 message = 'IAM Policy statement missing Action or NotAction'
150 matches.append(
151 RuleMatch(branch[:], message))
152 if is_identity_policy:
153 if 'Principal' in statement or 'NotPrincipal' in statement:
154 message = 'IAM Resource Policy statement shouldn\'t have Principal or NotPrincipal'
155 matches.append(
156 RuleMatch(branch[:], message))
157 else:
158 if 'Principal' not in statement and 'NotPrincipal' not in statement:
159 message = 'IAM Resource Policy statement should have Principal or NotPrincipal'
160 matches.append(
161 RuleMatch(branch[:] + ['Principal'], message))
162 if not resource_exceptions:
163 if 'Resource' not in statement and 'NotResource' not in statement:
164 message = 'IAM Policy statement missing Resource or NotResource'
165 matches.append(
166 RuleMatch(branch[:], message))
167
168 return(matches)
169
170 def match_resource_properties(self, properties, resourcetype, path, cfn):
171 """Check CloudFormation Properties"""
172 matches = []
173
174 is_identity_policy = True
175 if resourcetype in self.resources_and_keys:
176 is_identity_policy = False
177
178 key = None
179 if resourcetype in self.resources_and_keys:
180 key = self.resources_and_keys.get(resourcetype)
181 else:
182 key = self.idp_and_keys.get(resourcetype)
183
184 if not key:
185 # Key isn't defined return nothing
186 return matches
187
188 resource_exceptions = False
189 if key == self.resource_exceptions.get(resourcetype):
190 resource_exceptions = True
191
192 other_keys = []
193 for key, value in self.resources_and_keys.items():
194 if value != 'Policies':
195 other_keys.append(key)
196 for key, value in self.idp_and_keys.items():
197 if value != 'Policies':
198 other_keys.append(key)
199
200 for key, value in properties.items():
201 if key == 'Policies' and isinstance(value, list):
202 for index, policy in enumerate(properties.get(key, [])):
203 matches.extend(
204 cfn.check_value(
205 obj=policy, key='PolicyDocument',
206 path=path[:] + ['Policies', index],
207 check_value=self.check_policy_document,
208 is_identity_policy=is_identity_policy,
209 resource_exceptions=resource_exceptions,
210 start_mark=key.start_mark, end_mark=key.end_mark,
211 ))
212 elif key in ['KeyPolicy', 'PolicyDocument', 'RepositoryPolicyText', 'AccessPolicies']:
213 matches.extend(
214 cfn.check_value(
215 obj=properties, key=key,
216 path=path[:],
217 check_value=self.check_policy_document,
218 is_identity_policy=is_identity_policy,
219 resource_exceptions=resource_exceptions,
220 start_mark=key.start_mark, end_mark=key.end_mark,
221 ))
222
223 return matches
224
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/iam/Policy.py b/src/cfnlint/rules/resources/iam/Policy.py
--- a/src/cfnlint/rules/resources/iam/Policy.py
+++ b/src/cfnlint/rules/resources/iam/Policy.py
@@ -140,11 +140,12 @@
matches.append(
RuleMatch(branch[:], message))
else:
- effect = statement.get('Effect')
- if effect not in ['Allow', 'Deny']:
- message = 'IAM Policy Effect should be Allow or Deny'
- matches.append(
- RuleMatch(branch[:] + ['Effect'], message))
+ for effect, effect_path in statement.get_safe('Effect'):
+ if isinstance(effect, six.string_types):
+ if effect not in ['Allow', 'Deny']:
+ message = 'IAM Policy Effect should be Allow or Deny'
+ matches.append(
+ RuleMatch(branch[:] + effect_path, message))
if 'Action' not in statement and 'NotAction' not in statement:
message = 'IAM Policy statement missing Action or NotAction'
matches.append(
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/iam/Policy.py b/src/cfnlint/rules/resources/iam/Policy.py\n--- a/src/cfnlint/rules/resources/iam/Policy.py\n+++ b/src/cfnlint/rules/resources/iam/Policy.py\n@@ -140,11 +140,12 @@\n matches.append(\n RuleMatch(branch[:], message))\n else:\n- effect = statement.get('Effect')\n- if effect not in ['Allow', 'Deny']:\n- message = 'IAM Policy Effect should be Allow or Deny'\n- matches.append(\n- RuleMatch(branch[:] + ['Effect'], message))\n+ for effect, effect_path in statement.get_safe('Effect'):\n+ if isinstance(effect, six.string_types):\n+ if effect not in ['Allow', 'Deny']:\n+ message = 'IAM Policy Effect should be Allow or Deny'\n+ matches.append(\n+ RuleMatch(branch[:] + effect_path, message))\n if 'Action' not in statement and 'NotAction' not in statement:\n message = 'IAM Policy statement missing Action or NotAction'\n matches.append(\n", "issue": "E2057 False positive for IAM policy Effect conditionals\n*cfn-lint version: (`cfn-lint --version`)*\r\ncfn-lint 0.21.4\r\n\r\n*Description of issue.*\r\ncfn-lint reports \"E2507 IAM Policy Effect should be Allow or Deny\" when an IAM policy statement contains an intrinsic statement, even if that intrinsic returns Allow or Deny eg:\r\n\r\n`Effect: !If [ IsUsEast1, 'Allow', 'Deny' ]`\r\n\r\nI would guess this probably affects all IAM policy statements and intrinsics, I have observed it for the following policy types:\r\n- AWS::S3::BucketPolicy\r\n- AWS::IAM::ManagedPolicy\r\n- AWS::IAM::User policies\r\n\r\nAnd the following intrinsics\r\n- !If\r\n- !Sub\r\n- !Ref\r\n\r\nA simple reproduce example termplate is attached: [E2507-false-positive.yml.txt](https://github.com/aws-cloudformation/cfn-python-lint/files/3269255/E2507-false-positive.yml.txt)\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport json\nfrom datetime import date\nimport six\nfrom cfnlint.helpers import convert_dict\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Policy(CloudFormationLintRule):\n \"\"\"Check if IAM Policy JSON is correct\"\"\"\n id = 'E2507'\n shortdesc = 'Check if IAM Policies are properly configured'\n description = 'See if there elements inside an IAM policy ' + \\\n 'are correct'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html'\n tags = ['properties', 'iam']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(Policy, self).__init__()\n self.resource_exceptions = {\n 'AWS::ECR::Repository': 'RepositoryPolicyText',\n }\n self.resources_and_keys = {\n 'AWS::SNS::TopicPolicy': 'PolicyDocument',\n 'AWS::S3::BucketPolicy': 'PolicyDocument',\n 'AWS::KMS::Key': 'KeyPolicy',\n 'AWS::SQS::QueuePolicy': 'PolicyDocument',\n 'AWS::ECR::Repository': 'RepositoryPolicyText',\n 'AWS::Elasticsearch::Domain': 'AccessPolicies',\n }\n self.idp_and_keys = {\n 'AWS::IAM::Group': 'Policies',\n 'AWS::IAM::ManagedPolicy': 'PolicyDocument',\n 'AWS::IAM::Policy': 'PolicyDocument',\n 'AWS::IAM::Role': 'Policies',\n 'AWS::IAM::User': 'Policies',\n }\n for resource_type in self.resources_and_keys:\n self.resource_property_types.append(resource_type)\n for resource_type in self.idp_and_keys:\n self.resource_property_types.append(resource_type)\n\n def check_policy_document(self, value, path, is_identity_policy, resource_exceptions, start_mark, end_mark):\n \"\"\"Check policy document\"\"\"\n matches = []\n\n valid_keys = [\n 'Version',\n 'Id',\n 'Statement',\n ]\n valid_versions = ['2012-10-17', '2008-10-17', date(2012, 10, 17), date(2008, 10, 17)]\n\n if isinstance(value, six.string_types):\n try:\n value = convert_dict(json.loads(value), start_mark, end_mark)\n except Exception as ex: # pylint: disable=W0703,W0612\n message = 'IAM Policy Documents need to be JSON'\n matches.append(RuleMatch(path[:], message))\n return matches\n\n if not isinstance(value, dict):\n message = 'IAM Policy Documents needs to be JSON'\n matches.append(\n RuleMatch(path[:], message))\n return matches\n\n for p_vs, p_p in value.items_safe(path[:], (dict)):\n for parent_key, parent_value in p_vs.items():\n if parent_key not in valid_keys:\n message = 'IAM Policy key %s doesn\\'t exist.' % (parent_key)\n matches.append(\n RuleMatch(path[:] + p_p + [parent_key], message))\n if parent_key == 'Version':\n if parent_value not in valid_versions:\n message = 'IAM Policy Version needs to be one of (%s).' 
% (\n ', '.join(map(str, ['2012-10-17', '2008-10-17'])))\n matches.append(\n RuleMatch(p_p + [parent_key], message))\n if parent_key == 'Statement':\n if isinstance(parent_value, list):\n for i_s_v, i_s_p in parent_value.items_safe(p_p + ['Statement'], (dict)):\n matches.extend(\n self._check_policy_statement(\n i_s_p, i_s_v, is_identity_policy, resource_exceptions\n )\n )\n elif isinstance(parent_value, dict):\n for i_s_v, i_s_p in parent_value.items_safe(p_p + ['Statement']):\n matches.extend(\n self._check_policy_statement(\n i_s_p, i_s_v, is_identity_policy, resource_exceptions\n )\n )\n else:\n message = 'IAM Policy statement should be of list.'\n matches.append(\n RuleMatch(p_p + [parent_key], message))\n return matches\n\n def _check_policy_statement(self, branch, statement, is_identity_policy, resource_exceptions):\n \"\"\"Check statements\"\"\"\n matches = []\n statement_valid_keys = [\n 'Effect',\n 'Principal',\n 'NotPrincipal',\n 'Action',\n 'NotAction',\n 'Resource',\n 'NotResource',\n 'Condition',\n 'Sid',\n ]\n\n for key, _ in statement.items():\n if key not in statement_valid_keys:\n message = 'IAM Policy statement key %s isn\\'t valid' % (key)\n matches.append(\n RuleMatch(branch[:] + [key], message))\n if 'Effect' not in statement:\n message = 'IAM Policy statement missing Effect'\n matches.append(\n RuleMatch(branch[:], message))\n else:\n effect = statement.get('Effect')\n if effect not in ['Allow', 'Deny']:\n message = 'IAM Policy Effect should be Allow or Deny'\n matches.append(\n RuleMatch(branch[:] + ['Effect'], message))\n if 'Action' not in statement and 'NotAction' not in statement:\n message = 'IAM Policy statement missing Action or NotAction'\n matches.append(\n RuleMatch(branch[:], message))\n if is_identity_policy:\n if 'Principal' in statement or 'NotPrincipal' in statement:\n message = 'IAM Resource Policy statement shouldn\\'t have Principal or NotPrincipal'\n matches.append(\n RuleMatch(branch[:], message))\n else:\n if 'Principal' not in statement and 'NotPrincipal' not in statement:\n message = 'IAM Resource Policy statement should have Principal or NotPrincipal'\n matches.append(\n RuleMatch(branch[:] + ['Principal'], message))\n if not resource_exceptions:\n if 'Resource' not in statement and 'NotResource' not in statement:\n message = 'IAM Policy statement missing Resource or NotResource'\n matches.append(\n RuleMatch(branch[:], message))\n\n return(matches)\n\n def match_resource_properties(self, properties, resourcetype, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n is_identity_policy = True\n if resourcetype in self.resources_and_keys:\n is_identity_policy = False\n\n key = None\n if resourcetype in self.resources_and_keys:\n key = self.resources_and_keys.get(resourcetype)\n else:\n key = self.idp_and_keys.get(resourcetype)\n\n if not key:\n # Key isn't defined return nothing\n return matches\n\n resource_exceptions = False\n if key == self.resource_exceptions.get(resourcetype):\n resource_exceptions = True\n\n other_keys = []\n for key, value in self.resources_and_keys.items():\n if value != 'Policies':\n other_keys.append(key)\n for key, value in self.idp_and_keys.items():\n if value != 'Policies':\n other_keys.append(key)\n\n for key, value in properties.items():\n if key == 'Policies' and isinstance(value, list):\n for index, policy in enumerate(properties.get(key, [])):\n matches.extend(\n cfn.check_value(\n obj=policy, key='PolicyDocument',\n path=path[:] + ['Policies', index],\n 
check_value=self.check_policy_document,\n is_identity_policy=is_identity_policy,\n resource_exceptions=resource_exceptions,\n start_mark=key.start_mark, end_mark=key.end_mark,\n ))\n elif key in ['KeyPolicy', 'PolicyDocument', 'RepositoryPolicyText', 'AccessPolicies']:\n matches.extend(\n cfn.check_value(\n obj=properties, key=key,\n path=path[:],\n check_value=self.check_policy_document,\n is_identity_policy=is_identity_policy,\n resource_exceptions=resource_exceptions,\n start_mark=key.start_mark, end_mark=key.end_mark,\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/iam/Policy.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport json\nfrom datetime import date\nimport six\nfrom cfnlint.helpers import convert_dict\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Policy(CloudFormationLintRule):\n \"\"\"Check if IAM Policy JSON is correct\"\"\"\n id = 'E2507'\n shortdesc = 'Check if IAM Policies are properly configured'\n description = 'See if there elements inside an IAM policy ' + \\\n 'are correct'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html'\n tags = ['properties', 'iam']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(Policy, self).__init__()\n self.resource_exceptions = {\n 'AWS::ECR::Repository': 'RepositoryPolicyText',\n }\n self.resources_and_keys = {\n 'AWS::SNS::TopicPolicy': 'PolicyDocument',\n 'AWS::S3::BucketPolicy': 'PolicyDocument',\n 'AWS::KMS::Key': 'KeyPolicy',\n 'AWS::SQS::QueuePolicy': 'PolicyDocument',\n 'AWS::ECR::Repository': 'RepositoryPolicyText',\n 'AWS::Elasticsearch::Domain': 'AccessPolicies',\n }\n self.idp_and_keys = {\n 'AWS::IAM::Group': 'Policies',\n 'AWS::IAM::ManagedPolicy': 'PolicyDocument',\n 'AWS::IAM::Policy': 'PolicyDocument',\n 'AWS::IAM::Role': 'Policies',\n 'AWS::IAM::User': 'Policies',\n }\n for resource_type in self.resources_and_keys:\n self.resource_property_types.append(resource_type)\n for resource_type in self.idp_and_keys:\n self.resource_property_types.append(resource_type)\n\n def check_policy_document(self, value, path, is_identity_policy, resource_exceptions, start_mark, end_mark):\n \"\"\"Check policy document\"\"\"\n matches = []\n\n valid_keys = [\n 'Version',\n 'Id',\n 'Statement',\n ]\n valid_versions = ['2012-10-17', '2008-10-17', date(2012, 10, 17), date(2008, 10, 17)]\n\n if isinstance(value, six.string_types):\n try:\n value = convert_dict(json.loads(value), start_mark, end_mark)\n except Exception as ex: # pylint: disable=W0703,W0612\n message = 'IAM 
Policy Documents need to be JSON'\n matches.append(RuleMatch(path[:], message))\n return matches\n\n if not isinstance(value, dict):\n message = 'IAM Policy Documents needs to be JSON'\n matches.append(\n RuleMatch(path[:], message))\n return matches\n\n for p_vs, p_p in value.items_safe(path[:], (dict)):\n for parent_key, parent_value in p_vs.items():\n if parent_key not in valid_keys:\n message = 'IAM Policy key %s doesn\\'t exist.' % (parent_key)\n matches.append(\n RuleMatch(path[:] + p_p + [parent_key], message))\n if parent_key == 'Version':\n if parent_value not in valid_versions:\n message = 'IAM Policy Version needs to be one of (%s).' % (\n ', '.join(map(str, ['2012-10-17', '2008-10-17'])))\n matches.append(\n RuleMatch(p_p + [parent_key], message))\n if parent_key == 'Statement':\n if isinstance(parent_value, list):\n for i_s_v, i_s_p in parent_value.items_safe(p_p + ['Statement'], (dict)):\n matches.extend(\n self._check_policy_statement(\n i_s_p, i_s_v, is_identity_policy, resource_exceptions\n )\n )\n elif isinstance(parent_value, dict):\n for i_s_v, i_s_p in parent_value.items_safe(p_p + ['Statement']):\n matches.extend(\n self._check_policy_statement(\n i_s_p, i_s_v, is_identity_policy, resource_exceptions\n )\n )\n else:\n message = 'IAM Policy statement should be of list.'\n matches.append(\n RuleMatch(p_p + [parent_key], message))\n return matches\n\n def _check_policy_statement(self, branch, statement, is_identity_policy, resource_exceptions):\n \"\"\"Check statements\"\"\"\n matches = []\n statement_valid_keys = [\n 'Effect',\n 'Principal',\n 'NotPrincipal',\n 'Action',\n 'NotAction',\n 'Resource',\n 'NotResource',\n 'Condition',\n 'Sid',\n ]\n\n for key, _ in statement.items():\n if key not in statement_valid_keys:\n message = 'IAM Policy statement key %s isn\\'t valid' % (key)\n matches.append(\n RuleMatch(branch[:] + [key], message))\n if 'Effect' not in statement:\n message = 'IAM Policy statement missing Effect'\n matches.append(\n RuleMatch(branch[:], message))\n else:\n for effect, effect_path in statement.get_safe('Effect'):\n if isinstance(effect, six.string_types):\n if effect not in ['Allow', 'Deny']:\n message = 'IAM Policy Effect should be Allow or Deny'\n matches.append(\n RuleMatch(branch[:] + effect_path, message))\n if 'Action' not in statement and 'NotAction' not in statement:\n message = 'IAM Policy statement missing Action or NotAction'\n matches.append(\n RuleMatch(branch[:], message))\n if is_identity_policy:\n if 'Principal' in statement or 'NotPrincipal' in statement:\n message = 'IAM Resource Policy statement shouldn\\'t have Principal or NotPrincipal'\n matches.append(\n RuleMatch(branch[:], message))\n else:\n if 'Principal' not in statement and 'NotPrincipal' not in statement:\n message = 'IAM Resource Policy statement should have Principal or NotPrincipal'\n matches.append(\n RuleMatch(branch[:] + ['Principal'], message))\n if not resource_exceptions:\n if 'Resource' not in statement and 'NotResource' not in statement:\n message = 'IAM Policy statement missing Resource or NotResource'\n matches.append(\n RuleMatch(branch[:], message))\n\n return(matches)\n\n def match_resource_properties(self, properties, resourcetype, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n is_identity_policy = True\n if resourcetype in self.resources_and_keys:\n is_identity_policy = False\n\n key = None\n if resourcetype in self.resources_and_keys:\n key = self.resources_and_keys.get(resourcetype)\n else:\n key = 
self.idp_and_keys.get(resourcetype)\n\n if not key:\n # Key isn't defined return nothing\n return matches\n\n resource_exceptions = False\n if key == self.resource_exceptions.get(resourcetype):\n resource_exceptions = True\n\n other_keys = []\n for key, value in self.resources_and_keys.items():\n if value != 'Policies':\n other_keys.append(key)\n for key, value in self.idp_and_keys.items():\n if value != 'Policies':\n other_keys.append(key)\n\n for key, value in properties.items():\n if key == 'Policies' and isinstance(value, list):\n for index, policy in enumerate(properties.get(key, [])):\n matches.extend(\n cfn.check_value(\n obj=policy, key='PolicyDocument',\n path=path[:] + ['Policies', index],\n check_value=self.check_policy_document,\n is_identity_policy=is_identity_policy,\n resource_exceptions=resource_exceptions,\n start_mark=key.start_mark, end_mark=key.end_mark,\n ))\n elif key in ['KeyPolicy', 'PolicyDocument', 'RepositoryPolicyText', 'AccessPolicies']:\n matches.extend(\n cfn.check_value(\n obj=properties, key=key,\n path=path[:],\n check_value=self.check_policy_document,\n is_identity_policy=is_identity_policy,\n resource_exceptions=resource_exceptions,\n start_mark=key.start_mark, end_mark=key.end_mark,\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/iam/Policy.py"}]} | 3,014 | 244 |
gh_patches_debug_15358 | rasdani/github-patches | git_diff | mkdocs__mkdocs-2852 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
What is the expected behavior of `readme.md`?
According to the docs, [Writing-your-docs:Index-pages](https://www.mkdocs.org/user-guide/writing-your-docs/#index-pages), `README.md` will render into `index.html`. There is no mention of `readme.md`. The implementation reflects this behavior: https://github.com/mkdocs/mkdocs/blob/dc35569ade5e101c8c48b78bbed8708dd0b1b49a/mkdocs/structure/files.py#L151
But the check in [get_files()](https://github.com/mkdocs/mkdocs/blob/dc35569ade5e101c8c48b78bbed8708dd0b1b49a/mkdocs/structure/files.py#L225) ignores casing:
https://github.com/mkdocs/mkdocs/blob/dc35569ade5e101c8c48b78bbed8708dd0b1b49a/mkdocs/structure/files.py#L246-L247
I believe it should be:
```python
if filename == 'README.md' and 'index.md' in filenames:
log.warning(f"Both index.md and README.md found. Skipping README.md from {source_dir}")
```
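
For context, here is a small standalone sketch (file names are illustrative) of the casing mismatch: `File._get_stem()` only maps an exact `README` stem to `index`, while the skip in `get_files()` matches `readme.md` case-insensitively:

```python
import os

def get_stem(src_path):
    # Mirrors File._get_stem(): only the exact stems 'index' and 'README' map to 'index'.
    stem, _ext = os.path.splitext(os.path.basename(src_path))
    return 'index' if stem in ('index', 'README') else stem

print(get_stem('README.md'))  # 'index'  -> would render as index.html
print(get_stem('readme.md'))  # 'readme' -> would render as readme.html
# Yet get_files() tests filename.lower() == 'readme.md', so a lowercase readme.md
# is skipped whenever index.md exists, even though it would not have produced
# index.html in the first place.
```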
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/structure/files.py`
Content:
```
1 import fnmatch
2 import os
3 import logging
4 from urllib.parse import quote as urlquote
5
6 from mkdocs import utils
7
8
9 log = logging.getLogger(__name__)
10
11
12 class Files:
13 """ A collection of File objects. """
14 def __init__(self, files):
15 self._files = files
16
17 def __iter__(self):
18 return iter(self._files)
19
20 def __len__(self):
21 return len(self._files)
22
23 def __contains__(self, path):
24 return path in self.src_paths
25
26 @property
27 def src_paths(self):
28 return {file.src_path: file for file in self._files}
29
30 def get_file_from_path(self, path):
31 """ Return a File instance with File.src_path equal to path. """
32 return self.src_paths.get(os.path.normpath(path))
33
34 def append(self, file):
35 """ Append file to Files collection. """
36 self._files.append(file)
37
38 def remove(self, file):
39 """ Remove file from Files collection. """
40 self._files.remove(file)
41
42 def copy_static_files(self, dirty=False):
43 """ Copy static files from source to destination. """
44 for file in self:
45 if not file.is_documentation_page():
46 file.copy_file(dirty)
47
48 def documentation_pages(self):
49 """ Return iterable of all Markdown page file objects. """
50 return [file for file in self if file.is_documentation_page()]
51
52 def static_pages(self):
53 """ Return iterable of all static page file objects. """
54 return [file for file in self if file.is_static_page()]
55
56 def media_files(self):
57 """ Return iterable of all file objects which are not documentation or static pages. """
58 return [file for file in self if file.is_media_file()]
59
60 def javascript_files(self):
61 """ Return iterable of all javascript file objects. """
62 return [file for file in self if file.is_javascript()]
63
64 def css_files(self):
65 """ Return iterable of all CSS file objects. """
66 return [file for file in self if file.is_css()]
67
68 def add_files_from_theme(self, env, config):
69 """ Retrieve static files from Jinja environment and add to collection. """
70 def filter(name):
71 # '.*' filters dot files/dirs at root level whereas '*/.*' filters nested levels
72 patterns = ['.*', '*/.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']
73 # Exclude translation files
74 patterns.append("locales/*")
75 patterns.extend(f'*{x}' for x in utils.markdown_extensions)
76 patterns.extend(config['theme'].static_templates)
77 for pattern in patterns:
78 if fnmatch.fnmatch(name.lower(), pattern):
79 return False
80 return True
81 for path in env.list_templates(filter_func=filter):
82 # Theme files do not override docs_dir files
83 path = os.path.normpath(path)
84 if path not in self:
85 for dir in config['theme'].dirs:
86 # Find the first theme dir which contains path
87 if os.path.isfile(os.path.join(dir, path)):
88 self.append(File(path, dir, config['site_dir'], config['use_directory_urls']))
89 break
90
91
92 class File:
93 """
94 A MkDocs File object.
95
96 Points to the source and destination locations of a file.
97
98 The `path` argument must be a path that exists relative to `src_dir`.
99
100 The `src_dir` and `dest_dir` must be absolute paths on the local file system.
101
102 The `use_directory_urls` argument controls how destination paths are generated. If `False`, a Markdown file is
103 mapped to an HTML file of the same name (the file extension is changed to `.html`). If True, a Markdown file is
104 mapped to an HTML index file (`index.html`) nested in a directory using the "name" of the file in `path`. The
105 `use_directory_urls` argument has no effect on non-Markdown files.
106
107 File objects have the following properties, which are Unicode strings:
108
109 File.src_path
110 The pure path of the source file relative to the source directory.
111
112 File.abs_src_path
113 The absolute concrete path of the source file.
114
115 File.dest_path
116 The pure path of the destination file relative to the destination directory.
117
118 File.abs_dest_path
119 The absolute concrete path of the destination file.
120
121 File.url
122 The url of the destination file relative to the destination directory as a string.
123 """
124 def __init__(self, path, src_dir, dest_dir, use_directory_urls):
125 self.page = None
126 self.src_path = os.path.normpath(path)
127 self.abs_src_path = os.path.normpath(os.path.join(src_dir, self.src_path))
128 self.name = self._get_stem()
129 self.dest_path = self._get_dest_path(use_directory_urls)
130 self.abs_dest_path = os.path.normpath(os.path.join(dest_dir, self.dest_path))
131 self.url = self._get_url(use_directory_urls)
132
133 def __eq__(self, other):
134 return (
135 isinstance(other, self.__class__) and
136 self.src_path == other.src_path and
137 self.abs_src_path == other.abs_src_path and
138 self.url == other.url
139 )
140
141 def __repr__(self):
142 return (
143 f"File(src_path='{self.src_path}', dest_path='{self.dest_path}',"
144 f" name='{self.name}', url='{self.url}')"
145 )
146
147 def _get_stem(self):
148 """ Return the name of the file without it's extension. """
149 filename = os.path.basename(self.src_path)
150 stem, ext = os.path.splitext(filename)
151 return 'index' if stem in ('index', 'README') else stem
152
153 def _get_dest_path(self, use_directory_urls):
154 """ Return destination path based on source path. """
155 if self.is_documentation_page():
156 parent, filename = os.path.split(self.src_path)
157 if not use_directory_urls or self.name == 'index':
158 # index.md or README.md => index.html
159 # foo.md => foo.html
160 return os.path.join(parent, self.name + '.html')
161 else:
162 # foo.md => foo/index.html
163 return os.path.join(parent, self.name, 'index.html')
164 return self.src_path
165
166 def _get_url(self, use_directory_urls):
167 """ Return url based in destination path. """
168 url = self.dest_path.replace(os.path.sep, '/')
169 dirname, filename = os.path.split(url)
170 if use_directory_urls and filename == 'index.html':
171 if dirname == '':
172 url = '.'
173 else:
174 url = dirname + '/'
175 return urlquote(url)
176
177 def url_relative_to(self, other):
178 """ Return url for file relative to other file. """
179 return utils.get_relative_url(self.url, other.url if isinstance(other, File) else other)
180
181 def copy_file(self, dirty=False):
182 """ Copy source file to destination, ensuring parent directories exist. """
183 if dirty and not self.is_modified():
184 log.debug(f"Skip copying unmodified file: '{self.src_path}'")
185 else:
186 log.debug(f"Copying media file: '{self.src_path}'")
187 utils.copy_file(self.abs_src_path, self.abs_dest_path)
188
189 def is_modified(self):
190 if os.path.isfile(self.abs_dest_path):
191 return os.path.getmtime(self.abs_dest_path) < os.path.getmtime(self.abs_src_path)
192 return True
193
194 def is_documentation_page(self):
195 """ Return True if file is a Markdown page. """
196 return os.path.splitext(self.src_path)[1] in utils.markdown_extensions
197
198 def is_static_page(self):
199 """ Return True if file is a static page (html, xml, json). """
200 return os.path.splitext(self.src_path)[1] in (
201 '.html',
202 '.htm',
203 '.xml',
204 '.json',
205 )
206
207 def is_media_file(self):
208 """ Return True if file is not a documentation or static page. """
209 return not (self.is_documentation_page() or self.is_static_page())
210
211 def is_javascript(self):
212 """ Return True if file is a JavaScript file. """
213 return os.path.splitext(self.src_path)[1] in (
214 '.js',
215 '.javascript',
216 )
217
218 def is_css(self):
219 """ Return True if file is a CSS file. """
220 return os.path.splitext(self.src_path)[1] in (
221 '.css',
222 )
223
224
225 def get_files(config):
226 """ Walk the `docs_dir` and return a Files collection. """
227 files = []
228 exclude = ['.*', '/templates']
229
230 for source_dir, dirnames, filenames in os.walk(config['docs_dir'], followlinks=True):
231 relative_dir = os.path.relpath(source_dir, config['docs_dir'])
232
233 for dirname in list(dirnames):
234 path = os.path.normpath(os.path.join(relative_dir, dirname))
235 # Skip any excluded directories
236 if _filter_paths(basename=dirname, path=path, is_dir=True, exclude=exclude):
237 dirnames.remove(dirname)
238 dirnames.sort()
239
240 for filename in _sort_files(filenames):
241 path = os.path.normpath(os.path.join(relative_dir, filename))
242 # Skip any excluded files
243 if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude):
244 continue
245 # Skip README.md if an index file also exists in dir
246 if filename.lower() == 'readme.md' and 'index.md' in filenames:
247 log.warning(f"Both index.md and readme.md found. Skipping readme.md from {source_dir}")
248 continue
249 files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls']))
250
251 return Files(files)
252
253
254 def _sort_files(filenames):
255 """ Always sort `index` or `README` as first filename in list. """
256
257 def key(f):
258 if os.path.splitext(f)[0] in ['index', 'README']:
259 return (0,)
260 return (1, f)
261
262 return sorted(filenames, key=key)
263
264
265 def _filter_paths(basename, path, is_dir, exclude):
266 """ .gitignore style file filtering. """
267 for item in exclude:
268 # Items ending in '/' apply only to directories.
269 if item.endswith('/') and not is_dir:
270 continue
271 # Items starting with '/' apply to the whole path.
272 # In any other cases just the basename is used.
273 match = path if item.startswith('/') else basename
274 if fnmatch.fnmatch(match, item.strip('/')):
275 return True
276 return False
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/structure/files.py b/mkdocs/structure/files.py
--- a/mkdocs/structure/files.py
+++ b/mkdocs/structure/files.py
@@ -243,8 +243,8 @@
if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude):
continue
# Skip README.md if an index file also exists in dir
- if filename.lower() == 'readme.md' and 'index.md' in filenames:
- log.warning(f"Both index.md and readme.md found. Skipping readme.md from {source_dir}")
+ if filename == 'README.md' and 'index.md' in filenames:
+ log.warning(f"Both index.md and README.md found. Skipping README.md from {source_dir}")
continue
files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls']))
| {"golden_diff": "diff --git a/mkdocs/structure/files.py b/mkdocs/structure/files.py\n--- a/mkdocs/structure/files.py\n+++ b/mkdocs/structure/files.py\n@@ -243,8 +243,8 @@\n if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude):\n continue\n # Skip README.md if an index file also exists in dir\n- if filename.lower() == 'readme.md' and 'index.md' in filenames:\n- log.warning(f\"Both index.md and readme.md found. Skipping readme.md from {source_dir}\")\n+ if filename == 'README.md' and 'index.md' in filenames:\n+ log.warning(f\"Both index.md and README.md found. Skipping README.md from {source_dir}\")\n continue\n files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls']))\n", "issue": "What is the expected behavior of `readme.md`?\nAccording to the docs, [Writing-your-docs:Index-pages](https://www.mkdocs.org/user-guide/writing-your-docs/#index-pages), `README.md` will render into `index.html`. There is no mention of `readme.md`. The implementation reflects this behavior: https://github.com/mkdocs/mkdocs/blob/dc35569ade5e101c8c48b78bbed8708dd0b1b49a/mkdocs/structure/files.py#L151\r\n\r\nBut the check in [get_files()](https://github.com/mkdocs/mkdocs/blob/dc35569ade5e101c8c48b78bbed8708dd0b1b49a/mkdocs/structure/files.py#L225) ignores casing:\r\nhttps://github.com/mkdocs/mkdocs/blob/dc35569ade5e101c8c48b78bbed8708dd0b1b49a/mkdocs/structure/files.py#L246-L247\r\n\r\nI believe it should be:\r\n```python\r\nif filename == 'README.md' and 'index.md' in filenames:\r\n log.warning(f\"Both index.md and README.md found. Skipping README.md from {source_dir}\")\r\n```\n", "before_files": [{"content": "import fnmatch\nimport os\nimport logging\nfrom urllib.parse import quote as urlquote\n\nfrom mkdocs import utils\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Files:\n \"\"\" A collection of File objects. \"\"\"\n def __init__(self, files):\n self._files = files\n\n def __iter__(self):\n return iter(self._files)\n\n def __len__(self):\n return len(self._files)\n\n def __contains__(self, path):\n return path in self.src_paths\n\n @property\n def src_paths(self):\n return {file.src_path: file for file in self._files}\n\n def get_file_from_path(self, path):\n \"\"\" Return a File instance with File.src_path equal to path. \"\"\"\n return self.src_paths.get(os.path.normpath(path))\n\n def append(self, file):\n \"\"\" Append file to Files collection. \"\"\"\n self._files.append(file)\n\n def remove(self, file):\n \"\"\" Remove file from Files collection. \"\"\"\n self._files.remove(file)\n\n def copy_static_files(self, dirty=False):\n \"\"\" Copy static files from source to destination. \"\"\"\n for file in self:\n if not file.is_documentation_page():\n file.copy_file(dirty)\n\n def documentation_pages(self):\n \"\"\" Return iterable of all Markdown page file objects. \"\"\"\n return [file for file in self if file.is_documentation_page()]\n\n def static_pages(self):\n \"\"\" Return iterable of all static page file objects. \"\"\"\n return [file for file in self if file.is_static_page()]\n\n def media_files(self):\n \"\"\" Return iterable of all file objects which are not documentation or static pages. \"\"\"\n return [file for file in self if file.is_media_file()]\n\n def javascript_files(self):\n \"\"\" Return iterable of all javascript file objects. \"\"\"\n return [file for file in self if file.is_javascript()]\n\n def css_files(self):\n \"\"\" Return iterable of all CSS file objects. 
\"\"\"\n return [file for file in self if file.is_css()]\n\n def add_files_from_theme(self, env, config):\n \"\"\" Retrieve static files from Jinja environment and add to collection. \"\"\"\n def filter(name):\n # '.*' filters dot files/dirs at root level whereas '*/.*' filters nested levels\n patterns = ['.*', '*/.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']\n # Exclude translation files\n patterns.append(\"locales/*\")\n patterns.extend(f'*{x}' for x in utils.markdown_extensions)\n patterns.extend(config['theme'].static_templates)\n for pattern in patterns:\n if fnmatch.fnmatch(name.lower(), pattern):\n return False\n return True\n for path in env.list_templates(filter_func=filter):\n # Theme files do not override docs_dir files\n path = os.path.normpath(path)\n if path not in self:\n for dir in config['theme'].dirs:\n # Find the first theme dir which contains path\n if os.path.isfile(os.path.join(dir, path)):\n self.append(File(path, dir, config['site_dir'], config['use_directory_urls']))\n break\n\n\nclass File:\n \"\"\"\n A MkDocs File object.\n\n Points to the source and destination locations of a file.\n\n The `path` argument must be a path that exists relative to `src_dir`.\n\n The `src_dir` and `dest_dir` must be absolute paths on the local file system.\n\n The `use_directory_urls` argument controls how destination paths are generated. If `False`, a Markdown file is\n mapped to an HTML file of the same name (the file extension is changed to `.html`). If True, a Markdown file is\n mapped to an HTML index file (`index.html`) nested in a directory using the \"name\" of the file in `path`. The\n `use_directory_urls` argument has no effect on non-Markdown files.\n\n File objects have the following properties, which are Unicode strings:\n\n File.src_path\n The pure path of the source file relative to the source directory.\n\n File.abs_src_path\n The absolute concrete path of the source file.\n\n File.dest_path\n The pure path of the destination file relative to the destination directory.\n\n File.abs_dest_path\n The absolute concrete path of the destination file.\n\n File.url\n The url of the destination file relative to the destination directory as a string.\n \"\"\"\n def __init__(self, path, src_dir, dest_dir, use_directory_urls):\n self.page = None\n self.src_path = os.path.normpath(path)\n self.abs_src_path = os.path.normpath(os.path.join(src_dir, self.src_path))\n self.name = self._get_stem()\n self.dest_path = self._get_dest_path(use_directory_urls)\n self.abs_dest_path = os.path.normpath(os.path.join(dest_dir, self.dest_path))\n self.url = self._get_url(use_directory_urls)\n\n def __eq__(self, other):\n return (\n isinstance(other, self.__class__) and\n self.src_path == other.src_path and\n self.abs_src_path == other.abs_src_path and\n self.url == other.url\n )\n\n def __repr__(self):\n return (\n f\"File(src_path='{self.src_path}', dest_path='{self.dest_path}',\"\n f\" name='{self.name}', url='{self.url}')\"\n )\n\n def _get_stem(self):\n \"\"\" Return the name of the file without it's extension. \"\"\"\n filename = os.path.basename(self.src_path)\n stem, ext = os.path.splitext(filename)\n return 'index' if stem in ('index', 'README') else stem\n\n def _get_dest_path(self, use_directory_urls):\n \"\"\" Return destination path based on source path. 
\"\"\"\n if self.is_documentation_page():\n parent, filename = os.path.split(self.src_path)\n if not use_directory_urls or self.name == 'index':\n # index.md or README.md => index.html\n # foo.md => foo.html\n return os.path.join(parent, self.name + '.html')\n else:\n # foo.md => foo/index.html\n return os.path.join(parent, self.name, 'index.html')\n return self.src_path\n\n def _get_url(self, use_directory_urls):\n \"\"\" Return url based in destination path. \"\"\"\n url = self.dest_path.replace(os.path.sep, '/')\n dirname, filename = os.path.split(url)\n if use_directory_urls and filename == 'index.html':\n if dirname == '':\n url = '.'\n else:\n url = dirname + '/'\n return urlquote(url)\n\n def url_relative_to(self, other):\n \"\"\" Return url for file relative to other file. \"\"\"\n return utils.get_relative_url(self.url, other.url if isinstance(other, File) else other)\n\n def copy_file(self, dirty=False):\n \"\"\" Copy source file to destination, ensuring parent directories exist. \"\"\"\n if dirty and not self.is_modified():\n log.debug(f\"Skip copying unmodified file: '{self.src_path}'\")\n else:\n log.debug(f\"Copying media file: '{self.src_path}'\")\n utils.copy_file(self.abs_src_path, self.abs_dest_path)\n\n def is_modified(self):\n if os.path.isfile(self.abs_dest_path):\n return os.path.getmtime(self.abs_dest_path) < os.path.getmtime(self.abs_src_path)\n return True\n\n def is_documentation_page(self):\n \"\"\" Return True if file is a Markdown page. \"\"\"\n return os.path.splitext(self.src_path)[1] in utils.markdown_extensions\n\n def is_static_page(self):\n \"\"\" Return True if file is a static page (html, xml, json). \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.html',\n '.htm',\n '.xml',\n '.json',\n )\n\n def is_media_file(self):\n \"\"\" Return True if file is not a documentation or static page. \"\"\"\n return not (self.is_documentation_page() or self.is_static_page())\n\n def is_javascript(self):\n \"\"\" Return True if file is a JavaScript file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.js',\n '.javascript',\n )\n\n def is_css(self):\n \"\"\" Return True if file is a CSS file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.css',\n )\n\n\ndef get_files(config):\n \"\"\" Walk the `docs_dir` and return a Files collection. \"\"\"\n files = []\n exclude = ['.*', '/templates']\n\n for source_dir, dirnames, filenames in os.walk(config['docs_dir'], followlinks=True):\n relative_dir = os.path.relpath(source_dir, config['docs_dir'])\n\n for dirname in list(dirnames):\n path = os.path.normpath(os.path.join(relative_dir, dirname))\n # Skip any excluded directories\n if _filter_paths(basename=dirname, path=path, is_dir=True, exclude=exclude):\n dirnames.remove(dirname)\n dirnames.sort()\n\n for filename in _sort_files(filenames):\n path = os.path.normpath(os.path.join(relative_dir, filename))\n # Skip any excluded files\n if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude):\n continue\n # Skip README.md if an index file also exists in dir\n if filename.lower() == 'readme.md' and 'index.md' in filenames:\n log.warning(f\"Both index.md and readme.md found. Skipping readme.md from {source_dir}\")\n continue\n files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls']))\n\n return Files(files)\n\n\ndef _sort_files(filenames):\n \"\"\" Always sort `index` or `README` as first filename in list. 
\"\"\"\n\n def key(f):\n if os.path.splitext(f)[0] in ['index', 'README']:\n return (0,)\n return (1, f)\n\n return sorted(filenames, key=key)\n\n\ndef _filter_paths(basename, path, is_dir, exclude):\n \"\"\" .gitignore style file filtering. \"\"\"\n for item in exclude:\n # Items ending in '/' apply only to directories.\n if item.endswith('/') and not is_dir:\n continue\n # Items starting with '/' apply to the whole path.\n # In any other cases just the basename is used.\n match = path if item.startswith('/') else basename\n if fnmatch.fnmatch(match, item.strip('/')):\n return True\n return False\n", "path": "mkdocs/structure/files.py"}], "after_files": [{"content": "import fnmatch\nimport os\nimport logging\nfrom urllib.parse import quote as urlquote\n\nfrom mkdocs import utils\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Files:\n \"\"\" A collection of File objects. \"\"\"\n def __init__(self, files):\n self._files = files\n\n def __iter__(self):\n return iter(self._files)\n\n def __len__(self):\n return len(self._files)\n\n def __contains__(self, path):\n return path in self.src_paths\n\n @property\n def src_paths(self):\n return {file.src_path: file for file in self._files}\n\n def get_file_from_path(self, path):\n \"\"\" Return a File instance with File.src_path equal to path. \"\"\"\n return self.src_paths.get(os.path.normpath(path))\n\n def append(self, file):\n \"\"\" Append file to Files collection. \"\"\"\n self._files.append(file)\n\n def remove(self, file):\n \"\"\" Remove file from Files collection. \"\"\"\n self._files.remove(file)\n\n def copy_static_files(self, dirty=False):\n \"\"\" Copy static files from source to destination. \"\"\"\n for file in self:\n if not file.is_documentation_page():\n file.copy_file(dirty)\n\n def documentation_pages(self):\n \"\"\" Return iterable of all Markdown page file objects. \"\"\"\n return [file for file in self if file.is_documentation_page()]\n\n def static_pages(self):\n \"\"\" Return iterable of all static page file objects. \"\"\"\n return [file for file in self if file.is_static_page()]\n\n def media_files(self):\n \"\"\" Return iterable of all file objects which are not documentation or static pages. \"\"\"\n return [file for file in self if file.is_media_file()]\n\n def javascript_files(self):\n \"\"\" Return iterable of all javascript file objects. \"\"\"\n return [file for file in self if file.is_javascript()]\n\n def css_files(self):\n \"\"\" Return iterable of all CSS file objects. \"\"\"\n return [file for file in self if file.is_css()]\n\n def add_files_from_theme(self, env, config):\n \"\"\" Retrieve static files from Jinja environment and add to collection. 
\"\"\"\n def filter(name):\n # '.*' filters dot files/dirs at root level whereas '*/.*' filters nested levels\n patterns = ['.*', '*/.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']\n # Exclude translation files\n patterns.append(\"locales/*\")\n patterns.extend(f'*{x}' for x in utils.markdown_extensions)\n patterns.extend(config['theme'].static_templates)\n for pattern in patterns:\n if fnmatch.fnmatch(name.lower(), pattern):\n return False\n return True\n for path in env.list_templates(filter_func=filter):\n # Theme files do not override docs_dir files\n path = os.path.normpath(path)\n if path not in self:\n for dir in config['theme'].dirs:\n # Find the first theme dir which contains path\n if os.path.isfile(os.path.join(dir, path)):\n self.append(File(path, dir, config['site_dir'], config['use_directory_urls']))\n break\n\n\nclass File:\n \"\"\"\n A MkDocs File object.\n\n Points to the source and destination locations of a file.\n\n The `path` argument must be a path that exists relative to `src_dir`.\n\n The `src_dir` and `dest_dir` must be absolute paths on the local file system.\n\n The `use_directory_urls` argument controls how destination paths are generated. If `False`, a Markdown file is\n mapped to an HTML file of the same name (the file extension is changed to `.html`). If True, a Markdown file is\n mapped to an HTML index file (`index.html`) nested in a directory using the \"name\" of the file in `path`. The\n `use_directory_urls` argument has no effect on non-Markdown files.\n\n File objects have the following properties, which are Unicode strings:\n\n File.src_path\n The pure path of the source file relative to the source directory.\n\n File.abs_src_path\n The absolute concrete path of the source file.\n\n File.dest_path\n The pure path of the destination file relative to the destination directory.\n\n File.abs_dest_path\n The absolute concrete path of the destination file.\n\n File.url\n The url of the destination file relative to the destination directory as a string.\n \"\"\"\n def __init__(self, path, src_dir, dest_dir, use_directory_urls):\n self.page = None\n self.src_path = os.path.normpath(path)\n self.abs_src_path = os.path.normpath(os.path.join(src_dir, self.src_path))\n self.name = self._get_stem()\n self.dest_path = self._get_dest_path(use_directory_urls)\n self.abs_dest_path = os.path.normpath(os.path.join(dest_dir, self.dest_path))\n self.url = self._get_url(use_directory_urls)\n\n def __eq__(self, other):\n return (\n isinstance(other, self.__class__) and\n self.src_path == other.src_path and\n self.abs_src_path == other.abs_src_path and\n self.url == other.url\n )\n\n def __repr__(self):\n return (\n f\"File(src_path='{self.src_path}', dest_path='{self.dest_path}',\"\n f\" name='{self.name}', url='{self.url}')\"\n )\n\n def _get_stem(self):\n \"\"\" Return the name of the file without it's extension. \"\"\"\n filename = os.path.basename(self.src_path)\n stem, ext = os.path.splitext(filename)\n return 'index' if stem in ('index', 'README') else stem\n\n def _get_dest_path(self, use_directory_urls):\n \"\"\" Return destination path based on source path. 
\"\"\"\n if self.is_documentation_page():\n parent, filename = os.path.split(self.src_path)\n if not use_directory_urls or self.name == 'index':\n # index.md or README.md => index.html\n # foo.md => foo.html\n return os.path.join(parent, self.name + '.html')\n else:\n # foo.md => foo/index.html\n return os.path.join(parent, self.name, 'index.html')\n return self.src_path\n\n def _get_url(self, use_directory_urls):\n \"\"\" Return url based in destination path. \"\"\"\n url = self.dest_path.replace(os.path.sep, '/')\n dirname, filename = os.path.split(url)\n if use_directory_urls and filename == 'index.html':\n if dirname == '':\n url = '.'\n else:\n url = dirname + '/'\n return urlquote(url)\n\n def url_relative_to(self, other):\n \"\"\" Return url for file relative to other file. \"\"\"\n return utils.get_relative_url(self.url, other.url if isinstance(other, File) else other)\n\n def copy_file(self, dirty=False):\n \"\"\" Copy source file to destination, ensuring parent directories exist. \"\"\"\n if dirty and not self.is_modified():\n log.debug(f\"Skip copying unmodified file: '{self.src_path}'\")\n else:\n log.debug(f\"Copying media file: '{self.src_path}'\")\n utils.copy_file(self.abs_src_path, self.abs_dest_path)\n\n def is_modified(self):\n if os.path.isfile(self.abs_dest_path):\n return os.path.getmtime(self.abs_dest_path) < os.path.getmtime(self.abs_src_path)\n return True\n\n def is_documentation_page(self):\n \"\"\" Return True if file is a Markdown page. \"\"\"\n return os.path.splitext(self.src_path)[1] in utils.markdown_extensions\n\n def is_static_page(self):\n \"\"\" Return True if file is a static page (html, xml, json). \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.html',\n '.htm',\n '.xml',\n '.json',\n )\n\n def is_media_file(self):\n \"\"\" Return True if file is not a documentation or static page. \"\"\"\n return not (self.is_documentation_page() or self.is_static_page())\n\n def is_javascript(self):\n \"\"\" Return True if file is a JavaScript file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.js',\n '.javascript',\n )\n\n def is_css(self):\n \"\"\" Return True if file is a CSS file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.css',\n )\n\n\ndef get_files(config):\n \"\"\" Walk the `docs_dir` and return a Files collection. \"\"\"\n files = []\n exclude = ['.*', '/templates']\n\n for source_dir, dirnames, filenames in os.walk(config['docs_dir'], followlinks=True):\n relative_dir = os.path.relpath(source_dir, config['docs_dir'])\n\n for dirname in list(dirnames):\n path = os.path.normpath(os.path.join(relative_dir, dirname))\n # Skip any excluded directories\n if _filter_paths(basename=dirname, path=path, is_dir=True, exclude=exclude):\n dirnames.remove(dirname)\n dirnames.sort()\n\n for filename in _sort_files(filenames):\n path = os.path.normpath(os.path.join(relative_dir, filename))\n # Skip any excluded files\n if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude):\n continue\n # Skip README.md if an index file also exists in dir\n if filename == 'README.md' and 'index.md' in filenames:\n log.warning(f\"Both index.md and README.md found. Skipping README.md from {source_dir}\")\n continue\n files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls']))\n\n return Files(files)\n\n\ndef _sort_files(filenames):\n \"\"\" Always sort `index` or `README` as first filename in list. 
\"\"\"\n\n def key(f):\n if os.path.splitext(f)[0] in ['index', 'README']:\n return (0,)\n return (1, f)\n\n return sorted(filenames, key=key)\n\n\ndef _filter_paths(basename, path, is_dir, exclude):\n \"\"\" .gitignore style file filtering. \"\"\"\n for item in exclude:\n # Items ending in '/' apply only to directories.\n if item.endswith('/') and not is_dir:\n continue\n # Items starting with '/' apply to the whole path.\n # In any other cases just the basename is used.\n match = path if item.startswith('/') else basename\n if fnmatch.fnmatch(match, item.strip('/')):\n return True\n return False\n", "path": "mkdocs/structure/files.py"}]} | 3,563 | 198 |
gh_patches_debug_35587 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1345 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update in code http links to current click version help
We have some click-related links in code docstrings and config.
As we changed the click version in the last release, it would be good to update these links too.
--- END ISSUE ---
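For context, the kind of edit the issue asks for is a one-line comment update per prompt helper; a minimal sketch, assuming click 7.x is the documentation version being targeted:

```python
# old docstring/comment reference found throughout cookiecutter/prompt.py
# Please see http://click.pocoo.org/4/api/#click.prompt

# updated reference pointing at the current click documentation
# Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
```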
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/prompt.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Functions for prompting the user for project info."""
4
5 from collections import OrderedDict
6 import json
7
8 import click
9 import six
10
11 from jinja2.exceptions import UndefinedError
12
13 from cookiecutter.exceptions import UndefinedVariableInTemplate
14 from cookiecutter.environment import StrictEnvironment
15
16
17 def read_user_variable(var_name, default_value):
18 """Prompt user for variable and return the entered value or given default.
19
20 :param str var_name: Variable of the context to query the user
21 :param default_value: Value that will be returned if no input happens
22 """
23 # Please see http://click.pocoo.org/4/api/#click.prompt
24 return click.prompt(var_name, default=default_value)
25
26
27 def read_user_yes_no(question, default_value):
28 """Prompt the user to reply with 'yes' or 'no' (or equivalent values).
29
30 Note:
31 Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'
32
33 :param str question: Question to the user
34 :param default_value: Value that will be returned if no input happens
35 """
36 # Please see http://click.pocoo.org/4/api/#click.prompt
37 return click.prompt(
38 question,
39 default=default_value,
40 type=click.BOOL
41 )
42
43
44 def read_repo_password(question):
45 """Prompt the user to enter a password.
46
47 :param str question: Question to the user
48 """
49 # Please see http://click.pocoo.org/4/api/#click.prompt
50 return click.prompt(question, hide_input=True)
51
52
53 def read_user_choice(var_name, options):
54 """Prompt the user to choose from several options for the given variable.
55
56 The first item will be returned if no input happens.
57
58 :param str var_name: Variable as specified in the context
59 :param list options: Sequence of options that are available to select from
60 :return: Exactly one item of ``options`` that has been chosen by the user
61 """
62 # Please see http://click.pocoo.org/4/api/#click.prompt
63 if not isinstance(options, list):
64 raise TypeError
65
66 if not options:
67 raise ValueError
68
69 choice_map = OrderedDict(
70 (u'{}'.format(i), value) for i, value in enumerate(options, 1)
71 )
72 choices = choice_map.keys()
73 default = u'1'
74
75 choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]
76 prompt = u'\n'.join((
77 u'Select {}:'.format(var_name),
78 u'\n'.join(choice_lines),
79 u'Choose from {}'.format(u', '.join(choices))
80 ))
81
82 user_choice = click.prompt(
83 prompt, type=click.Choice(choices), default=default, show_choices=False
84 )
85 return choice_map[user_choice]
86
87
88 def process_json(user_value):
89 """Load user-supplied value as a JSON dict.
90
91 :param str user_value: User-supplied value to load as a JSON dict
92 """
93 try:
94 user_dict = json.loads(
95 user_value,
96 object_pairs_hook=OrderedDict,
97 )
98 except Exception:
99 # Leave it up to click to ask the user again
100 raise click.UsageError('Unable to decode to JSON.')
101
102 if not isinstance(user_dict, dict):
103 # Leave it up to click to ask the user again
104 raise click.UsageError('Requires JSON dict.')
105
106 return user_dict
107
108
109 def read_user_dict(var_name, default_value):
110 """Prompt the user to provide a dictionary of data.
111
112 :param str var_name: Variable as specified in the context
113 :param default_value: Value that will be returned if no input is provided
114 :return: A Python dictionary to use in the context.
115 """
116 # Please see http://click.pocoo.org/4/api/#click.prompt
117 if not isinstance(default_value, dict):
118 raise TypeError
119
120 default_display = 'default'
121
122 user_value = click.prompt(
123 var_name,
124 default=default_display,
125 type=click.STRING,
126 value_proc=process_json,
127 )
128
129 if user_value == default_display:
130 # Return the given default w/o any processing
131 return default_value
132 return user_value
133
134
135 def render_variable(env, raw, cookiecutter_dict):
136 """Render the next variable to be displayed in the user prompt.
137
138 Inside the prompting taken from the cookiecutter.json file, this renders
139 the next variable. For example, if a project_name is "Peanut Butter
140 Cookie", the repo_name could be be rendered with:
141
142 `{{ cookiecutter.project_name.replace(" ", "_") }}`.
143
144 This is then presented to the user as the default.
145
146 :param Environment env: A Jinja2 Environment object.
147 :param str raw: The next value to be prompted for by the user.
148 :param dict cookiecutter_dict: The current context as it's gradually
149 being populated with variables.
150 :return: The rendered value for the default variable.
151 """
152 if raw is None:
153 return None
154 elif isinstance(raw, dict):
155 return {
156 render_variable(env, k, cookiecutter_dict):
157 render_variable(env, v, cookiecutter_dict)
158 for k, v in raw.items()
159 }
160 elif isinstance(raw, list):
161 return [
162 render_variable(env, v, cookiecutter_dict)
163 for v in raw
164 ]
165 elif not isinstance(raw, six.string_types):
166 raw = str(raw)
167
168 template = env.from_string(raw)
169
170 rendered_template = template.render(cookiecutter=cookiecutter_dict)
171 return rendered_template
172
173
174 def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):
175 """Prompt user with a set of options to choose from.
176
177 Each of the possible choices is rendered beforehand.
178 """
179 rendered_options = [
180 render_variable(env, raw, cookiecutter_dict) for raw in options
181 ]
182
183 if no_input:
184 return rendered_options[0]
185 return read_user_choice(key, rendered_options)
186
187
188 def prompt_for_config(context, no_input=False):
189 """Prompt user to enter a new config.
190
191 :param dict context: Source for field names and sample values.
192 :param no_input: Prompt the user at command line for manual configuration?
193 """
194 cookiecutter_dict = OrderedDict([])
195 env = StrictEnvironment(context=context)
196
197 # First pass: Handle simple and raw variables, plus choices.
198 # These must be done first because the dictionaries keys and
199 # values might refer to them.
200 for key, raw in context[u'cookiecutter'].items():
201 if key.startswith(u'_'):
202 cookiecutter_dict[key] = raw
203 continue
204
205 try:
206 if isinstance(raw, list):
207 # We are dealing with a choice variable
208 val = prompt_choice_for_config(
209 cookiecutter_dict, env, key, raw, no_input
210 )
211 cookiecutter_dict[key] = val
212 elif not isinstance(raw, dict):
213 # We are dealing with a regular variable
214 val = render_variable(env, raw, cookiecutter_dict)
215
216 if not no_input:
217 val = read_user_variable(key, val)
218
219 cookiecutter_dict[key] = val
220 except UndefinedError as err:
221 msg = "Unable to render variable '{}'".format(key)
222 raise UndefinedVariableInTemplate(msg, err, context)
223
224 # Second pass; handle the dictionaries.
225 for key, raw in context[u'cookiecutter'].items():
226
227 try:
228 if isinstance(raw, dict):
229 # We are dealing with a dict variable
230 val = render_variable(env, raw, cookiecutter_dict)
231
232 if not no_input:
233 val = read_user_dict(key, val)
234
235 cookiecutter_dict[key] = val
236 except UndefinedError as err:
237 msg = "Unable to render variable '{}'".format(key)
238 raise UndefinedVariableInTemplate(msg, err, context)
239
240 return cookiecutter_dict
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py
--- a/cookiecutter/prompt.py
+++ b/cookiecutter/prompt.py
@@ -20,7 +20,7 @@
:param str var_name: Variable of the context to query the user
:param default_value: Value that will be returned if no input happens
"""
- # Please see http://click.pocoo.org/4/api/#click.prompt
+ # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
return click.prompt(var_name, default=default_value)
@@ -33,7 +33,7 @@
:param str question: Question to the user
:param default_value: Value that will be returned if no input happens
"""
- # Please see http://click.pocoo.org/4/api/#click.prompt
+ # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
return click.prompt(
question,
default=default_value,
@@ -46,7 +46,7 @@
:param str question: Question to the user
"""
- # Please see http://click.pocoo.org/4/api/#click.prompt
+ # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
return click.prompt(question, hide_input=True)
@@ -59,7 +59,7 @@
:param list options: Sequence of options that are available to select from
:return: Exactly one item of ``options`` that has been chosen by the user
"""
- # Please see http://click.pocoo.org/4/api/#click.prompt
+ # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
if not isinstance(options, list):
raise TypeError
@@ -113,7 +113,7 @@
:param default_value: Value that will be returned if no input is provided
:return: A Python dictionary to use in the context.
"""
- # Please see http://click.pocoo.org/4/api/#click.prompt
+ # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
if not isinstance(default_value, dict):
raise TypeError
| {"golden_diff": "diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py\n--- a/cookiecutter/prompt.py\n+++ b/cookiecutter/prompt.py\n@@ -20,7 +20,7 @@\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n- # Please see http://click.pocoo.org/4/api/#click.prompt\n+ # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n \n \n@@ -33,7 +33,7 @@\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n- # Please see http://click.pocoo.org/4/api/#click.prompt\n+ # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n@@ -46,7 +46,7 @@\n \n :param str question: Question to the user\n \"\"\"\n- # Please see http://click.pocoo.org/4/api/#click.prompt\n+ # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n return click.prompt(question, hide_input=True)\n \n \n@@ -59,7 +59,7 @@\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n- # Please see http://click.pocoo.org/4/api/#click.prompt\n+ # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n \n@@ -113,7 +113,7 @@\n :param default_value: Value that will be returned if no input is provided\n :return: A Python dictionary to use in the context.\n \"\"\"\n- # Please see http://click.pocoo.org/4/api/#click.prompt\n+ # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n if not isinstance(default_value, dict):\n raise TypeError\n", "issue": "Update in code http links to current click version help\nWe have some click related links in code docstring and config.\r\nAs we changed click version in last release it will be good to update links too.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Functions for prompting the user for project info.\"\"\"\n\nfrom collections import OrderedDict\nimport json\n\nimport click\nimport six\n\nfrom jinja2.exceptions import UndefinedError\n\nfrom cookiecutter.exceptions import UndefinedVariableInTemplate\nfrom cookiecutter.environment import StrictEnvironment\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt user for variable and return the entered value or given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n type=click.BOOL\n )\n\n\ndef read_repo_password(question):\n \"\"\"Prompt the user to enter a password.\n\n :param str question: Question to the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(question, hide_input=True)\n\n\ndef 
read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise ValueError\n\n choice_map = OrderedDict(\n (u'{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = u'1'\n\n choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]\n prompt = u'\\n'.join((\n u'Select {}:'.format(var_name),\n u'\\n'.join(choice_lines),\n u'Choose from {}'.format(u', '.join(choices))\n ))\n\n user_choice = click.prompt(\n prompt, type=click.Choice(choices), default=default, show_choices=False\n )\n return choice_map[user_choice]\n\n\ndef process_json(user_value):\n \"\"\"Load user-supplied value as a JSON dict.\n\n :param str user_value: User-supplied value to load as a JSON dict\n \"\"\"\n try:\n user_dict = json.loads(\n user_value,\n object_pairs_hook=OrderedDict,\n )\n except Exception:\n # Leave it up to click to ask the user again\n raise click.UsageError('Unable to decode to JSON.')\n\n if not isinstance(user_dict, dict):\n # Leave it up to click to ask the user again\n raise click.UsageError('Requires JSON dict.')\n\n return user_dict\n\n\ndef read_user_dict(var_name, default_value):\n \"\"\"Prompt the user to provide a dictionary of data.\n\n :param str var_name: Variable as specified in the context\n :param default_value: Value that will be returned if no input is provided\n :return: A Python dictionary to use in the context.\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(default_value, dict):\n raise TypeError\n\n default_display = 'default'\n\n user_value = click.prompt(\n var_name,\n default=default_display,\n type=click.STRING,\n value_proc=process_json,\n )\n\n if user_value == default_display:\n # Return the given default w/o any processing\n return default_value\n return user_value\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n \"\"\"Render the next variable to be displayed in the user prompt.\n\n Inside the prompting taken from the cookiecutter.json file, this renders\n the next variable. 
For example, if a project_name is \"Peanut Butter\n Cookie\", the repo_name could be be rendered with:\n\n `{{ cookiecutter.project_name.replace(\" \", \"_\") }}`.\n\n This is then presented to the user as the default.\n\n :param Environment env: A Jinja2 Environment object.\n :param str raw: The next value to be prompted for by the user.\n :param dict cookiecutter_dict: The current context as it's gradually\n being populated with variables.\n :return: The rendered value for the default variable.\n \"\"\"\n if raw is None:\n return None\n elif isinstance(raw, dict):\n return {\n render_variable(env, k, cookiecutter_dict):\n render_variable(env, v, cookiecutter_dict)\n for k, v in raw.items()\n }\n elif isinstance(raw, list):\n return [\n render_variable(env, v, cookiecutter_dict)\n for v in raw\n ]\n elif not isinstance(raw, six.string_types):\n raw = str(raw)\n\n template = env.from_string(raw)\n\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt user with a set of options to choose from.\n\n Each of the possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"Prompt user to enter a new config.\n\n :param dict context: Source for field names and sample values.\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = OrderedDict([])\n env = StrictEnvironment(context=context)\n\n # First pass: Handle simple and raw variables, plus choices.\n # These must be done first because the dictionaries keys and\n # values might refer to them.\n for key, raw in context[u'cookiecutter'].items():\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n continue\n\n try:\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n cookiecutter_dict[key] = val\n elif not isinstance(raw, dict):\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n\n cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n # Second pass; handle the dictionaries.\n for key, raw in context[u'cookiecutter'].items():\n\n try:\n if isinstance(raw, dict):\n # We are dealing with a dict variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_dict(key, val)\n\n cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n return cookiecutter_dict\n", "path": "cookiecutter/prompt.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Functions for prompting the user for project info.\"\"\"\n\nfrom collections import OrderedDict\nimport json\n\nimport click\nimport six\n\nfrom jinja2.exceptions import UndefinedError\n\nfrom cookiecutter.exceptions import UndefinedVariableInTemplate\nfrom cookiecutter.environment import StrictEnvironment\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt user for variable and 
return the entered value or given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n type=click.BOOL\n )\n\n\ndef read_repo_password(question):\n \"\"\"Prompt the user to enter a password.\n\n :param str question: Question to the user\n \"\"\"\n # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n return click.prompt(question, hide_input=True)\n\n\ndef read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise ValueError\n\n choice_map = OrderedDict(\n (u'{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = u'1'\n\n choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]\n prompt = u'\\n'.join((\n u'Select {}:'.format(var_name),\n u'\\n'.join(choice_lines),\n u'Choose from {}'.format(u', '.join(choices))\n ))\n\n user_choice = click.prompt(\n prompt, type=click.Choice(choices), default=default, show_choices=False\n )\n return choice_map[user_choice]\n\n\ndef process_json(user_value):\n \"\"\"Load user-supplied value as a JSON dict.\n\n :param str user_value: User-supplied value to load as a JSON dict\n \"\"\"\n try:\n user_dict = json.loads(\n user_value,\n object_pairs_hook=OrderedDict,\n )\n except Exception:\n # Leave it up to click to ask the user again\n raise click.UsageError('Unable to decode to JSON.')\n\n if not isinstance(user_dict, dict):\n # Leave it up to click to ask the user again\n raise click.UsageError('Requires JSON dict.')\n\n return user_dict\n\n\ndef read_user_dict(var_name, default_value):\n \"\"\"Prompt the user to provide a dictionary of data.\n\n :param str var_name: Variable as specified in the context\n :param default_value: Value that will be returned if no input is provided\n :return: A Python dictionary to use in the context.\n \"\"\"\n # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n if not isinstance(default_value, dict):\n raise TypeError\n\n default_display = 'default'\n\n user_value = click.prompt(\n var_name,\n default=default_display,\n type=click.STRING,\n value_proc=process_json,\n )\n\n if user_value == default_display:\n # Return the given default w/o any processing\n return default_value\n return user_value\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n \"\"\"Render the next variable to be displayed in the user prompt.\n\n Inside the prompting taken 
from the cookiecutter.json file, this renders\n the next variable. For example, if a project_name is \"Peanut Butter\n Cookie\", the repo_name could be be rendered with:\n\n `{{ cookiecutter.project_name.replace(\" \", \"_\") }}`.\n\n This is then presented to the user as the default.\n\n :param Environment env: A Jinja2 Environment object.\n :param str raw: The next value to be prompted for by the user.\n :param dict cookiecutter_dict: The current context as it's gradually\n being populated with variables.\n :return: The rendered value for the default variable.\n \"\"\"\n if raw is None:\n return None\n elif isinstance(raw, dict):\n return {\n render_variable(env, k, cookiecutter_dict):\n render_variable(env, v, cookiecutter_dict)\n for k, v in raw.items()\n }\n elif isinstance(raw, list):\n return [\n render_variable(env, v, cookiecutter_dict)\n for v in raw\n ]\n elif not isinstance(raw, six.string_types):\n raw = str(raw)\n\n template = env.from_string(raw)\n\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt user with a set of options to choose from.\n\n Each of the possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"Prompt user to enter a new config.\n\n :param dict context: Source for field names and sample values.\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = OrderedDict([])\n env = StrictEnvironment(context=context)\n\n # First pass: Handle simple and raw variables, plus choices.\n # These must be done first because the dictionaries keys and\n # values might refer to them.\n for key, raw in context[u'cookiecutter'].items():\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n continue\n\n try:\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n cookiecutter_dict[key] = val\n elif not isinstance(raw, dict):\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n\n cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n # Second pass; handle the dictionaries.\n for key, raw in context[u'cookiecutter'].items():\n\n try:\n if isinstance(raw, dict):\n # We are dealing with a dict variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_dict(key, val)\n\n cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n return cookiecutter_dict\n", "path": "cookiecutter/prompt.py"}]} | 2,663 | 514 |
gh_patches_debug_16209 | rasdani/github-patches | git_diff | networkx__networkx-818 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mistake in computation of average neighbor degree for directed networks (migrated from Trac #639)
Original ticket https://networkx.lanl.gov/trac/ticket/639
Reported 2011-09-27 by Oleguer Sagarra <oleguer.sagarra, assigned to @hagberg.
There is a problem in the algorithm to compute the neighbor degree (and average neighbor degree) for directed networks. The thing is that as networkx is based on a dict of dicts one may follow the neighbors of a node "forward" but not "backwards", meaning that with functions such as G.in_degree(G[n]) one lists the incoming degree of the neighbors of the node "n", neighbors meaning destinations of directed edges starting at node n. For all this, the normalization factor used is wrong. Additionally, it has some bugs with division by zero.
These features need to be better redefined for various reasons:
a) With directed networks one can compute 4 types of features divided into two subgroups:
 a.1) For the neighbors of a node n (by neighbors meaning ending points of directed edges starting at node n) one can compute the incoming or outgoing degree of a node.
a.2) For all predecessors of node n (meaning all sources of edges finishing in node n) one can compute the very same thing.
b) Additionally, accounting for the average connectivity (or average neighbor degree as a function of degree of a node) things get messy, because then one may compute 8 different features divided into two groups, providing different information:
 b.1) Consider all the neighbors (defined earlier) of node n. Consider all the nodes n in the net. For each node n compute the (in-degree, out-degree) of its neighbors, then average over all nodes n of (in-degree, out-degree) 'k'.
 b.2) The same for predecessors of node n.
The current code can be adapted to do so, using the function G.reverse() to overcome the difficulty in quickly computing the predecessors of a node while keeping the code structure. But the documentation need to be rewritten and the normalization of the averages redone.
Reference : Serrano, M.A., Maguitman, A., Boguna, M., Fortunato, S. & Vespignani, A. Decoding the structure of the WWW: facts versus sampling biases. Main 10 (2005).at http://arxiv.org/abs/cs/0511035
Simple example showing what I mean:
Case 1: Error in normalization division by 0
In [1]: import networkx as nx
In [2]: G=nx.DiGraph()
In [3]: G.add_edges_from(([1,2],[1,3],[1,4]))
In [6]: nx.average_in_degree_connectivity(G)
ZeroDivisionError Traceback (most recent call last)
/Users/ugalss/<ipython-input-6-e2e455d286f8> in <module>()
----> 1 nx.average_in_degree_connectivity(G)
/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/networkx-1.5-py2.7.egg/networkx/algorithms/neighbor_degree.pyc in average_in_degree_connectivity(G, nodes, weighted)
205 raise nx.NetworkXError("Not defined for undirected graphs.")
206 degree_method = G.in_degree
--> 207 return _avg_deg_conn(G, degree_method, nodes=nodes, weighted=weighted)
    208 average_in_degree_connectivity.__doc__=average_degree_connectivity.__doc__
209
/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/networkx-1.5-py2.7.egg/networkx/algorithms/neighbor_degree.pyc in _avg_deg_conn(G, degree_method, nodes, weighted)
141 dc[k]=avg
142 if avg > 0:
--> 143 dc[k]/=dnorm[k]
144 return dc
145
ZeroDivisionError: float division by zero
Case 2: Wrong computation of neighbor degree (incoming):
In [7]: G.add_edges_from(([2,3],[3,4],[4,2]))
In [8]: nx.average_neighbor_in_degree(G)
Out[8]: {1: 6.0, 2: 1.0, 3: 1.0, 4: 1.0} # This is wrong. The average in-degree should be 2 for neighbors of node 1.
I attach a new version of neighbor_degree.py with proposed (and commented) changes...
--- END ISSUE ---
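For reference, the expected value in Case 2 can be checked by hand. The following is only an illustrative sketch of the intended semantics (average the in-degree of a node's successors), not part of the reported code:

```python
import networkx as nx

G = nx.DiGraph()
G.add_edges_from([(1, 2), (1, 3), (1, 4), (2, 3), (3, 4), (4, 2)])

# The "neighbors" of node 1 in the issue's sense are its successors: 2, 3 and 4.
# Each of them has in-degree 2, so the average neighbor in-degree for node 1
# should be 2.0, not the 6.0 reported above.
succ = list(G.successors(1))
avg_in = sum(G.in_degree(v) for v in succ) / len(succ)
print(avg_in)  # 2.0
```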
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/algorithms/assortativity/connectivity.py`
Content:
```
1 #-*- coding: utf-8 -*-
2 # Copyright (C) 2011 by
3 # Jordi Torrents <[email protected]>
4 # Aric Hagberg <[email protected]>
5 # All rights reserved.
6 # BSD license.
7 from collections import defaultdict
8 import networkx as nx
9 __author__ = """\n""".join(['Jordi Torrents <[email protected]>',
10 'Aric Hagberg ([email protected])'])
11 __all__ = ['average_degree_connectivity',
12 'k_nearest_neighbors']
13
14 def _avg_deg_conn(G, neighbors, source_degree, target_degree,
15 nodes=None, weight=None):
16 # "k nearest neighbors, or neighbor_connectivity
17 dsum = defaultdict(float)
18 dnorm = defaultdict(float)
19 for n,k in source_degree(nodes).items():
20 nbrdeg = target_degree(neighbors(n))
21 if weight is None:
22 s = float(sum(nbrdeg.values()))
23 else: # weight nbr degree by weight of (n,nbr) edge
24 s = float(sum((G[n][nbr].get(weight,1)*d
25 for nbr,d in nbrdeg.items())))
26 dnorm[k] += source_degree(n, weight=weight)
27 dsum[k] += s
28
29 # normalize
30 dc = {}
31 for k,avg in dsum.items():
32 dc[k]=avg
33 norm = dnorm[k]
34 if avg > 0 and norm > 0:
35 dc[k]/=norm
36 return dc
37
38 def average_degree_connectivity(G, source="in+out", target="in+out",
39 nodes=None, weight=None):
40 r"""Compute the average degree connectivity of graph.
41
42 The average degree connectivity is the average nearest neighbor degree of
43 nodes with degree k. For weighted graphs, an analogous measure can
44 be computed using the weighted average neighbors degree defined in
45 [1]_, for a node `i`, as:
46
47 .. math::
48
49 k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j
50
51 where `s_i` is the weighted degree of node `i`,
52 `w_{ij}` is the weight of the edge that links `i` and `j`,
53 and `N(i)` are the neighbors of node `i`.
54
55 Parameters
56 ----------
57 G : NetworkX graph
58
59 source : "in"|"out"|"in+out" (default:"in+out")
60 Directed graphs only. Use "in"- or "out"-degree for source node.
61
62 target : "in"|"out"|"in+out" (default:"in+out"
63 Directed graphs only. Use "in"- or "out"-degree for target node.
64
65 nodes: list or iterable (optional)
66 Compute neighbor connectivity for these nodes. The default is all nodes.
67
68 weight : string or None, optional (default=None)
69 The edge attribute that holds the numerical value used as a weight.
70 If None, then each edge has weight 1.
71
72 Returns
73 -------
74 d: dict
75 A dictionary keyed by degree k with the value of average connectivity.
76
77 Examples
78 --------
79 >>> G=nx.path_graph(4)
80 >>> G.edge[1][2]['weight'] = 3
81 >>> nx.k_nearest_neighbors(G)
82 {1: 2.0, 2: 1.5}
83 >>> nx.k_nearest_neighbors(G, weight='weight')
84 {1: 2.0, 2: 1.75}
85
86 See also
87 --------
88 neighbors_average_degree
89
90 Notes
91 -----
92 This algorithm is sometimes called "k nearest neighbors'.
93
94 References
95 ----------
96 .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani,
97 "The architecture of complex weighted networks".
98 PNAS 101 (11): 3747–3752 (2004).
99 """
100 source_degree = G.degree
101 target_degree = G.degree
102 neighbors = G.neighbors
103 if G.is_directed():
104 direction = {'out':G.out_degree,
105 'in':G.in_degree,
106 'in+out': G.degree}
107 source_degree = direction[source]
108 target_degree = direction[target]
109 if source == 'in':
110 neighbors=G.predecessors
111 elif source == 'out':
112 neighbors=G.successors
113 return _avg_deg_conn(G, neighbors, source_degree, target_degree,
114 nodes=nodes, weight=weight)
115
116 k_nearest_neighbors=average_degree_connectivity
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/networkx/algorithms/assortativity/connectivity.py b/networkx/algorithms/assortativity/connectivity.py
--- a/networkx/algorithms/assortativity/connectivity.py
+++ b/networkx/algorithms/assortativity/connectivity.py
@@ -21,8 +21,15 @@
if weight is None:
s = float(sum(nbrdeg.values()))
else: # weight nbr degree by weight of (n,nbr) edge
- s = float(sum((G[n][nbr].get(weight,1)*d
- for nbr,d in nbrdeg.items())))
+ if neighbors == G.neighbors:
+ s = float(sum((G[n][nbr].get(weight,1)*d
+ for nbr,d in nbrdeg.items())))
+ elif neighbors == G.successors:
+ s = float(sum((G[n][nbr].get(weight,1)*d
+ for nbr,d in nbrdeg.items())))
+ elif neighbors == G.predecessors:
+ s = float(sum((G[nbr][n].get(weight,1)*d
+ for nbr,d in nbrdeg.items())))
dnorm[k] += source_degree(n, weight=weight)
dsum[k] += s
| {"golden_diff": "diff --git a/networkx/algorithms/assortativity/connectivity.py b/networkx/algorithms/assortativity/connectivity.py\n--- a/networkx/algorithms/assortativity/connectivity.py\n+++ b/networkx/algorithms/assortativity/connectivity.py\n@@ -21,8 +21,15 @@\n if weight is None:\n s = float(sum(nbrdeg.values()))\n else: # weight nbr degree by weight of (n,nbr) edge\n- s = float(sum((G[n][nbr].get(weight,1)*d \n- for nbr,d in nbrdeg.items())))\n+ if neighbors == G.neighbors:\n+ s = float(sum((G[n][nbr].get(weight,1)*d \n+ for nbr,d in nbrdeg.items())))\n+ elif neighbors == G.successors:\n+ s = float(sum((G[n][nbr].get(weight,1)*d \n+ for nbr,d in nbrdeg.items())))\n+ elif neighbors == G.predecessors:\n+ s = float(sum((G[nbr][n].get(weight,1)*d \n+ for nbr,d in nbrdeg.items())))\n dnorm[k] += source_degree(n, weight=weight)\n dsum[k] += s\n", "issue": "Mistake in computation of average neighbor degree for directed networks (migrated from Trac #639)\nOriginal ticket https://networkx.lanl.gov/trac/ticket/639\nReported 2011-09-27 by Oleguer Sagarra <oleguer.sagarra, assigned to @hagberg.\n\nThere is a problem in the algorithm to compute the negihbor degree (and average neighbor degree) for directed networks. The thing is that as networkx is based on a dict of dicts one may follow the neighbors of a node \"forward\" but not \"backwards\", meaning that with the functions such as G.in_degree(G[n]) one lists the incoming degree of the neighbors of the node \"n\", neighbors meaning destination of directed edges starting at node n. For all this, the normalization factor used is wrong. Additionally, it has some bugs with division over zero values.\n\nThese features need to be better redefined for various reasons:\n\na) With directed networks one can compute 4 types of features divided into two subgroups:\n a.1) For the neighbors of a node n (by neighbors meaning ending points of directed edges starting at node n) one can compute the Incoming our outgoing degree of a node.\n a.2) For all predecessors of node n (meaning all sources of edges finishing in node n) one can compute the very same thing.\n\nb) Additionally, accounting for the average connectivity (or average neighbor degree as a function of degree of a node) things get messy, because then one may compute 8 different features divided into two groups, providing different information:\n b.1) Consider all the neighbors (defined earlier) of node n. Consider all the nodes n in the net. For each node n compute the (in-degree, out-degree) of their neighbors, then average over all n nodes of (in-degree, out-degree) 'k'.\n b.2) The same for predecessors of degree n.\n\nThe current code can be adapted to do so, using the function G.reverse() to overcome the difficulty in quickly computing the predecessors of a node while keeping the code structure. But the documentation need to be rewritten and the normalization of the averages redone.\n\nReference : Serrano, M.A., Maguitman, A., Boguna, M., Fortunato, S. & Vespignani, A. Decoding the structure of the WWW: facts versus sampling biases. 
Main 10 (2005).at http://arxiv.org/abs/cs/0511035\n\nSimple example showing what I mean:\nCase 1: Error in normalization division by 0\n\nIn [1]: import networkx as nx\n\nIn [2]: G=nx.DiGraph()\n\nIn [3]: G.add_edges_from(([1,2],[1,3],[1,4]))\n## In [6]: nx.average_in_degree_connectivity(G)\n\nZeroDivisionError Traceback (most recent call last)\n/Users/ugalss/<ipython-input-6-e2e455d286f8> in <module>()\n----> 1 nx.average_in_degree_connectivity(G)\n\n/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/networkx-1.5-py2.7.egg/networkx/algorithms/neighbor_degree.pyc in average_in_degree_connectivity(G, nodes, weighted)\n 205 raise nx.NetworkXError(\"Not defined for undirected graphs.\")\n 206 degree_method = G.in_degree\n--> 207 return _avg_deg_conn(G, degree_method, nodes=nodes, weighted=weighted)\n 208 average_in_degree_connectivity.__doc__=average_degree_connectivity.**doc**\n 209 \n\n/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/networkx-1.5-py2.7.egg/networkx/algorithms/neighbor_degree.pyc in _avg_deg_conn(G, degree_method, nodes, weighted)\n 141 dc[k]=avg\n 142 if avg > 0:\n--> 143 dc[k]/=dnorm[k]\n 144 return dc\n 145 \n\nZeroDivisionError: float division by zero\n\nCase 2: Wrong computation of neighbor degree (incoming):\n\nIn [7]: G.add_edges_from(([2,3],[3,4],[4,2]))\nIn [8]: nx.average_neighbor_in_degree(G)\nOut[8]: {1: 6.0, 2: 1.0, 3: 1.0, 4: 1.0} # This is wrong. The average in-degree should be 2 for neighbors of node 1.\n\nI attach a new version of neighbor_degree.py with proposed (and commented) changes...\n\n", "before_files": [{"content": "#-*- coding: utf-8 -*-\n# Copyright (C) 2011 by \n# Jordi Torrents <[email protected]>\n# Aric Hagberg <[email protected]>\n# All rights reserved.\n# BSD license.\nfrom collections import defaultdict\nimport networkx as nx\n__author__ = \"\"\"\\n\"\"\".join(['Jordi Torrents <[email protected]>',\n 'Aric Hagberg ([email protected])'])\n__all__ = ['average_degree_connectivity',\n 'k_nearest_neighbors']\n\ndef _avg_deg_conn(G, neighbors, source_degree, target_degree, \n nodes=None, weight=None):\n # \"k nearest neighbors, or neighbor_connectivity\n dsum = defaultdict(float)\n dnorm = defaultdict(float)\n for n,k in source_degree(nodes).items():\n nbrdeg = target_degree(neighbors(n))\n if weight is None:\n s = float(sum(nbrdeg.values()))\n else: # weight nbr degree by weight of (n,nbr) edge\n s = float(sum((G[n][nbr].get(weight,1)*d \n for nbr,d in nbrdeg.items())))\n dnorm[k] += source_degree(n, weight=weight)\n dsum[k] += s\n \n # normalize\n dc = {}\n for k,avg in dsum.items():\n dc[k]=avg\n norm = dnorm[k]\n if avg > 0 and norm > 0:\n dc[k]/=norm\n return dc\n\ndef average_degree_connectivity(G, source=\"in+out\", target=\"in+out\",\n nodes=None, weight=None):\n r\"\"\"Compute the average degree connectivity of graph.\n\n The average degree connectivity is the average nearest neighbor degree of\n nodes with degree k. For weighted graphs, an analogous measure can \n be computed using the weighted average neighbors degree defined in \n [1]_, for a node `i`, as:\n\n .. math::\n\n k_{nn,i}^{w} = \\frac{1}{s_i} \\sum_{j \\in N(i)} w_{ij} k_j\n\n where `s_i` is the weighted degree of node `i`, \n `w_{ij}` is the weight of the edge that links `i` and `j`,\n and `N(i)` are the neighbors of node `i`.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : \"in\"|\"out\"|\"in+out\" (default:\"in+out\")\n Directed graphs only. 
Use \"in\"- or \"out\"-degree for source node.\n\n target : \"in\"|\"out\"|\"in+out\" (default:\"in+out\"\n Directed graphs only. Use \"in\"- or \"out\"-degree for target node.\n\n nodes: list or iterable (optional)\n Compute neighbor connectivity for these nodes. The default is all nodes.\n\n weight : string or None, optional (default=None)\n The edge attribute that holds the numerical value used as a weight.\n If None, then each edge has weight 1.\n\n Returns\n -------\n d: dict\n A dictionary keyed by degree k with the value of average connectivity.\n \n Examples\n --------\n >>> G=nx.path_graph(4)\n >>> G.edge[1][2]['weight'] = 3\n >>> nx.k_nearest_neighbors(G)\n {1: 2.0, 2: 1.5}\n >>> nx.k_nearest_neighbors(G, weight='weight')\n {1: 2.0, 2: 1.75}\n\n See also\n --------\n neighbors_average_degree\n\n Notes\n -----\n This algorithm is sometimes called \"k nearest neighbors'.\n\n References\n ---------- \n .. [1] A. Barrat, M. Barth\u00e9lemy, R. Pastor-Satorras, and A. Vespignani, \n \"The architecture of complex weighted networks\". \n PNAS 101 (11): 3747\u20133752 (2004).\n \"\"\"\n source_degree = G.degree\n target_degree = G.degree\n neighbors = G.neighbors\n if G.is_directed():\n direction = {'out':G.out_degree,\n 'in':G.in_degree,\n 'in+out': G.degree}\n source_degree = direction[source]\n target_degree = direction[target]\n if source == 'in':\n neighbors=G.predecessors\n elif source == 'out':\n neighbors=G.successors\n return _avg_deg_conn(G, neighbors, source_degree, target_degree,\n nodes=nodes, weight=weight)\n\nk_nearest_neighbors=average_degree_connectivity\n", "path": "networkx/algorithms/assortativity/connectivity.py"}], "after_files": [{"content": "#-*- coding: utf-8 -*-\n# Copyright (C) 2011 by \n# Jordi Torrents <[email protected]>\n# Aric Hagberg <[email protected]>\n# All rights reserved.\n# BSD license.\nfrom collections import defaultdict\nimport networkx as nx\n__author__ = \"\"\"\\n\"\"\".join(['Jordi Torrents <[email protected]>',\n 'Aric Hagberg ([email protected])'])\n__all__ = ['average_degree_connectivity',\n 'k_nearest_neighbors']\n\ndef _avg_deg_conn(G, neighbors, source_degree, target_degree, \n nodes=None, weight=None):\n # \"k nearest neighbors, or neighbor_connectivity\n dsum = defaultdict(float)\n dnorm = defaultdict(float)\n for n,k in source_degree(nodes).items():\n nbrdeg = target_degree(neighbors(n))\n if weight is None:\n s = float(sum(nbrdeg.values()))\n else: # weight nbr degree by weight of (n,nbr) edge\n if neighbors == G.neighbors:\n s = float(sum((G[n][nbr].get(weight,1)*d \n for nbr,d in nbrdeg.items())))\n elif neighbors == G.successors:\n s = float(sum((G[n][nbr].get(weight,1)*d \n for nbr,d in nbrdeg.items())))\n elif neighbors == G.predecessors:\n s = float(sum((G[nbr][n].get(weight,1)*d \n for nbr,d in nbrdeg.items())))\n dnorm[k] += source_degree(n, weight=weight)\n dsum[k] += s\n \n # normalize\n dc = {}\n for k,avg in dsum.items():\n dc[k]=avg\n norm = dnorm[k]\n if avg > 0 and norm > 0:\n dc[k]/=norm\n return dc\n\ndef average_degree_connectivity(G, source=\"in+out\", target=\"in+out\",\n nodes=None, weight=None):\n r\"\"\"Compute the average degree connectivity of graph.\n\n The average degree connectivity is the average nearest neighbor degree of\n nodes with degree k. For weighted graphs, an analogous measure can \n be computed using the weighted average neighbors degree defined in \n [1]_, for a node `i`, as:\n\n .. 
math::\n\n k_{nn,i}^{w} = \\frac{1}{s_i} \\sum_{j \\in N(i)} w_{ij} k_j\n\n where `s_i` is the weighted degree of node `i`, \n `w_{ij}` is the weight of the edge that links `i` and `j`,\n and `N(i)` are the neighbors of node `i`.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : \"in\"|\"out\"|\"in+out\" (default:\"in+out\")\n Directed graphs only. Use \"in\"- or \"out\"-degree for source node.\n\n target : \"in\"|\"out\"|\"in+out\" (default:\"in+out\"\n Directed graphs only. Use \"in\"- or \"out\"-degree for target node.\n\n nodes: list or iterable (optional)\n Compute neighbor connectivity for these nodes. The default is all nodes.\n\n weight : string or None, optional (default=None)\n The edge attribute that holds the numerical value used as a weight.\n If None, then each edge has weight 1.\n\n Returns\n -------\n d: dict\n A dictionary keyed by degree k with the value of average connectivity.\n \n Examples\n --------\n >>> G=nx.path_graph(4)\n >>> G.edge[1][2]['weight'] = 3\n >>> nx.k_nearest_neighbors(G)\n {1: 2.0, 2: 1.5}\n >>> nx.k_nearest_neighbors(G, weight='weight')\n {1: 2.0, 2: 1.75}\n\n See also\n --------\n neighbors_average_degree\n\n Notes\n -----\n This algorithm is sometimes called \"k nearest neighbors'.\n\n References\n ---------- \n .. [1] A. Barrat, M. Barth\u00e9lemy, R. Pastor-Satorras, and A. Vespignani, \n \"The architecture of complex weighted networks\". \n PNAS 101 (11): 3747\u20133752 (2004).\n \"\"\"\n source_degree = G.degree\n target_degree = G.degree\n neighbors = G.neighbors\n if G.is_directed():\n direction = {'out':G.out_degree,\n 'in':G.in_degree,\n 'in+out': G.degree}\n source_degree = direction[source]\n target_degree = direction[target]\n if source == 'in':\n neighbors=G.predecessors\n elif source == 'out':\n neighbors=G.successors\n return _avg_deg_conn(G, neighbors, source_degree, target_degree,\n nodes=nodes, weight=weight)\n\nk_nearest_neighbors=average_degree_connectivity\n", "path": "networkx/algorithms/assortativity/connectivity.py"}]} | 2,575 | 268 |
gh_patches_debug_17765 | rasdani/github-patches | git_diff | fedora-infra__bodhi-3329 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add ctype to the ComposeComplete schema
Currently the regindexer code that maintains an index of Flatpaks and containers in the Fedora registry looks at the 'ctype' field of a ~mashtask~compose.complete message to know whether the index needs to be rebuilt:
``` python
match_topic = conf.daemon.topic_prefix + '.' + conf.daemon.environment + '.bodhi.masktask.complete'
for name, endpoint, topic, raw_msg in fedmsg.tail_messages(topic=match_topic):
msg = raw_msg['msg']
if msg.get('ctype') in ('container', 'flatpak'):
indexer_thread.reindex('Bodhi mash, ctype="%s"' % msg['ctype'])
```
The ctype field is not part of the ComposeCompleteV1 schema - it would be great if it could be added, since I'd like to continue using it when switching to fedora-messaging.
(@bowlofeggs suggested possibly extending the topic - I suppose like `compose.complete.flatpak` - I don't see a reason to go that fancy - there are few compose.complete messages.)
--- END ISSUE ---
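A rough sketch of what the requested change could look like; the sample values and exact wording below are assumptions based on the issue text, not the merged patch. The idea is simply to declare `ctype` as an optional string in the `ComposeCompleteV1` body schema so consumers such as regindexer can keep reading it:

```python
# Minimal, self-contained sketch: validate a sample compose.complete body
# against a schema extended with an optional 'ctype' property.
import jsonschema

body_schema = {
    '$schema': 'http://json-schema.org/draft-04/schema#',
    'type': 'object',
    'properties': {
        'agent': {'type': 'string'},
        'repo': {'type': 'string'},
        'success': {'type': 'boolean'},
        # New, optional field; kept out of 'required' so older messages
        # without a content type still validate.
        'ctype': {
            'type': 'string',
            'description': 'The content type of the compose, e.g. "container" or "flatpak".',
        },
    },
    'required': ['agent', 'repo', 'success'],
}

# Hypothetical message body for illustration only.
sample = {'agent': 'releng', 'repo': 'f30-flatpak-updates', 'success': True, 'ctype': 'flatpak'}
jsonschema.validate(sample, body_schema)
print(sample.get('ctype'))  # flatpak
```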
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/messages/schemas/compose.py`
Content:
```
1 # Copyright (C) 2018-2019 Red Hat, Inc.
2 #
3 # This program is free software; you can redistribute it and/or modify
4 # it under the terms of the GNU General Public License as published by
5 # the Free Software Foundation; either version 2 of the License, or
6 # (at your option) any later version.
7 #
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU General Public License for more details.
12 #
13 # You should have received a copy of the GNU General Public License along
14 # with this program; if not, write to the Free Software Foundation, Inc.,
15 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 """
17 Message schema for Composes.
18
19 Each message is defined as a Python class. For details, see `fedora-messaging
20 <https://fedora-messaging.readthedocs.io/en/stable/>`_ documentation on
21 messages.
22 """
23
24 from .base import BodhiMessage, SCHEMA_URL
25
26
27 class ComposeCompleteV1(BodhiMessage):
28 """Sent when a compose task completes."""
29
30 body_schema = {
31 'id': f'{SCHEMA_URL}/v1/bodhi.compose.complete#',
32 '$schema': 'http://json-schema.org/draft-04/schema#',
33 'description': 'Schema for message sent when composes finish',
34 'type': 'object',
35 'properties': {
36 'agent': {
37 'type': 'string',
38 'description': 'The name of the user who started this compose.'
39 },
40 'repo': {
41 'type': 'string',
42 'description': 'The name of the repository being composed.'
43 },
44 'success': {
45 'type': 'boolean',
46 'description': 'true if the compose was successful, false otherwise.'
47 }
48 },
49 'required': ['agent', 'repo', 'success'],
50 }
51
52 topic = "bodhi.compose.complete"
53
54 @property
55 def repo(self) -> str:
56 """Return the name of the repository being composed."""
57 return self.body.get('repo')
58
59 @property
60 def success(self) -> bool:
61 """Return the name of the repository being composed."""
62 return self.body.get('success')
63
64 @property
65 def summary(self) -> str:
66 """
67 Return a short, human-readable representation of this message.
68
69 This should provide a short summary of the message, much like the subject line
70 of an email.
71
72 Returns:
73 The message summary.
74 """
75 if self.success:
76 return f"bodhi composer successfully composed {self.repo}"
77 else:
78 return f"bodhi composer failed to compose {self.repo}"
79
80
81 class ComposeComposingV1(BodhiMessage):
82 """Sent when the compose task composes."""
83
84 body_schema = {
85 'id': f'{SCHEMA_URL}/v1/bodhi.compose.composing#',
86 '$schema': 'http://json-schema.org/draft-04/schema#',
87 'description': 'Schema for message sent when composes start',
88 'type': 'object',
89 'properties': {
90 'agent': {
91 'type': 'string',
92 'description': 'The name of the user who started this compose.'
93 },
94 'repo': {
95 'type': 'string',
96 'description': 'The name of the repository being composed.'
97 },
98 },
99 'required': ['agent', 'repo'],
100 }
101
102 topic = "bodhi.compose.composing"
103
104 @property
105 def repo(self) -> str:
106 """Return the name of the repository being composed."""
107 return self.body.get('repo')
108
109 @property
110 def summary(self) -> str:
111 """
112 Return a short, human-readable representation of this message.
113
114 This should provide a short summary of the message, much like the subject line
115 of an email.
116
117 Returns:
118 A summary for this message.
119 """
120 return f"bodhi composer started composing {self.repo}"
121
122
123 class ComposeStartV1(BodhiMessage):
124 """Sent when a compose task starts."""
125
126 body_schema = {
127 'id': f'{SCHEMA_URL}/v1/bodhi.compose.start#',
128 '$schema': 'http://json-schema.org/draft-04/schema#',
129 'description': 'Schema for message sent when composes start',
130 'type': 'object',
131 'properties': {
132 'agent': {
133 'type': 'string',
134 'description': 'The name of the user who started this compose.'
135 },
136 },
137 'required': ['agent'],
138 }
139
140 topic = "bodhi.compose.start"
141
142 @property
143 def summary(self) -> str:
144 """
145 Return a short, human-readable representation of this message.
146
147 This should provide a short summary of the message, much like the subject line
148 of an email.
149
150 Returns:
151 A summary for this message.
152 """
153 return "bodhi composer started a push"
154
155
156 class ComposeSyncDoneV1(BodhiMessage):
157 """Sent when a compose task sync is done."""
158
159 body_schema = {
160 'id': f'{SCHEMA_URL}/v1/bodhi.compose.sync.done#',
161 '$schema': 'http://json-schema.org/draft-04/schema#',
162 'description': ('Schema for message sent when the composer is done waiting to sync to '
163 'mirrors'),
164 'type': 'object',
165 'properties': {
166 'agent': {
167 'type': 'string',
168 'description': 'The name of the user who started this compose.'
169 },
170 'repo': {
171 'type': 'string',
172 'description': 'The name of the repository being composed.'
173 },
174 },
175 'required': ['agent', 'repo'],
176 }
177
178 topic = "bodhi.compose.sync.done"
179
180 @property
181 def repo(self) -> str:
182 """Return the name of the repository being composed."""
183 return self.body.get('repo')
184
185 @property
186 def summary(self) -> str:
187 """
188 Return a short, human-readable representation of this message.
189
190 This should provide a short summary of the message, much like the subject line
191 of an email.
192
193 Returns:
194 A summary for this message.
195 """
196 return f"bodhi composer finished waiting for {self.repo} to hit the master mirror"
197
198
199 class ComposeSyncWaitV1(BodhiMessage):
200 """Sent when a compose task sync is waiting."""
201
202 body_schema = {
203 'id': f'{SCHEMA_URL}/v1/bodhi.compose.sync.wait#',
204 '$schema': 'http://json-schema.org/draft-04/schema#',
205 'description': 'Schema for message sent when the composer is waiting to sync to mirrors',
206 'type': 'object',
207 'properties': {
208 'agent': {
209 'type': 'string',
210 'description': 'The name of the user who started this compose.'
211 },
212 'repo': {
213 'type': 'string',
214 'description': 'The name of the repository being composed.'
215 },
216 },
217 'required': ['agent', 'repo'],
218 }
219
220 topic = "bodhi.compose.sync.wait"
221
222 @property
223 def repo(self) -> str:
224 """Return the name of the repository being composed."""
225 return self.body.get('repo')
226
227 @property
228 def summary(self) -> str:
229 """
230 Return a short, human-readable representation of this message.
231
232 This should provide a short summary of the message, much like the subject line
233 of an email.
234
235 Returns:
236 A summary for this message.
237 """
238 return f"bodhi composer is waiting for {self.repo} to hit the master mirror"
239
240
241 class RepoDoneV1(BodhiMessage):
242 """Sent when a repo is created and ready to be signed or otherwise processed."""
243
244 body_schema = {
245 'id': f'{SCHEMA_URL}/v1/bodhi.repo.done#',
246 '$schema': 'http://json-schema.org/draft-04/schema#',
247 'description': 'Schema for message sent when a repo is created and ready to be signed',
248 'type': 'object',
249 'properties': {
250 'agent': {
251 'type': 'string',
252 'description': 'The name of the user who started this compose.'
253 },
254 'path': {
255 'type': 'string',
256 'description': 'The path of the repository that was composed.'
257 },
258 'repo': {
259 'type': 'string',
260 'description': 'The name of the repository that was composed.'
261 },
262 },
263 'required': ['agent', 'path', 'repo'],
264 }
265
266 topic = "bodhi.repo.done"
267
268 @property
269 def repo(self) -> str:
270 """Return the name of the repository being composed."""
271 return self.body.get('repo')
272
273 @property
274 def summary(self) -> str:
275 """
276 Return a short, human-readable representation of this message.
277
278 This should provide a short summary of the message, much like the subject line
279 of an email.
280
281 Returns:
282 A summary for this message.
283 """
284 return f"bodhi composer is finished building {self.repo}"
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/messages/schemas/compose.py b/bodhi/messages/schemas/compose.py
--- a/bodhi/messages/schemas/compose.py
+++ b/bodhi/messages/schemas/compose.py
@@ -44,6 +44,10 @@
'success': {
'type': 'boolean',
'description': 'true if the compose was successful, false otherwise.'
+ },
+ 'ctype': {
+ 'type': 'string',
+ 'description': 'Type of the compose.'
}
},
'required': ['agent', 'repo', 'success'],
@@ -61,6 +65,11 @@
"""Return the name of the repository being composed."""
return self.body.get('success')
+ @property
+ def ctype(self) -> str:
+ """Return the compose type."""
+ return self.body.get('ctype')
+
@property
def summary(self) -> str:
"""
| {"golden_diff": "diff --git a/bodhi/messages/schemas/compose.py b/bodhi/messages/schemas/compose.py\n--- a/bodhi/messages/schemas/compose.py\n+++ b/bodhi/messages/schemas/compose.py\n@@ -44,6 +44,10 @@\n 'success': {\n 'type': 'boolean',\n 'description': 'true if the compose was successful, false otherwise.'\n+ },\n+ 'ctype': {\n+ 'type': 'string',\n+ 'description': 'Type of the compose.'\n }\n },\n 'required': ['agent', 'repo', 'success'],\n@@ -61,6 +65,11 @@\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('success')\n \n+ @property\n+ def ctype(self) -> str:\n+ \"\"\"Return the compose type.\"\"\"\n+ return self.body.get('ctype')\n+\n @property\n def summary(self) -> str:\n \"\"\"\n", "issue": "Add ctype to the ComposeComplete schema\nCurrently the regindexer code that maintains an index of Flatpaks and containers in the Fedora registry looks at the 'ctype' field of a ~mashtask~compose.complete message to know whether the index needs to be rebuilt:\r\n\r\n``` python\r\n match_topic = conf.daemon.topic_prefix + '.' + conf.daemon.environment + '.bodhi.masktask.complete'\r\n for name, endpoint, topic, raw_msg in fedmsg.tail_messages(topic=match_topic):\r\n msg = raw_msg['msg']\r\n if msg.get('ctype') in ('container', 'flatpak'):\r\n indexer_thread.reindex('Bodhi mash, ctype=\"%s\"' % msg['ctype'])\r\n```\r\n\r\nThe ctype field is not part of the ComposeCompleteV1 schema - it would be great if it could be added, since I'd like to continue using it when switching to fedora-messaging.\r\n\r\n(@bowlofeggs suggested possibly extending the topic - I suppose like `compose.complete.flatpak` - I don't see a reason to go that fancy - there are few compose.complete messages.)\n", "before_files": [{"content": "# Copyright (C) 2018-2019 Red Hat, Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\"\"\"\nMessage schema for Composes.\n\nEach message is defined as a Python class. 
For details, see `fedora-messaging\n<https://fedora-messaging.readthedocs.io/en/stable/>`_ documentation on\nmessages.\n\"\"\"\n\nfrom .base import BodhiMessage, SCHEMA_URL\n\n\nclass ComposeCompleteV1(BodhiMessage):\n \"\"\"Sent when a compose task completes.\"\"\"\n\n body_schema = {\n 'id': f'{SCHEMA_URL}/v1/bodhi.compose.complete#',\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'description': 'Schema for message sent when composes finish',\n 'type': 'object',\n 'properties': {\n 'agent': {\n 'type': 'string',\n 'description': 'The name of the user who started this compose.'\n },\n 'repo': {\n 'type': 'string',\n 'description': 'The name of the repository being composed.'\n },\n 'success': {\n 'type': 'boolean',\n 'description': 'true if the compose was successful, false otherwise.'\n }\n },\n 'required': ['agent', 'repo', 'success'],\n }\n\n topic = \"bodhi.compose.complete\"\n\n @property\n def repo(self) -> str:\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('repo')\n\n @property\n def success(self) -> bool:\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('success')\n\n @property\n def summary(self) -> str:\n \"\"\"\n Return a short, human-readable representation of this message.\n\n This should provide a short summary of the message, much like the subject line\n of an email.\n\n Returns:\n The message summary.\n \"\"\"\n if self.success:\n return f\"bodhi composer successfully composed {self.repo}\"\n else:\n return f\"bodhi composer failed to compose {self.repo}\"\n\n\nclass ComposeComposingV1(BodhiMessage):\n \"\"\"Sent when the compose task composes.\"\"\"\n\n body_schema = {\n 'id': f'{SCHEMA_URL}/v1/bodhi.compose.composing#',\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'description': 'Schema for message sent when composes start',\n 'type': 'object',\n 'properties': {\n 'agent': {\n 'type': 'string',\n 'description': 'The name of the user who started this compose.'\n },\n 'repo': {\n 'type': 'string',\n 'description': 'The name of the repository being composed.'\n },\n },\n 'required': ['agent', 'repo'],\n }\n\n topic = \"bodhi.compose.composing\"\n\n @property\n def repo(self) -> str:\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('repo')\n\n @property\n def summary(self) -> str:\n \"\"\"\n Return a short, human-readable representation of this message.\n\n This should provide a short summary of the message, much like the subject line\n of an email.\n\n Returns:\n A summary for this message.\n \"\"\"\n return f\"bodhi composer started composing {self.repo}\"\n\n\nclass ComposeStartV1(BodhiMessage):\n \"\"\"Sent when a compose task starts.\"\"\"\n\n body_schema = {\n 'id': f'{SCHEMA_URL}/v1/bodhi.compose.start#',\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'description': 'Schema for message sent when composes start',\n 'type': 'object',\n 'properties': {\n 'agent': {\n 'type': 'string',\n 'description': 'The name of the user who started this compose.'\n },\n },\n 'required': ['agent'],\n }\n\n topic = \"bodhi.compose.start\"\n\n @property\n def summary(self) -> str:\n \"\"\"\n Return a short, human-readable representation of this message.\n\n This should provide a short summary of the message, much like the subject line\n of an email.\n\n Returns:\n A summary for this message.\n \"\"\"\n return \"bodhi composer started a push\"\n\n\nclass ComposeSyncDoneV1(BodhiMessage):\n \"\"\"Sent when a compose task sync is 
done.\"\"\"\n\n body_schema = {\n 'id': f'{SCHEMA_URL}/v1/bodhi.compose.sync.done#',\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'description': ('Schema for message sent when the composer is done waiting to sync to '\n 'mirrors'),\n 'type': 'object',\n 'properties': {\n 'agent': {\n 'type': 'string',\n 'description': 'The name of the user who started this compose.'\n },\n 'repo': {\n 'type': 'string',\n 'description': 'The name of the repository being composed.'\n },\n },\n 'required': ['agent', 'repo'],\n }\n\n topic = \"bodhi.compose.sync.done\"\n\n @property\n def repo(self) -> str:\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('repo')\n\n @property\n def summary(self) -> str:\n \"\"\"\n Return a short, human-readable representation of this message.\n\n This should provide a short summary of the message, much like the subject line\n of an email.\n\n Returns:\n A summary for this message.\n \"\"\"\n return f\"bodhi composer finished waiting for {self.repo} to hit the master mirror\"\n\n\nclass ComposeSyncWaitV1(BodhiMessage):\n \"\"\"Sent when a compose task sync is waiting.\"\"\"\n\n body_schema = {\n 'id': f'{SCHEMA_URL}/v1/bodhi.compose.sync.wait#',\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'description': 'Schema for message sent when the composer is waiting to sync to mirrors',\n 'type': 'object',\n 'properties': {\n 'agent': {\n 'type': 'string',\n 'description': 'The name of the user who started this compose.'\n },\n 'repo': {\n 'type': 'string',\n 'description': 'The name of the repository being composed.'\n },\n },\n 'required': ['agent', 'repo'],\n }\n\n topic = \"bodhi.compose.sync.wait\"\n\n @property\n def repo(self) -> str:\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('repo')\n\n @property\n def summary(self) -> str:\n \"\"\"\n Return a short, human-readable representation of this message.\n\n This should provide a short summary of the message, much like the subject line\n of an email.\n\n Returns:\n A summary for this message.\n \"\"\"\n return f\"bodhi composer is waiting for {self.repo} to hit the master mirror\"\n\n\nclass RepoDoneV1(BodhiMessage):\n \"\"\"Sent when a repo is created and ready to be signed or otherwise processed.\"\"\"\n\n body_schema = {\n 'id': f'{SCHEMA_URL}/v1/bodhi.repo.done#',\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'description': 'Schema for message sent when a repo is created and ready to be signed',\n 'type': 'object',\n 'properties': {\n 'agent': {\n 'type': 'string',\n 'description': 'The name of the user who started this compose.'\n },\n 'path': {\n 'type': 'string',\n 'description': 'The path of the repository that was composed.'\n },\n 'repo': {\n 'type': 'string',\n 'description': 'The name of the repository that was composed.'\n },\n },\n 'required': ['agent', 'path', 'repo'],\n }\n\n topic = \"bodhi.repo.done\"\n\n @property\n def repo(self) -> str:\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('repo')\n\n @property\n def summary(self) -> str:\n \"\"\"\n Return a short, human-readable representation of this message.\n\n This should provide a short summary of the message, much like the subject line\n of an email.\n\n Returns:\n A summary for this message.\n \"\"\"\n return f\"bodhi composer is finished building {self.repo}\"\n", "path": "bodhi/messages/schemas/compose.py"}], "after_files": [{"content": "# Copyright (C) 2018-2019 Red Hat, Inc.\n#\n# This program is free 
software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\"\"\"\nMessage schema for Composes.\n\nEach message is defined as a Python class. For details, see `fedora-messaging\n<https://fedora-messaging.readthedocs.io/en/stable/>`_ documentation on\nmessages.\n\"\"\"\n\nfrom .base import BodhiMessage, SCHEMA_URL\n\n\nclass ComposeCompleteV1(BodhiMessage):\n \"\"\"Sent when a compose task completes.\"\"\"\n\n body_schema = {\n 'id': f'{SCHEMA_URL}/v1/bodhi.compose.complete#',\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'description': 'Schema for message sent when composes finish',\n 'type': 'object',\n 'properties': {\n 'agent': {\n 'type': 'string',\n 'description': 'The name of the user who started this compose.'\n },\n 'repo': {\n 'type': 'string',\n 'description': 'The name of the repository being composed.'\n },\n 'success': {\n 'type': 'boolean',\n 'description': 'true if the compose was successful, false otherwise.'\n },\n 'ctype': {\n 'type': 'string',\n 'description': 'Type of the compose.'\n }\n },\n 'required': ['agent', 'repo', 'success'],\n }\n\n topic = \"bodhi.compose.complete\"\n\n @property\n def repo(self) -> str:\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('repo')\n\n @property\n def success(self) -> bool:\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('success')\n\n @property\n def ctype(self) -> str:\n \"\"\"Return the compose type.\"\"\"\n return self.body.get('ctype')\n\n @property\n def summary(self) -> str:\n \"\"\"\n Return a short, human-readable representation of this message.\n\n This should provide a short summary of the message, much like the subject line\n of an email.\n\n Returns:\n The message summary.\n \"\"\"\n if self.success:\n return f\"bodhi composer successfully composed {self.repo}\"\n else:\n return f\"bodhi composer failed to compose {self.repo}\"\n\n\nclass ComposeComposingV1(BodhiMessage):\n \"\"\"Sent when the compose task composes.\"\"\"\n\n body_schema = {\n 'id': f'{SCHEMA_URL}/v1/bodhi.compose.composing#',\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'description': 'Schema for message sent when composes start',\n 'type': 'object',\n 'properties': {\n 'agent': {\n 'type': 'string',\n 'description': 'The name of the user who started this compose.'\n },\n 'repo': {\n 'type': 'string',\n 'description': 'The name of the repository being composed.'\n },\n },\n 'required': ['agent', 'repo'],\n }\n\n topic = \"bodhi.compose.composing\"\n\n @property\n def repo(self) -> str:\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('repo')\n\n @property\n def summary(self) -> str:\n \"\"\"\n Return a short, human-readable representation of this message.\n\n This should provide a short summary of the message, much like the subject line\n of an email.\n\n Returns:\n A summary for this 
message.\n \"\"\"\n return f\"bodhi composer started composing {self.repo}\"\n\n\nclass ComposeStartV1(BodhiMessage):\n \"\"\"Sent when a compose task starts.\"\"\"\n\n body_schema = {\n 'id': f'{SCHEMA_URL}/v1/bodhi.compose.start#',\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'description': 'Schema for message sent when composes start',\n 'type': 'object',\n 'properties': {\n 'agent': {\n 'type': 'string',\n 'description': 'The name of the user who started this compose.'\n },\n },\n 'required': ['agent'],\n }\n\n topic = \"bodhi.compose.start\"\n\n @property\n def summary(self) -> str:\n \"\"\"\n Return a short, human-readable representation of this message.\n\n This should provide a short summary of the message, much like the subject line\n of an email.\n\n Returns:\n A summary for this message.\n \"\"\"\n return \"bodhi composer started a push\"\n\n\nclass ComposeSyncDoneV1(BodhiMessage):\n \"\"\"Sent when a compose task sync is done.\"\"\"\n\n body_schema = {\n 'id': f'{SCHEMA_URL}/v1/bodhi.compose.sync.done#',\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'description': ('Schema for message sent when the composer is done waiting to sync to '\n 'mirrors'),\n 'type': 'object',\n 'properties': {\n 'agent': {\n 'type': 'string',\n 'description': 'The name of the user who started this compose.'\n },\n 'repo': {\n 'type': 'string',\n 'description': 'The name of the repository being composed.'\n },\n },\n 'required': ['agent', 'repo'],\n }\n\n topic = \"bodhi.compose.sync.done\"\n\n @property\n def repo(self) -> str:\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('repo')\n\n @property\n def summary(self) -> str:\n \"\"\"\n Return a short, human-readable representation of this message.\n\n This should provide a short summary of the message, much like the subject line\n of an email.\n\n Returns:\n A summary for this message.\n \"\"\"\n return f\"bodhi composer finished waiting for {self.repo} to hit the master mirror\"\n\n\nclass ComposeSyncWaitV1(BodhiMessage):\n \"\"\"Sent when a compose task sync is waiting.\"\"\"\n\n body_schema = {\n 'id': f'{SCHEMA_URL}/v1/bodhi.compose.sync.wait#',\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'description': 'Schema for message sent when the composer is waiting to sync to mirrors',\n 'type': 'object',\n 'properties': {\n 'agent': {\n 'type': 'string',\n 'description': 'The name of the user who started this compose.'\n },\n 'repo': {\n 'type': 'string',\n 'description': 'The name of the repository being composed.'\n },\n },\n 'required': ['agent', 'repo'],\n }\n\n topic = \"bodhi.compose.sync.wait\"\n\n @property\n def repo(self) -> str:\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('repo')\n\n @property\n def summary(self) -> str:\n \"\"\"\n Return a short, human-readable representation of this message.\n\n This should provide a short summary of the message, much like the subject line\n of an email.\n\n Returns:\n A summary for this message.\n \"\"\"\n return f\"bodhi composer is waiting for {self.repo} to hit the master mirror\"\n\n\nclass RepoDoneV1(BodhiMessage):\n \"\"\"Sent when a repo is created and ready to be signed or otherwise processed.\"\"\"\n\n body_schema = {\n 'id': f'{SCHEMA_URL}/v1/bodhi.repo.done#',\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'description': 'Schema for message sent when a repo is created and ready to be signed',\n 'type': 'object',\n 'properties': {\n 'agent': {\n 'type': 'string',\n 
'description': 'The name of the user who started this compose.'\n },\n 'path': {\n 'type': 'string',\n 'description': 'The path of the repository that was composed.'\n },\n 'repo': {\n 'type': 'string',\n 'description': 'The name of the repository that was composed.'\n },\n },\n 'required': ['agent', 'path', 'repo'],\n }\n\n topic = \"bodhi.repo.done\"\n\n @property\n def repo(self) -> str:\n \"\"\"Return the name of the repository being composed.\"\"\"\n return self.body.get('repo')\n\n @property\n def summary(self) -> str:\n \"\"\"\n Return a short, human-readable representation of this message.\n\n This should provide a short summary of the message, much like the subject line\n of an email.\n\n Returns:\n A summary for this message.\n \"\"\"\n return f\"bodhi composer is finished building {self.repo}\"\n", "path": "bodhi/messages/schemas/compose.py"}]} | 3,285 | 212 |
gh_patches_debug_39450 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-229 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature: check `lambda` functions for the number of arguments
We already have the logic to check regular and `async` functions for the number of arguments.
https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/complexity/function.py#L155
But we need to check `lambda` functions as well, since one can create a `lambda` function that receives too many arguments. 
What needs to be done:
1. We need to implement a new handler for `Lambda` node in the visitor above
2. We need to unit test this behavior here: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_checkers/test_high_complexity.py
--- END ISSUE ---
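For a quick feel of what such a check has to count: the rule already used for regular functions (positional plus keyword-only arguments, plus one for `*args` and one for `**kwargs`) applies to `ast.Lambda` nodes unchanged, because a lambda carries the same `args` structure. The snippet below is only an illustrative, self-contained sketch of that counting logic using the standard `ast` module; the name `lambda_argument_count` is made up for the example and is not part of the codebase.
```python
import ast


def lambda_argument_count(node: ast.Lambda) -> int:
    """Mirrors the counting done in check_arguments_count for a non-method."""
    counter = len(node.args.args) + len(node.args.kwonlyargs)
    if node.args.vararg:   # *args
        counter += 1
    if node.args.kwarg:    # **kwargs
        counter += 1
    return counter


tree = ast.parse('handler = lambda a, b, c, d, e, *rest, **extra: a')
lam = next(n for n in ast.walk(tree) if isinstance(n, ast.Lambda))
print(lambda_argument_count(lam))  # prints 7
```
Wiring this in then amounts to adding a `visit_Lambda` handler on the visitor that passes the node to the existing `check_arguments_count`.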
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/types.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """
4 This module contains custom ``mypy`` types that we commonly use.
5
6 Policy
7 ------
8
9 If any of the following statements is true, move the type to this file:
10
11 - if type is used in multiple files
12 - if type is complex enough it has to be documented
13 - if type is very important for the public API
14
15 """
16
17 import ast
18 from typing import TYPE_CHECKING, Tuple, Type, Union
19
20 if TYPE_CHECKING: # pragma: no cover
21 # TODO: use Final types to annotate all constants
22 from typing_extensions import Protocol # noqa: Z435
23
24 # This solves cycle imports problem:
25 from .visitors import base # noqa: F401,Z300,Z435
26 else:
27 # We do not need to do anything if type checker is not working:
28 Protocol = object
29
30 #: Visitor type definition:
31 VisitorClass = Type['base.BaseVisitor']
32
33 #: In cases we need to work with both import types:
34 AnyImport = Union[ast.Import, ast.ImportFrom]
35
36 #: In cases we need to work with both function definitions:
37 AnyFunctionDef = Union[ast.FunctionDef, ast.AsyncFunctionDef]
38
39 #: Flake8 API format to return error messages:
40 CheckResult = Tuple[int, int, str, type]
41
42 #: Tuple of AST node types for declarative syntax:
43 AnyNodes = Tuple[Type[ast.AST], ...]
44
45
46 class ConfigurationOptions(Protocol):
47 """
48 Provides structure for the options we use in our checker.
49
50 Then this protocol is passed to each individual visitor and used there.
51 It uses structural sub-typing, and does not represent any kind of a real
52 class or structure.
53
54 This class actually works only when running type check.
55 At other cases it is just an ``object``.
56
57 See also:
58 https://mypy.readthedocs.io/en/latest/protocols.html
59
60 """
61
62 # General:
63 min_variable_length: int
64 i_control_code: bool
65
66 # Complexity:
67 max_arguments: int
68 max_local_variables: int
69 max_returns: int
70 max_expressions: int
71 max_offset_blocks: int
72 max_elifs: int
73 max_module_members: int
74 max_methods: int
75 max_line_complexity: int
76 max_jones_score: int
77 max_imports: int
78 max_conditions: int
79
80 # File names:
81 min_module_name_length: int
82
```
Path: `wemake_python_styleguide/visitors/ast/complexity/function.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from collections import defaultdict
5 from typing import DefaultDict, List
6
7 from wemake_python_styleguide.logics.functions import is_method
8 from wemake_python_styleguide.types import AnyFunctionDef
9 from wemake_python_styleguide.violations.complexity import (
10 TooManyArgumentsViolation,
11 TooManyElifsViolation,
12 TooManyExpressionsViolation,
13 TooManyLocalsViolation,
14 TooManyReturnsViolation,
15 )
16 from wemake_python_styleguide.visitors.base import BaseNodeVisitor
17 from wemake_python_styleguide.visitors.decorators import alias
18
19 FunctionCounter = DefaultDict[AnyFunctionDef, int]
20
21
22 class _ComplexityCounter(object):
23 """Helper class to encapsulate logic from the visitor."""
24
25 def __init__(self) -> None:
26 self.arguments: FunctionCounter = defaultdict(int)
27 self.elifs: FunctionCounter = defaultdict(int)
28 self.returns: FunctionCounter = defaultdict(int)
29 self.expressions: FunctionCounter = defaultdict(int)
30 self.variables: DefaultDict[
31 AnyFunctionDef, List[str],
32 ] = defaultdict(list)
33
34 def _update_variables(
35 self,
36 function: AnyFunctionDef,
37 variable_name: str,
38 ) -> None:
39 """
40 Increases the counter of local variables.
41
42 What is treated as a local variable?
43 Check ``TooManyLocalsViolation`` documentation.
44 """
45 function_variables = self.variables[function]
46 if variable_name not in function_variables and variable_name != '_':
47 function_variables.append(variable_name)
48
49 def _update_elifs(self, node: AnyFunctionDef, sub_node: ast.If) -> None:
50 has_elif = any(
51 isinstance(if_node, ast.If) for if_node in sub_node.orelse
52 )
53
54 if has_elif:
55 self.elifs[node] += 1
56
57 def _check_sub_node(self, node: AnyFunctionDef, sub_node) -> None:
58 is_variable = isinstance(sub_node, ast.Name)
59 context = getattr(sub_node, 'ctx', None)
60
61 if is_variable and isinstance(context, ast.Store):
62 self._update_variables(node, sub_node.id)
63 elif isinstance(sub_node, ast.Return):
64 self.returns[node] += 1
65 elif isinstance(sub_node, ast.Expr):
66 self.expressions[node] += 1
67 elif isinstance(sub_node, ast.If):
68 self._update_elifs(node, sub_node)
69
70 def check_arguments_count(self, node: AnyFunctionDef) -> None:
71 """Checks the number of the arguments in a function."""
72 counter = 0
73 has_extra_arg = 0
74 if is_method(getattr(node, 'function_type', None)):
75 has_extra_arg = 1
76
77 counter += len(node.args.args) + len(node.args.kwonlyargs)
78 if node.args.vararg:
79 counter += 1
80 if node.args.kwarg:
81 counter += 1
82
83 self.arguments[node] = counter - has_extra_arg
84
85 def check_function_complexity(self, node: AnyFunctionDef) -> None:
86 """
87 In this function we iterate all the internal body's node.
88
89 We check different complexity metrics based on these internals.
90 """
91 for body_item in node.body:
92 for sub_node in ast.walk(body_item):
93 self._check_sub_node(node, sub_node)
94
95
96 @alias('visit_any_function', (
97 'visit_AsyncFunctionDef',
98 'visit_FunctionDef',
99 ))
100 class FunctionComplexityVisitor(BaseNodeVisitor):
101 """
102 This class checks for complexity inside functions.
103
104 This includes:
105
106 1. Number of arguments
107 2. Number of `return` statements
108 3. Number of expressions
109 4. Number of local variables
110 5. Number of `elif` branches
111
112 """
113
114 def __init__(self, *args, **kwargs) -> None:
115 """Creates a counter for tracked metrics."""
116 super().__init__(*args, **kwargs)
117 self._counter = _ComplexityCounter()
118
119 def _check_possible_switch(self) -> None:
120 for node, elifs in self._counter.elifs.items():
121 if elifs > self.options.max_elifs:
122 self.add_violation(TooManyElifsViolation(node))
123
124 def _check_function_internals(self) -> None:
125 for node, variables in self._counter.variables.items():
126 if len(variables) > self.options.max_local_variables:
127 self.add_violation(
128 TooManyLocalsViolation(node, text=node.name),
129 )
130
131 for node, expressions in self._counter.expressions.items():
132 if expressions > self.options.max_expressions:
133 self.add_violation(
134 TooManyExpressionsViolation(node, text=node.name),
135 )
136
137 def _check_function_signature(self) -> None:
138 for node, arguments in self._counter.arguments.items():
139 if arguments > self.options.max_arguments:
140 self.add_violation(
141 TooManyArgumentsViolation(node, text=str(arguments)),
142 )
143
144 for node, returns in self._counter.returns.items():
145 if returns > self.options.max_returns:
146 self.add_violation(
147 TooManyReturnsViolation(node, text=node.name),
148 )
149
150 def _post_visit(self) -> None:
151 self._check_function_signature()
152 self._check_function_internals()
153 self._check_possible_switch()
154
155 def visit_any_function(self, node: AnyFunctionDef) -> None:
156 """
157 Checks function's internal complexity.
158
159 Raises:
160 TooManyExpressionsViolation
161 TooManyReturnsViolation
162 TooManyLocalsViolation
163 TooManyArgumentsViolation
164 TooManyElifsViolation
165
166 """
167 self._counter.check_arguments_count(node)
168 self._counter.check_function_complexity(node)
169 self.generic_visit(node)
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wemake_python_styleguide/types.py b/wemake_python_styleguide/types.py
--- a/wemake_python_styleguide/types.py
+++ b/wemake_python_styleguide/types.py
@@ -36,6 +36,13 @@
#: In cases we need to work with both function definitions:
AnyFunctionDef = Union[ast.FunctionDef, ast.AsyncFunctionDef]
+#: In cases we need to work with all function definitions (including Lambda)
+AnyFunctionDefAndLambda = Union[
+ ast.FunctionDef,
+ ast.AsyncFunctionDef,
+ ast.Lambda,
+]
+
#: Flake8 API format to return error messages:
CheckResult = Tuple[int, int, str, type]
diff --git a/wemake_python_styleguide/visitors/ast/complexity/function.py b/wemake_python_styleguide/visitors/ast/complexity/function.py
--- a/wemake_python_styleguide/visitors/ast/complexity/function.py
+++ b/wemake_python_styleguide/visitors/ast/complexity/function.py
@@ -5,7 +5,10 @@
from typing import DefaultDict, List
from wemake_python_styleguide.logics.functions import is_method
-from wemake_python_styleguide.types import AnyFunctionDef
+from wemake_python_styleguide.types import (
+ AnyFunctionDef,
+ AnyFunctionDefAndLambda,
+)
from wemake_python_styleguide.violations.complexity import (
TooManyArgumentsViolation,
TooManyElifsViolation,
@@ -17,13 +20,14 @@
from wemake_python_styleguide.visitors.decorators import alias
FunctionCounter = DefaultDict[AnyFunctionDef, int]
+FunctionCounterWithLambda = DefaultDict[AnyFunctionDefAndLambda, int]
class _ComplexityCounter(object):
"""Helper class to encapsulate logic from the visitor."""
def __init__(self) -> None:
- self.arguments: FunctionCounter = defaultdict(int)
+ self.arguments: FunctionCounterWithLambda = defaultdict(int)
self.elifs: FunctionCounter = defaultdict(int)
self.returns: FunctionCounter = defaultdict(int)
self.expressions: FunctionCounter = defaultdict(int)
@@ -67,7 +71,7 @@
elif isinstance(sub_node, ast.If):
self._update_elifs(node, sub_node)
- def check_arguments_count(self, node: AnyFunctionDef) -> None:
+ def check_arguments_count(self, node: AnyFunctionDefAndLambda) -> None:
"""Checks the number of the arguments in a function."""
counter = 0
has_extra_arg = 0
@@ -167,3 +171,14 @@
self._counter.check_arguments_count(node)
self._counter.check_function_complexity(node)
self.generic_visit(node)
+
+ def visit_Lambda(self, node: ast.Lambda) -> None:
+ """
+ Checks lambda function's internal complexity.
+
+ Raises:
+ TooManyArgumentsViolation
+
+ """
+ self._counter.check_arguments_count(node)
+ self.generic_visit(node)
| {"golden_diff": "diff --git a/wemake_python_styleguide/types.py b/wemake_python_styleguide/types.py\n--- a/wemake_python_styleguide/types.py\n+++ b/wemake_python_styleguide/types.py\n@@ -36,6 +36,13 @@\n #: In cases we need to work with both function definitions:\n AnyFunctionDef = Union[ast.FunctionDef, ast.AsyncFunctionDef]\n \n+#: In cases we need to work with all function definitions (including Lambda)\n+AnyFunctionDefAndLambda = Union[\n+ ast.FunctionDef,\n+ ast.AsyncFunctionDef,\n+ ast.Lambda,\n+]\n+\n #: Flake8 API format to return error messages:\n CheckResult = Tuple[int, int, str, type]\n \ndiff --git a/wemake_python_styleguide/visitors/ast/complexity/function.py b/wemake_python_styleguide/visitors/ast/complexity/function.py\n--- a/wemake_python_styleguide/visitors/ast/complexity/function.py\n+++ b/wemake_python_styleguide/visitors/ast/complexity/function.py\n@@ -5,7 +5,10 @@\n from typing import DefaultDict, List\n \n from wemake_python_styleguide.logics.functions import is_method\n-from wemake_python_styleguide.types import AnyFunctionDef\n+from wemake_python_styleguide.types import (\n+ AnyFunctionDef,\n+ AnyFunctionDefAndLambda,\n+)\n from wemake_python_styleguide.violations.complexity import (\n TooManyArgumentsViolation,\n TooManyElifsViolation,\n@@ -17,13 +20,14 @@\n from wemake_python_styleguide.visitors.decorators import alias\n \n FunctionCounter = DefaultDict[AnyFunctionDef, int]\n+FunctionCounterWithLambda = DefaultDict[AnyFunctionDefAndLambda, int]\n \n \n class _ComplexityCounter(object):\n \"\"\"Helper class to encapsulate logic from the visitor.\"\"\"\n \n def __init__(self) -> None:\n- self.arguments: FunctionCounter = defaultdict(int)\n+ self.arguments: FunctionCounterWithLambda = defaultdict(int)\n self.elifs: FunctionCounter = defaultdict(int)\n self.returns: FunctionCounter = defaultdict(int)\n self.expressions: FunctionCounter = defaultdict(int)\n@@ -67,7 +71,7 @@\n elif isinstance(sub_node, ast.If):\n self._update_elifs(node, sub_node)\n \n- def check_arguments_count(self, node: AnyFunctionDef) -> None:\n+ def check_arguments_count(self, node: AnyFunctionDefAndLambda) -> None:\n \"\"\"Checks the number of the arguments in a function.\"\"\"\n counter = 0\n has_extra_arg = 0\n@@ -167,3 +171,14 @@\n self._counter.check_arguments_count(node)\n self._counter.check_function_complexity(node)\n self.generic_visit(node)\n+\n+ def visit_Lambda(self, node: ast.Lambda) -> None:\n+ \"\"\"\n+ Checks lambda function's internal complexity.\n+\n+ Raises:\n+ TooManyArgumentsViolation\n+\n+ \"\"\"\n+ self._counter.check_arguments_count(node)\n+ self.generic_visit(node)\n", "issue": "Feature: check `lambda` functions for the number of arguments\nWe already have the logic to check regular an `async` functions for the number of arguments.\r\nhttps://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/complexity/function.py#L155\r\n\r\nBut we need to check `lambda` functions as well. Since one can create a `lambda` function that receives too many arguments. \r\n\r\nWhat needs to be done:\r\n1. We need to implement a new handler for `Lambda` node in the visitor above\r\n2. 
We need to unit test this behavior here: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_checkers/test_high_complexity.py\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThis module contains custom ``mypy`` types that we commonly use.\n\nPolicy\n------\n\nIf any of the following statements is true, move the type to this file:\n\n- if type is used in multiple files\n- if type is complex enough it has to be documented\n- if type is very important for the public API\n\n\"\"\"\n\nimport ast\nfrom typing import TYPE_CHECKING, Tuple, Type, Union\n\nif TYPE_CHECKING: # pragma: no cover\n # TODO: use Final types to annotate all constants\n from typing_extensions import Protocol # noqa: Z435\n\n # This solves cycle imports problem:\n from .visitors import base # noqa: F401,Z300,Z435\nelse:\n # We do not need to do anything if type checker is not working:\n Protocol = object\n\n#: Visitor type definition:\nVisitorClass = Type['base.BaseVisitor']\n\n#: In cases we need to work with both import types:\nAnyImport = Union[ast.Import, ast.ImportFrom]\n\n#: In cases we need to work with both function definitions:\nAnyFunctionDef = Union[ast.FunctionDef, ast.AsyncFunctionDef]\n\n#: Flake8 API format to return error messages:\nCheckResult = Tuple[int, int, str, type]\n\n#: Tuple of AST node types for declarative syntax:\nAnyNodes = Tuple[Type[ast.AST], ...]\n\n\nclass ConfigurationOptions(Protocol):\n \"\"\"\n Provides structure for the options we use in our checker.\n\n Then this protocol is passed to each individual visitor and used there.\n It uses structural sub-typing, and does not represent any kind of a real\n class or structure.\n\n This class actually works only when running type check.\n At other cases it is just an ``object``.\n\n See also:\n https://mypy.readthedocs.io/en/latest/protocols.html\n\n \"\"\"\n\n # General:\n min_variable_length: int\n i_control_code: bool\n\n # Complexity:\n max_arguments: int\n max_local_variables: int\n max_returns: int\n max_expressions: int\n max_offset_blocks: int\n max_elifs: int\n max_module_members: int\n max_methods: int\n max_line_complexity: int\n max_jones_score: int\n max_imports: int\n max_conditions: int\n\n # File names:\n min_module_name_length: int\n", "path": "wemake_python_styleguide/types.py"}, {"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom collections import defaultdict\nfrom typing import DefaultDict, List\n\nfrom wemake_python_styleguide.logics.functions import is_method\nfrom wemake_python_styleguide.types import AnyFunctionDef\nfrom wemake_python_styleguide.violations.complexity import (\n TooManyArgumentsViolation,\n TooManyElifsViolation,\n TooManyExpressionsViolation,\n TooManyLocalsViolation,\n TooManyReturnsViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\nfrom wemake_python_styleguide.visitors.decorators import alias\n\nFunctionCounter = DefaultDict[AnyFunctionDef, int]\n\n\nclass _ComplexityCounter(object):\n \"\"\"Helper class to encapsulate logic from the visitor.\"\"\"\n\n def __init__(self) -> None:\n self.arguments: FunctionCounter = defaultdict(int)\n self.elifs: FunctionCounter = defaultdict(int)\n self.returns: FunctionCounter = defaultdict(int)\n self.expressions: FunctionCounter = defaultdict(int)\n self.variables: DefaultDict[\n AnyFunctionDef, List[str],\n ] = defaultdict(list)\n\n def _update_variables(\n self,\n function: AnyFunctionDef,\n variable_name: str,\n ) -> None:\n \"\"\"\n Increases the counter of local 
variables.\n\n What is treated as a local variable?\n Check ``TooManyLocalsViolation`` documentation.\n \"\"\"\n function_variables = self.variables[function]\n if variable_name not in function_variables and variable_name != '_':\n function_variables.append(variable_name)\n\n def _update_elifs(self, node: AnyFunctionDef, sub_node: ast.If) -> None:\n has_elif = any(\n isinstance(if_node, ast.If) for if_node in sub_node.orelse\n )\n\n if has_elif:\n self.elifs[node] += 1\n\n def _check_sub_node(self, node: AnyFunctionDef, sub_node) -> None:\n is_variable = isinstance(sub_node, ast.Name)\n context = getattr(sub_node, 'ctx', None)\n\n if is_variable and isinstance(context, ast.Store):\n self._update_variables(node, sub_node.id)\n elif isinstance(sub_node, ast.Return):\n self.returns[node] += 1\n elif isinstance(sub_node, ast.Expr):\n self.expressions[node] += 1\n elif isinstance(sub_node, ast.If):\n self._update_elifs(node, sub_node)\n\n def check_arguments_count(self, node: AnyFunctionDef) -> None:\n \"\"\"Checks the number of the arguments in a function.\"\"\"\n counter = 0\n has_extra_arg = 0\n if is_method(getattr(node, 'function_type', None)):\n has_extra_arg = 1\n\n counter += len(node.args.args) + len(node.args.kwonlyargs)\n if node.args.vararg:\n counter += 1\n if node.args.kwarg:\n counter += 1\n\n self.arguments[node] = counter - has_extra_arg\n\n def check_function_complexity(self, node: AnyFunctionDef) -> None:\n \"\"\"\n In this function we iterate all the internal body's node.\n\n We check different complexity metrics based on these internals.\n \"\"\"\n for body_item in node.body:\n for sub_node in ast.walk(body_item):\n self._check_sub_node(node, sub_node)\n\n\n@alias('visit_any_function', (\n 'visit_AsyncFunctionDef',\n 'visit_FunctionDef',\n))\nclass FunctionComplexityVisitor(BaseNodeVisitor):\n \"\"\"\n This class checks for complexity inside functions.\n\n This includes:\n\n 1. Number of arguments\n 2. Number of `return` statements\n 3. Number of expressions\n 4. Number of local variables\n 5. 
Number of `elif` branches\n\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Creates a counter for tracked metrics.\"\"\"\n super().__init__(*args, **kwargs)\n self._counter = _ComplexityCounter()\n\n def _check_possible_switch(self) -> None:\n for node, elifs in self._counter.elifs.items():\n if elifs > self.options.max_elifs:\n self.add_violation(TooManyElifsViolation(node))\n\n def _check_function_internals(self) -> None:\n for node, variables in self._counter.variables.items():\n if len(variables) > self.options.max_local_variables:\n self.add_violation(\n TooManyLocalsViolation(node, text=node.name),\n )\n\n for node, expressions in self._counter.expressions.items():\n if expressions > self.options.max_expressions:\n self.add_violation(\n TooManyExpressionsViolation(node, text=node.name),\n )\n\n def _check_function_signature(self) -> None:\n for node, arguments in self._counter.arguments.items():\n if arguments > self.options.max_arguments:\n self.add_violation(\n TooManyArgumentsViolation(node, text=str(arguments)),\n )\n\n for node, returns in self._counter.returns.items():\n if returns > self.options.max_returns:\n self.add_violation(\n TooManyReturnsViolation(node, text=node.name),\n )\n\n def _post_visit(self) -> None:\n self._check_function_signature()\n self._check_function_internals()\n self._check_possible_switch()\n\n def visit_any_function(self, node: AnyFunctionDef) -> None:\n \"\"\"\n Checks function's internal complexity.\n\n Raises:\n TooManyExpressionsViolation\n TooManyReturnsViolation\n TooManyLocalsViolation\n TooManyArgumentsViolation\n TooManyElifsViolation\n\n \"\"\"\n self._counter.check_arguments_count(node)\n self._counter.check_function_complexity(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/complexity/function.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThis module contains custom ``mypy`` types that we commonly use.\n\nPolicy\n------\n\nIf any of the following statements is true, move the type to this file:\n\n- if type is used in multiple files\n- if type is complex enough it has to be documented\n- if type is very important for the public API\n\n\"\"\"\n\nimport ast\nfrom typing import TYPE_CHECKING, Tuple, Type, Union\n\nif TYPE_CHECKING: # pragma: no cover\n # TODO: use Final types to annotate all constants\n from typing_extensions import Protocol # noqa: Z435\n\n # This solves cycle imports problem:\n from .visitors import base # noqa: F401,Z300,Z435\nelse:\n # We do not need to do anything if type checker is not working:\n Protocol = object\n\n#: Visitor type definition:\nVisitorClass = Type['base.BaseVisitor']\n\n#: In cases we need to work with both import types:\nAnyImport = Union[ast.Import, ast.ImportFrom]\n\n#: In cases we need to work with both function definitions:\nAnyFunctionDef = Union[ast.FunctionDef, ast.AsyncFunctionDef]\n\n#: In cases we need to work with all function definitions (including Lambda)\nAnyFunctionDefAndLambda = Union[\n ast.FunctionDef,\n ast.AsyncFunctionDef,\n ast.Lambda,\n]\n\n#: Flake8 API format to return error messages:\nCheckResult = Tuple[int, int, str, type]\n\n#: Tuple of AST node types for declarative syntax:\nAnyNodes = Tuple[Type[ast.AST], ...]\n\n\nclass ConfigurationOptions(Protocol):\n \"\"\"\n Provides structure for the options we use in our checker.\n\n Then this protocol is passed to each individual visitor and used there.\n It uses structural sub-typing, and does not represent any kind of a real\n class or structure.\n\n 
This class actually works only when running type check.\n At other cases it is just an ``object``.\n\n See also:\n https://mypy.readthedocs.io/en/latest/protocols.html\n\n \"\"\"\n\n # General:\n min_variable_length: int\n i_control_code: bool\n\n # Complexity:\n max_arguments: int\n max_local_variables: int\n max_returns: int\n max_expressions: int\n max_offset_blocks: int\n max_elifs: int\n max_module_members: int\n max_methods: int\n max_line_complexity: int\n max_jones_score: int\n max_imports: int\n max_conditions: int\n\n # File names:\n min_module_name_length: int\n", "path": "wemake_python_styleguide/types.py"}, {"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom collections import defaultdict\nfrom typing import DefaultDict, List\n\nfrom wemake_python_styleguide.logics.functions import is_method\nfrom wemake_python_styleguide.types import (\n AnyFunctionDef,\n AnyFunctionDefAndLambda,\n)\nfrom wemake_python_styleguide.violations.complexity import (\n TooManyArgumentsViolation,\n TooManyElifsViolation,\n TooManyExpressionsViolation,\n TooManyLocalsViolation,\n TooManyReturnsViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\nfrom wemake_python_styleguide.visitors.decorators import alias\n\nFunctionCounter = DefaultDict[AnyFunctionDef, int]\nFunctionCounterWithLambda = DefaultDict[AnyFunctionDefAndLambda, int]\n\n\nclass _ComplexityCounter(object):\n \"\"\"Helper class to encapsulate logic from the visitor.\"\"\"\n\n def __init__(self) -> None:\n self.arguments: FunctionCounterWithLambda = defaultdict(int)\n self.elifs: FunctionCounter = defaultdict(int)\n self.returns: FunctionCounter = defaultdict(int)\n self.expressions: FunctionCounter = defaultdict(int)\n self.variables: DefaultDict[\n AnyFunctionDef, List[str],\n ] = defaultdict(list)\n\n def _update_variables(\n self,\n function: AnyFunctionDef,\n variable_name: str,\n ) -> None:\n \"\"\"\n Increases the counter of local variables.\n\n What is treated as a local variable?\n Check ``TooManyLocalsViolation`` documentation.\n \"\"\"\n function_variables = self.variables[function]\n if variable_name not in function_variables and variable_name != '_':\n function_variables.append(variable_name)\n\n def _update_elifs(self, node: AnyFunctionDef, sub_node: ast.If) -> None:\n has_elif = any(\n isinstance(if_node, ast.If) for if_node in sub_node.orelse\n )\n\n if has_elif:\n self.elifs[node] += 1\n\n def _check_sub_node(self, node: AnyFunctionDef, sub_node) -> None:\n is_variable = isinstance(sub_node, ast.Name)\n context = getattr(sub_node, 'ctx', None)\n\n if is_variable and isinstance(context, ast.Store):\n self._update_variables(node, sub_node.id)\n elif isinstance(sub_node, ast.Return):\n self.returns[node] += 1\n elif isinstance(sub_node, ast.Expr):\n self.expressions[node] += 1\n elif isinstance(sub_node, ast.If):\n self._update_elifs(node, sub_node)\n\n def check_arguments_count(self, node: AnyFunctionDefAndLambda) -> None:\n \"\"\"Checks the number of the arguments in a function.\"\"\"\n counter = 0\n has_extra_arg = 0\n if is_method(getattr(node, 'function_type', None)):\n has_extra_arg = 1\n\n counter += len(node.args.args) + len(node.args.kwonlyargs)\n if node.args.vararg:\n counter += 1\n if node.args.kwarg:\n counter += 1\n\n self.arguments[node] = counter - has_extra_arg\n\n def check_function_complexity(self, node: AnyFunctionDef) -> None:\n \"\"\"\n In this function we iterate all the internal body's node.\n\n We check different complexity metrics based on these internals.\n \"\"\"\n 
for body_item in node.body:\n for sub_node in ast.walk(body_item):\n self._check_sub_node(node, sub_node)\n\n\n@alias('visit_any_function', (\n 'visit_AsyncFunctionDef',\n 'visit_FunctionDef',\n))\nclass FunctionComplexityVisitor(BaseNodeVisitor):\n \"\"\"\n This class checks for complexity inside functions.\n\n This includes:\n\n 1. Number of arguments\n 2. Number of `return` statements\n 3. Number of expressions\n 4. Number of local variables\n 5. Number of `elif` branches\n\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Creates a counter for tracked metrics.\"\"\"\n super().__init__(*args, **kwargs)\n self._counter = _ComplexityCounter()\n\n def _check_possible_switch(self) -> None:\n for node, elifs in self._counter.elifs.items():\n if elifs > self.options.max_elifs:\n self.add_violation(TooManyElifsViolation(node))\n\n def _check_function_internals(self) -> None:\n for node, variables in self._counter.variables.items():\n if len(variables) > self.options.max_local_variables:\n self.add_violation(\n TooManyLocalsViolation(node, text=node.name),\n )\n\n for node, expressions in self._counter.expressions.items():\n if expressions > self.options.max_expressions:\n self.add_violation(\n TooManyExpressionsViolation(node, text=node.name),\n )\n\n def _check_function_signature(self) -> None:\n for node, arguments in self._counter.arguments.items():\n if arguments > self.options.max_arguments:\n self.add_violation(\n TooManyArgumentsViolation(node, text=str(arguments)),\n )\n\n for node, returns in self._counter.returns.items():\n if returns > self.options.max_returns:\n self.add_violation(\n TooManyReturnsViolation(node, text=node.name),\n )\n\n def _post_visit(self) -> None:\n self._check_function_signature()\n self._check_function_internals()\n self._check_possible_switch()\n\n def visit_any_function(self, node: AnyFunctionDef) -> None:\n \"\"\"\n Checks function's internal complexity.\n\n Raises:\n TooManyExpressionsViolation\n TooManyReturnsViolation\n TooManyLocalsViolation\n TooManyArgumentsViolation\n TooManyElifsViolation\n\n \"\"\"\n self._counter.check_arguments_count(node)\n self._counter.check_function_complexity(node)\n self.generic_visit(node)\n\n def visit_Lambda(self, node: ast.Lambda) -> None:\n \"\"\"\n Checks lambda function's internal complexity.\n\n Raises:\n TooManyArgumentsViolation\n\n \"\"\"\n self._counter.check_arguments_count(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/complexity/function.py"}]} | 2,791 | 674 |
gh_patches_debug_38788 | rasdani/github-patches | git_diff | spack__spack-1699 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adios does not build with HDF5 1.10
The `adios` variant `+hdf5` is broken. The error is
```
In file included from /xfs1/eschnetter/src/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.1.1/hdf5-1.10.0-patch1-ijwvzsvpcbne33bffg6nrefdwgguofji/include/H5public.h:145:0,
from /xfs1/eschnetter/src/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.1.1/hdf5-1.10.0-patch1-ijwvzsvpcbne33bffg6nrefdwgguofji/include/hdf5.h:24,
from bp2h5.c:39:
bp2h5.c:46:13: error: two or more data types in declaration specifiers
typedef int bool;
^
bp2h5.c:46:1: warning: useless type name in empty declaration
typedef int bool;
^~~~~~~
bp2h5.c: In function ‘main’:
bp2h5.c:173:17: warning: implicit declaration of function ‘H5LTmake_dataset’ [-Wimplicit-function-declaration]
H5LTmake_dataset(HDF5_FILE,h5name,1,count,h5_type_id,v->value);
^~~~~~~~~~~~~~~~
make[3]: *** [bp2h5-bp2h5.o] Error 1
make[3]: Leaving directory `/tmp/eschnetter/spack-stage/spack-stage-0LLmsR/ADIOS-1.10.0/utils/bp2h5'
make[2]: *** [all-recursive] Error 1
make[2]: Leaving directory `/tmp/eschnetter/spack-stage/spack-stage-0LLmsR/ADIOS-1.10.0/utils'
make[1]: *** [all-recursive] Error 1
make[1]: Leaving directory `/tmp/eschnetter/spack-stage/spack-stage-0LLmsR/ADIOS-1.10.0'
```
It seems that Adios expects `bool` to be undefined, but it is already defined. It might be that HDF5 1.10 includes `<stdbool.h>`.
--- END ISSUE ---
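If, as the report suggests, HDF5 1.10's public headers now provide a C99 `bool` (via `<stdbool.h>`), the `typedef int bool;` in `utils/bp2h5/bp2h5.c` can no longer compile against them. Until ADIOS itself supports HDF5 1.10, one conservative direction for the recipe is to cap the optional HDF5 transport at the 1.8 series; the line below is only a sketch of that idea, and the exact bound is an assumption rather than a verified fix.
```python
# Sketch only: keep the optional HDF5 transport on the 1.8 API until the
# bp2h5 utility is compatible with HDF5 1.10 (the upper bound is an assumption).
depends_on('hdf5@:1.8', when='+hdf5')
```
Alternatively, the `typedef` in `bp2h5.c` could be guarded behind a small patch shipped with the package, but that touches upstream sources rather than the recipe.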
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/adios/package.py`
Content:
```
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25
26 from spack import *
27
28
29 class Adios(Package):
30 """The Adaptable IO System (ADIOS) provides a simple,
31 flexible way for scientists to describe the
32 data in their code that may need to be written,
33 read, or processed outside of the running simulation.
34 """
35
36 homepage = "http://www.olcf.ornl.gov/center-projects/adios/"
37 url = "https://github.com/ornladios/ADIOS/archive/v1.10.0.tar.gz"
38
39 version('1.10.0', 'eff450a4c0130479417cfd63186957f3')
40 version('1.9.0', '310ff02388bbaa2b1c1710ee970b5678')
41
42 variant('shared', default=True,
43 description='Builds a shared version of the library')
44
45 variant('fortran', default=False,
46 description='Enable Fortran bindings support')
47
48 variant('mpi', default=True, description='Enable MPI support')
49 variant('infiniband', default=False, description='Enable infiniband support')
50
51 variant('zlib', default=True, description='Enable szip transform support')
52 variant('szip', default=False, description='Enable szip transform support')
53 variant('hdf5', default=False, description='Enable HDF5 transport support')
54 variant('netcdf', default=False, description='Enable NetCDF transport support')
55
56 # Lots of setting up here for this package
57 # module swap PrgEnv-intel PrgEnv-$COMP
58 # module load cray-netcdf/4.3.3.1
59 # module load cray-hdf5/1.8.14
60 # module load python/2.7.10
61
62 depends_on('autoconf', type='build')
63 depends_on('automake', type='build')
64 depends_on('libtool', type='build')
65 depends_on('python', type='build')
66
67 depends_on('mpi', when='+mpi')
68 depends_on('[email protected]:')
69 # optional transformations
70 depends_on('zlib', when='+zlib')
71 depends_on('szip', when='+szip')
72 # optional transports
73 depends_on('hdf5', when='+hdf5')
74 depends_on('netcdf', when='+netcdf')
75
76 def validate(self, spec):
77 """
78 Checks if incompatible variants have been activated at the same time
79 :param spec: spec of the package
80 :raises RuntimeError: in case of inconsistencies
81 """
82 if '+fortran' in spec and not self.compiler.fc:
83 msg = 'cannot build a fortran variant without a fortran compiler'
84 raise RuntimeError(msg)
85
86 def install(self, spec, prefix):
87 self.validate(spec)
88 # Handle compilation after spec validation
89 extra_args = []
90
91 # required, otherwise building its python bindings on ADIOS will fail
92 extra_args.append("CFLAGS=-fPIC")
93
94 # always build external MXML, even in ADIOS 1.10.0+
95 extra_args.append('--with-mxml=%s' % spec['mxml'].prefix)
96
97 if '+shared' in spec:
98 extra_args.append('--enable-shared')
99
100 if '+mpi' in spec:
101 extra_args.append('--with-mpi')
102 if '+infiniband' in spec:
103 extra_args.append('--with-infiniband')
104 else:
105 extra_args.append('--with-infiniband=no')
106
107 if '+fortran' in spec:
108 extra_args.append('--enable-fortran')
109 else:
110 extra_args.append('--disable-fortran')
111
112 if '+zlib' in spec:
113 extra_args.append('--with-zlib=%s' % spec['zlib'].prefix)
114 if '+szip' in spec:
115 extra_args.append('--with-szip=%s' % spec['szip'].prefix)
116 if '+hdf5' in spec:
117 extra_args.append('--with-hdf5=%s' % spec['hdf5'].prefix)
118 if '+netcdf' in spec:
119 extra_args.append('--with-netcdf=%s' % spec['netcdf'].prefix)
120
121 sh = which('sh')
122 sh('./autogen.sh')
123
124 configure("--prefix=%s" % prefix,
125 *extra_args)
126 make()
127 make("install")
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/adios/package.py b/var/spack/repos/builtin/packages/adios/package.py
--- a/var/spack/repos/builtin/packages/adios/package.py
+++ b/var/spack/repos/builtin/packages/adios/package.py
@@ -36,6 +36,8 @@
homepage = "http://www.olcf.ornl.gov/center-projects/adios/"
url = "https://github.com/ornladios/ADIOS/archive/v1.10.0.tar.gz"
+ version('develop', git='https://github.com/ornladios/ADIOS.git',
+ branch='master')
version('1.10.0', 'eff450a4c0130479417cfd63186957f3')
version('1.9.0', '310ff02388bbaa2b1c1710ee970b5678')
@@ -48,14 +50,14 @@
variant('mpi', default=True, description='Enable MPI support')
variant('infiniband', default=False, description='Enable infiniband support')
+ # transforms
variant('zlib', default=True, description='Enable szip transform support')
variant('szip', default=False, description='Enable szip transform support')
- variant('hdf5', default=False, description='Enable HDF5 transport support')
- variant('netcdf', default=False, description='Enable NetCDF transport support')
+ # transports and serial file converters
+ variant('hdf5', default=False, description='Enable parallel HDF5 transport and serial bp2h5 converter')
# Lots of setting up here for this package
# module swap PrgEnv-intel PrgEnv-$COMP
- # module load cray-netcdf/4.3.3.1
# module load cray-hdf5/1.8.14
# module load python/2.7.10
@@ -69,9 +71,13 @@
# optional transformations
depends_on('zlib', when='+zlib')
depends_on('szip', when='+szip')
- # optional transports
- depends_on('hdf5', when='+hdf5')
- depends_on('netcdf', when='+netcdf')
+ # optional transports & file converters
+ depends_on('[email protected]:+mpi', when='+hdf5')
+
+ # Fix ADIOS <=1.10.0 compile error on HDF5 1.10+
+ # https://github.com/ornladios/ADIOS/commit/3b21a8a41509
+ # https://github.com/LLNL/spack/issues/1683
+ patch('adios_1100.patch', when='@:1.10.0^[email protected]:')
def validate(self, spec):
"""
@@ -114,9 +120,7 @@
if '+szip' in spec:
extra_args.append('--with-szip=%s' % spec['szip'].prefix)
if '+hdf5' in spec:
- extra_args.append('--with-hdf5=%s' % spec['hdf5'].prefix)
- if '+netcdf' in spec:
- extra_args.append('--with-netcdf=%s' % spec['netcdf'].prefix)
+ extra_args.append('--with-phdf5=%s' % spec['hdf5'].prefix)
sh = which('sh')
sh('./autogen.sh')
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/adios/package.py b/var/spack/repos/builtin/packages/adios/package.py\n--- a/var/spack/repos/builtin/packages/adios/package.py\n+++ b/var/spack/repos/builtin/packages/adios/package.py\n@@ -36,6 +36,8 @@\n homepage = \"http://www.olcf.ornl.gov/center-projects/adios/\"\n url = \"https://github.com/ornladios/ADIOS/archive/v1.10.0.tar.gz\"\n \n+ version('develop', git='https://github.com/ornladios/ADIOS.git',\n+ branch='master')\n version('1.10.0', 'eff450a4c0130479417cfd63186957f3')\n version('1.9.0', '310ff02388bbaa2b1c1710ee970b5678')\n \n@@ -48,14 +50,14 @@\n variant('mpi', default=True, description='Enable MPI support')\n variant('infiniband', default=False, description='Enable infiniband support')\n \n+ # transforms\n variant('zlib', default=True, description='Enable szip transform support')\n variant('szip', default=False, description='Enable szip transform support')\n- variant('hdf5', default=False, description='Enable HDF5 transport support')\n- variant('netcdf', default=False, description='Enable NetCDF transport support')\n+ # transports and serial file converters\n+ variant('hdf5', default=False, description='Enable parallel HDF5 transport and serial bp2h5 converter')\n \n # Lots of setting up here for this package\n # module swap PrgEnv-intel PrgEnv-$COMP\n- # module load cray-netcdf/4.3.3.1\n # module load cray-hdf5/1.8.14\n # module load python/2.7.10\n \n@@ -69,9 +71,13 @@\n # optional transformations\n depends_on('zlib', when='+zlib')\n depends_on('szip', when='+szip')\n- # optional transports\n- depends_on('hdf5', when='+hdf5')\n- depends_on('netcdf', when='+netcdf')\n+ # optional transports & file converters\n+ depends_on('[email protected]:+mpi', when='+hdf5')\n+\n+ # Fix ADIOS <=1.10.0 compile error on HDF5 1.10+\n+ # https://github.com/ornladios/ADIOS/commit/3b21a8a41509\n+ # https://github.com/LLNL/spack/issues/1683\n+ patch('adios_1100.patch', when='@:1.10.0^[email protected]:')\n \n def validate(self, spec):\n \"\"\"\n@@ -114,9 +120,7 @@\n if '+szip' in spec:\n extra_args.append('--with-szip=%s' % spec['szip'].prefix)\n if '+hdf5' in spec:\n- extra_args.append('--with-hdf5=%s' % spec['hdf5'].prefix)\n- if '+netcdf' in spec:\n- extra_args.append('--with-netcdf=%s' % spec['netcdf'].prefix)\n+ extra_args.append('--with-phdf5=%s' % spec['hdf5'].prefix)\n \n sh = which('sh')\n sh('./autogen.sh')\n", "issue": "Adios does not build with HDF5 1.10\nThe `adios` variant `+hdf5` is broken. 
The error is\n\n```\nIn file included from /xfs1/eschnetter/src/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.1.1/hdf5-1.10.0-patch1-ijwvzsvpcbne33bffg6nrefdwgguofji/include/H5public.h:145:0,\n from /xfs1/eschnetter/src/spack/opt/spack/linux-Ubuntu14-x86_64/gcc-6.1.1/hdf5-1.10.0-patch1-ijwvzsvpcbne33bffg6nrefdwgguofji/include/hdf5.h:24,\n from bp2h5.c:39:\nbp2h5.c:46:13: error: two or more data types in declaration specifiers\n typedef int bool;\n ^\nbp2h5.c:46:1: warning: useless type name in empty declaration\n typedef int bool;\n ^~~~~~~\nbp2h5.c: In function \u2018main\u2019:\nbp2h5.c:173:17: warning: implicit declaration of function \u2018H5LTmake_dataset\u2019 [-Wimplicit-function-declaration]\n H5LTmake_dataset(HDF5_FILE,h5name,1,count,h5_type_id,v->value);\n ^~~~~~~~~~~~~~~~\nmake[3]: *** [bp2h5-bp2h5.o] Error 1\nmake[3]: Leaving directory `/tmp/eschnetter/spack-stage/spack-stage-0LLmsR/ADIOS-1.10.0/utils/bp2h5'\nmake[2]: *** [all-recursive] Error 1\nmake[2]: Leaving directory `/tmp/eschnetter/spack-stage/spack-stage-0LLmsR/ADIOS-1.10.0/utils'\nmake[1]: *** [all-recursive] Error 1\nmake[1]: Leaving directory `/tmp/eschnetter/spack-stage/spack-stage-0LLmsR/ADIOS-1.10.0'\n```\n\nIt seems that Adios expects `bool` to be undefined, but it is already defined. It might be that HDF5 1.10 includes `<stdbool.h>`.\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\n\nfrom spack import *\n\n\nclass Adios(Package):\n \"\"\"The Adaptable IO System (ADIOS) provides a simple,\n flexible way for scientists to describe the\n data in their code that may need to be written,\n read, or processed outside of the running simulation.\n \"\"\"\n\n homepage = \"http://www.olcf.ornl.gov/center-projects/adios/\"\n url = \"https://github.com/ornladios/ADIOS/archive/v1.10.0.tar.gz\"\n\n version('1.10.0', 'eff450a4c0130479417cfd63186957f3')\n version('1.9.0', '310ff02388bbaa2b1c1710ee970b5678')\n\n variant('shared', default=True,\n description='Builds a shared version of the library')\n\n variant('fortran', default=False,\n description='Enable Fortran bindings support')\n\n variant('mpi', default=True, description='Enable MPI support')\n variant('infiniband', default=False, description='Enable infiniband support')\n\n variant('zlib', default=True, description='Enable szip transform support')\n variant('szip', default=False, description='Enable szip transform support')\n variant('hdf5', default=False, description='Enable HDF5 transport support')\n variant('netcdf', default=False, description='Enable NetCDF transport support')\n\n # Lots of setting up here for this package\n # module swap PrgEnv-intel PrgEnv-$COMP\n # module load cray-netcdf/4.3.3.1\n # module load cray-hdf5/1.8.14\n # module load python/2.7.10\n\n depends_on('autoconf', type='build')\n depends_on('automake', type='build')\n depends_on('libtool', type='build')\n depends_on('python', type='build')\n\n depends_on('mpi', when='+mpi')\n depends_on('[email protected]:')\n # optional transformations\n depends_on('zlib', when='+zlib')\n depends_on('szip', when='+szip')\n # optional transports\n depends_on('hdf5', when='+hdf5')\n depends_on('netcdf', when='+netcdf')\n\n def validate(self, spec):\n \"\"\"\n Checks if incompatible variants have been activated at the same time\n :param spec: spec of the package\n :raises RuntimeError: in case of inconsistencies\n \"\"\"\n if '+fortran' in spec and not self.compiler.fc:\n msg = 'cannot build a fortran variant without a fortran compiler'\n raise RuntimeError(msg)\n\n def install(self, spec, prefix):\n self.validate(spec)\n # Handle compilation after spec validation\n extra_args = []\n\n # required, otherwise building its python bindings on ADIOS will fail\n extra_args.append(\"CFLAGS=-fPIC\")\n\n # always build external MXML, even in ADIOS 1.10.0+\n extra_args.append('--with-mxml=%s' % spec['mxml'].prefix)\n\n if '+shared' in spec:\n extra_args.append('--enable-shared')\n\n if '+mpi' in spec:\n extra_args.append('--with-mpi')\n if '+infiniband' in spec:\n extra_args.append('--with-infiniband')\n else:\n extra_args.append('--with-infiniband=no')\n\n if '+fortran' in spec:\n extra_args.append('--enable-fortran')\n else:\n extra_args.append('--disable-fortran')\n\n if '+zlib' in spec:\n extra_args.append('--with-zlib=%s' % spec['zlib'].prefix)\n if '+szip' in spec:\n extra_args.append('--with-szip=%s' % spec['szip'].prefix)\n if '+hdf5' in spec:\n extra_args.append('--with-hdf5=%s' % spec['hdf5'].prefix)\n if '+netcdf' in spec:\n extra_args.append('--with-netcdf=%s' % 
spec['netcdf'].prefix)\n\n sh = which('sh')\n sh('./autogen.sh')\n\n configure(\"--prefix=%s\" % prefix,\n *extra_args)\n make()\n make(\"install\")\n", "path": "var/spack/repos/builtin/packages/adios/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\n\nfrom spack import *\n\n\nclass Adios(Package):\n \"\"\"The Adaptable IO System (ADIOS) provides a simple,\n flexible way for scientists to describe the\n data in their code that may need to be written,\n read, or processed outside of the running simulation.\n \"\"\"\n\n homepage = \"http://www.olcf.ornl.gov/center-projects/adios/\"\n url = \"https://github.com/ornladios/ADIOS/archive/v1.10.0.tar.gz\"\n\n version('develop', git='https://github.com/ornladios/ADIOS.git',\n branch='master')\n version('1.10.0', 'eff450a4c0130479417cfd63186957f3')\n version('1.9.0', '310ff02388bbaa2b1c1710ee970b5678')\n\n variant('shared', default=True,\n description='Builds a shared version of the library')\n\n variant('fortran', default=False,\n description='Enable Fortran bindings support')\n\n variant('mpi', default=True, description='Enable MPI support')\n variant('infiniband', default=False, description='Enable infiniband support')\n\n # transforms\n variant('zlib', default=True, description='Enable szip transform support')\n variant('szip', default=False, description='Enable szip transform support')\n # transports and serial file converters\n variant('hdf5', default=False, description='Enable parallel HDF5 transport and serial bp2h5 converter')\n\n # Lots of setting up here for this package\n # module swap PrgEnv-intel PrgEnv-$COMP\n # module load cray-hdf5/1.8.14\n # module load python/2.7.10\n\n depends_on('autoconf', type='build')\n depends_on('automake', type='build')\n depends_on('libtool', type='build')\n depends_on('python', type='build')\n\n depends_on('mpi', when='+mpi')\n depends_on('[email protected]:')\n # optional transformations\n depends_on('zlib', when='+zlib')\n depends_on('szip', when='+szip')\n # optional transports & file converters\n depends_on('[email protected]:+mpi', when='+hdf5')\n\n # Fix ADIOS <=1.10.0 compile error on HDF5 1.10+\n # https://github.com/ornladios/ADIOS/commit/3b21a8a41509\n # https://github.com/LLNL/spack/issues/1683\n patch('adios_1100.patch', when='@:1.10.0^[email protected]:')\n\n def 
validate(self, spec):\n \"\"\"\n Checks if incompatible variants have been activated at the same time\n :param spec: spec of the package\n :raises RuntimeError: in case of inconsistencies\n \"\"\"\n if '+fortran' in spec and not self.compiler.fc:\n msg = 'cannot build a fortran variant without a fortran compiler'\n raise RuntimeError(msg)\n\n def install(self, spec, prefix):\n self.validate(spec)\n # Handle compilation after spec validation\n extra_args = []\n\n # required, otherwise building its python bindings on ADIOS will fail\n extra_args.append(\"CFLAGS=-fPIC\")\n\n # always build external MXML, even in ADIOS 1.10.0+\n extra_args.append('--with-mxml=%s' % spec['mxml'].prefix)\n\n if '+shared' in spec:\n extra_args.append('--enable-shared')\n\n if '+mpi' in spec:\n extra_args.append('--with-mpi')\n if '+infiniband' in spec:\n extra_args.append('--with-infiniband')\n else:\n extra_args.append('--with-infiniband=no')\n\n if '+fortran' in spec:\n extra_args.append('--enable-fortran')\n else:\n extra_args.append('--disable-fortran')\n\n if '+zlib' in spec:\n extra_args.append('--with-zlib=%s' % spec['zlib'].prefix)\n if '+szip' in spec:\n extra_args.append('--with-szip=%s' % spec['szip'].prefix)\n if '+hdf5' in spec:\n extra_args.append('--with-phdf5=%s' % spec['hdf5'].prefix)\n\n sh = which('sh')\n sh('./autogen.sh')\n\n configure(\"--prefix=%s\" % prefix,\n *extra_args)\n make()\n make(\"install\")\n", "path": "var/spack/repos/builtin/packages/adios/package.py"}]} | 2,324 | 830 |
gh_patches_debug_30824 | rasdani/github-patches | git_diff | pypa__cibuildwheel-906 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Travis ppc64le network issue work-around
### Description
Hi,
I currently see network issues with docker containers on Travis CI ppc64le builders.
Essentially, connections seem to time out, e.g., on `yum update`.
The same problem seems to be known in conda-forge and adding `--network=host` to `docker run` seems to solve the issue.
Do you know how I can most simply apply the same work-around in cibuildwheel?
X-ref:
- https://github.com/conda-forge/conda-smithy/pull/1520
### Build log
X-ref:
- https://github.com/openPMD/openPMD-api/pull/1136
### CI config
- https://github.com/openPMD/openPMD-api/tree/wheels
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cibuildwheel/docker_container.py`
Content:
```
1 import io
2 import json
3 import os
4 import shlex
5 import subprocess
6 import sys
7 import uuid
8 from pathlib import Path, PurePath
9 from types import TracebackType
10 from typing import IO, Dict, List, Optional, Sequence, Type, cast
11
12 from .typing import PathOrStr, PopenBytes
13
14
15 class DockerContainer:
16 """
17 An object that represents a running Docker container.
18
19 Intended for use as a context manager e.g.
20 `with DockerContainer(docker_image = 'ubuntu') as docker:`
21
22 A bash shell is running in the remote container. When `call()` is invoked,
23 the command is relayed to the remote shell, and the results are streamed
24 back to cibuildwheel.
25 """
26
27 UTILITY_PYTHON = "/opt/python/cp38-cp38/bin/python"
28
29 process: PopenBytes
30 bash_stdin: IO[bytes]
31 bash_stdout: IO[bytes]
32
33 def __init__(
34 self, *, docker_image: str, simulate_32_bit: bool = False, cwd: Optional[PathOrStr] = None
35 ):
36 if not docker_image:
37 raise ValueError("Must have a non-empty docker image to run.")
38
39 self.docker_image = docker_image
40 self.simulate_32_bit = simulate_32_bit
41 self.cwd = cwd
42 self.name: Optional[str] = None
43
44 def __enter__(self) -> "DockerContainer":
45 self.name = f"cibuildwheel-{uuid.uuid4()}"
46 cwd_args = ["-w", str(self.cwd)] if self.cwd else []
47 shell_args = ["linux32", "/bin/bash"] if self.simulate_32_bit else ["/bin/bash"]
48 subprocess.run(
49 [
50 "docker",
51 "create",
52 "--env=CIBUILDWHEEL",
53 f"--name={self.name}",
54 "--interactive",
55 "--volume=/:/host", # ignored on CircleCI
56 *cwd_args,
57 self.docker_image,
58 *shell_args,
59 ],
60 check=True,
61 )
62 self.process = subprocess.Popen(
63 [
64 "docker",
65 "start",
66 "--attach",
67 "--interactive",
68 self.name,
69 ],
70 stdin=subprocess.PIPE,
71 stdout=subprocess.PIPE,
72 )
73
74 assert self.process.stdin and self.process.stdout
75 self.bash_stdin = self.process.stdin
76 self.bash_stdout = self.process.stdout
77
78 # run a noop command to block until the container is responding
79 self.call(["/bin/true"])
80
81 return self
82
83 def __exit__(
84 self,
85 exc_type: Optional[Type[BaseException]],
86 exc_val: Optional[BaseException],
87 exc_tb: Optional[TracebackType],
88 ) -> None:
89
90 self.bash_stdin.close()
91 self.process.terminate()
92 self.process.wait()
93
94 assert isinstance(self.name, str)
95
96 subprocess.run(["docker", "rm", "--force", "-v", self.name], stdout=subprocess.DEVNULL)
97 self.name = None
98
99 def copy_into(self, from_path: Path, to_path: PurePath) -> None:
100 # `docker cp` causes 'no space left on device' error when
101 # a container is running and the host filesystem is
102 # mounted. https://github.com/moby/moby/issues/38995
103 # Use `docker exec` instead.
104
105 if from_path.is_dir():
106 self.call(["mkdir", "-p", to_path])
107 subprocess.run(
108 f"tar cf - . | docker exec -i {self.name} tar -xC {shell_quote(to_path)} -f -",
109 shell=True,
110 check=True,
111 cwd=from_path,
112 )
113 else:
114 subprocess.run(
115 f'cat {shell_quote(from_path)} | docker exec -i {self.name} sh -c "cat > {shell_quote(to_path)}"',
116 shell=True,
117 check=True,
118 )
119
120 def copy_out(self, from_path: PurePath, to_path: Path) -> None:
121 # note: we assume from_path is a dir
122 to_path.mkdir(parents=True, exist_ok=True)
123
124 subprocess.run(
125 f"docker exec -i {self.name} tar -cC {shell_quote(from_path)} -f - . | tar -xf -",
126 shell=True,
127 check=True,
128 cwd=to_path,
129 )
130
131 def glob(self, path: PurePath, pattern: str) -> List[PurePath]:
132 glob_pattern = os.path.join(str(path), pattern)
133
134 path_strings = json.loads(
135 self.call(
136 [
137 self.UTILITY_PYTHON,
138 "-c",
139 f"import sys, json, glob; json.dump(glob.glob({glob_pattern!r}), sys.stdout)",
140 ],
141 capture_output=True,
142 )
143 )
144
145 return [PurePath(p) for p in path_strings]
146
147 def call(
148 self,
149 args: Sequence[PathOrStr],
150 env: Optional[Dict[str, str]] = None,
151 capture_output: bool = False,
152 cwd: Optional[PathOrStr] = None,
153 ) -> str:
154
155 chdir = f"cd {cwd}" if cwd else ""
156 env_assignments = (
157 " ".join(f"{shlex.quote(k)}={shlex.quote(v)}" for k, v in env.items())
158 if env is not None
159 else ""
160 )
161 command = " ".join(shlex.quote(str(a)) for a in args)
162 end_of_message = str(uuid.uuid4())
163
164 # log the command we're executing
165 print(f" + {command}")
166
167 # Write a command to the remote shell. First we change the
168 # cwd, if that's required. Then, we use the `env` utility to run
169 # `command` inside the specified environment. We use `env` because it
170 # can cope with spaces and strange characters in the name or value.
171 # Finally, the remote shell is told to write a footer - this will show
172 # up in the output so we know when to stop reading, and will include
173 # the return code of `command`.
174 self.bash_stdin.write(
175 bytes(
176 f"""(
177 {chdir}
178 env {env_assignments} {command}
179 printf "%04d%s\n" $? {end_of_message}
180 )
181 """,
182 encoding="utf8",
183 errors="surrogateescape",
184 )
185 )
186 self.bash_stdin.flush()
187
188 if capture_output:
189 output_io: IO[bytes] = io.BytesIO()
190 else:
191 output_io = sys.stdout.buffer
192
193 while True:
194 line = self.bash_stdout.readline()
195
196 if line.endswith(bytes(end_of_message, encoding="utf8") + b"\n"):
197 # fmt: off
198 footer_offset = (
199 len(line)
200 - 1 # newline character
201 - len(end_of_message) # delimiter
202 - 4 # 4 return code decimals
203 )
204 # fmt: on
205 return_code_str = line[footer_offset : footer_offset + 4]
206 return_code = int(return_code_str)
207 # add the last line to output, without the footer
208 output_io.write(line[0:footer_offset])
209 break
210 else:
211 output_io.write(line)
212
213 if isinstance(output_io, io.BytesIO):
214 output = str(output_io.getvalue(), encoding="utf8", errors="surrogateescape")
215 else:
216 output = ""
217
218 if return_code != 0:
219 raise subprocess.CalledProcessError(return_code, args, output)
220
221 return output
222
223 def get_environment(self) -> Dict[str, str]:
224 env = json.loads(
225 self.call(
226 [
227 self.UTILITY_PYTHON,
228 "-c",
229 "import sys, json, os; json.dump(os.environ.copy(), sys.stdout)",
230 ],
231 capture_output=True,
232 )
233 )
234 return cast(Dict[str, str], env)
235
236 def environment_executor(self, command: List[str], environment: Dict[str, str]) -> str:
237 # used as an EnvironmentExecutor to evaluate commands and capture output
238 return self.call(command, env=environment, capture_output=True)
239
240
241 def shell_quote(path: PurePath) -> str:
242 return shlex.quote(str(path))
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cibuildwheel/docker_container.py b/cibuildwheel/docker_container.py
--- a/cibuildwheel/docker_container.py
+++ b/cibuildwheel/docker_container.py
@@ -1,6 +1,7 @@
import io
import json
import os
+import platform
import shlex
import subprocess
import sys
@@ -9,6 +10,8 @@
from types import TracebackType
from typing import IO, Dict, List, Optional, Sequence, Type, cast
+from cibuildwheel.util import CIProvider, detect_ci_provider
+
from .typing import PathOrStr, PopenBytes
@@ -44,6 +47,15 @@
def __enter__(self) -> "DockerContainer":
self.name = f"cibuildwheel-{uuid.uuid4()}"
cwd_args = ["-w", str(self.cwd)] if self.cwd else []
+
+ # work-around for Travis-CI PPC64le Docker runs since 2021:
+ # this avoids network splits
+ # https://github.com/pypa/cibuildwheel/issues/904
+ # https://github.com/conda-forge/conda-smithy/pull/1520
+ network_args = []
+ if detect_ci_provider() == CIProvider.travis_ci and platform.machine() == "ppc64le":
+ network_args = ["--network=host"]
+
shell_args = ["linux32", "/bin/bash"] if self.simulate_32_bit else ["/bin/bash"]
subprocess.run(
[
@@ -53,6 +65,7 @@
f"--name={self.name}",
"--interactive",
"--volume=/:/host", # ignored on CircleCI
+ *network_args,
*cwd_args,
self.docker_image,
*shell_args,
| {"golden_diff": "diff --git a/cibuildwheel/docker_container.py b/cibuildwheel/docker_container.py\n--- a/cibuildwheel/docker_container.py\n+++ b/cibuildwheel/docker_container.py\n@@ -1,6 +1,7 @@\n import io\n import json\n import os\n+import platform\n import shlex\n import subprocess\n import sys\n@@ -9,6 +10,8 @@\n from types import TracebackType\n from typing import IO, Dict, List, Optional, Sequence, Type, cast\n \n+from cibuildwheel.util import CIProvider, detect_ci_provider\n+\n from .typing import PathOrStr, PopenBytes\n \n \n@@ -44,6 +47,15 @@\n def __enter__(self) -> \"DockerContainer\":\n self.name = f\"cibuildwheel-{uuid.uuid4()}\"\n cwd_args = [\"-w\", str(self.cwd)] if self.cwd else []\n+\n+ # work-around for Travis-CI PPC64le Docker runs since 2021:\n+ # this avoids network splits\n+ # https://github.com/pypa/cibuildwheel/issues/904\n+ # https://github.com/conda-forge/conda-smithy/pull/1520\n+ network_args = []\n+ if detect_ci_provider() == CIProvider.travis_ci and platform.machine() == \"ppc64le\":\n+ network_args = [\"--network=host\"]\n+\n shell_args = [\"linux32\", \"/bin/bash\"] if self.simulate_32_bit else [\"/bin/bash\"]\n subprocess.run(\n [\n@@ -53,6 +65,7 @@\n f\"--name={self.name}\",\n \"--interactive\",\n \"--volume=/:/host\", # ignored on CircleCI\n+ *network_args,\n *cwd_args,\n self.docker_image,\n *shell_args,\n", "issue": "Travis ppc64le network issue work-around\n### Description\r\n\r\nHi,\r\n\r\nI currently see network issues with docker containers on Travis CI ppc64le builders.\r\nEssentially, connections seem to time out, e.g., on `yum update`.\r\n\r\nThe same problem seems to be known in conda-forge and adding `--network=host` to `docker run` seems to solve the issue.\r\n\r\nDo you know how I can most simply apply the same work-around in cibuildwheel?\r\n\r\nX-ref:\r\n- https://github.com/conda-forge/conda-smithy/pull/1520\r\n\r\n### Build log\r\n\r\nX-ref:\r\n- https://github.com/openPMD/openPMD-api/pull/1136\r\n\r\n### CI config\r\n\r\n- https://github.com/openPMD/openPMD-api/tree/wheels\n", "before_files": [{"content": "import io\nimport json\nimport os\nimport shlex\nimport subprocess\nimport sys\nimport uuid\nfrom pathlib import Path, PurePath\nfrom types import TracebackType\nfrom typing import IO, Dict, List, Optional, Sequence, Type, cast\n\nfrom .typing import PathOrStr, PopenBytes\n\n\nclass DockerContainer:\n \"\"\"\n An object that represents a running Docker container.\n\n Intended for use as a context manager e.g.\n `with DockerContainer(docker_image = 'ubuntu') as docker:`\n\n A bash shell is running in the remote container. 
When `call()` is invoked,\n the command is relayed to the remote shell, and the results are streamed\n back to cibuildwheel.\n \"\"\"\n\n UTILITY_PYTHON = \"/opt/python/cp38-cp38/bin/python\"\n\n process: PopenBytes\n bash_stdin: IO[bytes]\n bash_stdout: IO[bytes]\n\n def __init__(\n self, *, docker_image: str, simulate_32_bit: bool = False, cwd: Optional[PathOrStr] = None\n ):\n if not docker_image:\n raise ValueError(\"Must have a non-empty docker image to run.\")\n\n self.docker_image = docker_image\n self.simulate_32_bit = simulate_32_bit\n self.cwd = cwd\n self.name: Optional[str] = None\n\n def __enter__(self) -> \"DockerContainer\":\n self.name = f\"cibuildwheel-{uuid.uuid4()}\"\n cwd_args = [\"-w\", str(self.cwd)] if self.cwd else []\n shell_args = [\"linux32\", \"/bin/bash\"] if self.simulate_32_bit else [\"/bin/bash\"]\n subprocess.run(\n [\n \"docker\",\n \"create\",\n \"--env=CIBUILDWHEEL\",\n f\"--name={self.name}\",\n \"--interactive\",\n \"--volume=/:/host\", # ignored on CircleCI\n *cwd_args,\n self.docker_image,\n *shell_args,\n ],\n check=True,\n )\n self.process = subprocess.Popen(\n [\n \"docker\",\n \"start\",\n \"--attach\",\n \"--interactive\",\n self.name,\n ],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n )\n\n assert self.process.stdin and self.process.stdout\n self.bash_stdin = self.process.stdin\n self.bash_stdout = self.process.stdout\n\n # run a noop command to block until the container is responding\n self.call([\"/bin/true\"])\n\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n\n self.bash_stdin.close()\n self.process.terminate()\n self.process.wait()\n\n assert isinstance(self.name, str)\n\n subprocess.run([\"docker\", \"rm\", \"--force\", \"-v\", self.name], stdout=subprocess.DEVNULL)\n self.name = None\n\n def copy_into(self, from_path: Path, to_path: PurePath) -> None:\n # `docker cp` causes 'no space left on device' error when\n # a container is running and the host filesystem is\n # mounted. https://github.com/moby/moby/issues/38995\n # Use `docker exec` instead.\n\n if from_path.is_dir():\n self.call([\"mkdir\", \"-p\", to_path])\n subprocess.run(\n f\"tar cf - . | docker exec -i {self.name} tar -xC {shell_quote(to_path)} -f -\",\n shell=True,\n check=True,\n cwd=from_path,\n )\n else:\n subprocess.run(\n f'cat {shell_quote(from_path)} | docker exec -i {self.name} sh -c \"cat > {shell_quote(to_path)}\"',\n shell=True,\n check=True,\n )\n\n def copy_out(self, from_path: PurePath, to_path: Path) -> None:\n # note: we assume from_path is a dir\n to_path.mkdir(parents=True, exist_ok=True)\n\n subprocess.run(\n f\"docker exec -i {self.name} tar -cC {shell_quote(from_path)} -f - . 
| tar -xf -\",\n shell=True,\n check=True,\n cwd=to_path,\n )\n\n def glob(self, path: PurePath, pattern: str) -> List[PurePath]:\n glob_pattern = os.path.join(str(path), pattern)\n\n path_strings = json.loads(\n self.call(\n [\n self.UTILITY_PYTHON,\n \"-c\",\n f\"import sys, json, glob; json.dump(glob.glob({glob_pattern!r}), sys.stdout)\",\n ],\n capture_output=True,\n )\n )\n\n return [PurePath(p) for p in path_strings]\n\n def call(\n self,\n args: Sequence[PathOrStr],\n env: Optional[Dict[str, str]] = None,\n capture_output: bool = False,\n cwd: Optional[PathOrStr] = None,\n ) -> str:\n\n chdir = f\"cd {cwd}\" if cwd else \"\"\n env_assignments = (\n \" \".join(f\"{shlex.quote(k)}={shlex.quote(v)}\" for k, v in env.items())\n if env is not None\n else \"\"\n )\n command = \" \".join(shlex.quote(str(a)) for a in args)\n end_of_message = str(uuid.uuid4())\n\n # log the command we're executing\n print(f\" + {command}\")\n\n # Write a command to the remote shell. First we change the\n # cwd, if that's required. Then, we use the `env` utility to run\n # `command` inside the specified environment. We use `env` because it\n # can cope with spaces and strange characters in the name or value.\n # Finally, the remote shell is told to write a footer - this will show\n # up in the output so we know when to stop reading, and will include\n # the return code of `command`.\n self.bash_stdin.write(\n bytes(\n f\"\"\"(\n {chdir}\n env {env_assignments} {command}\n printf \"%04d%s\\n\" $? {end_of_message}\n )\n \"\"\",\n encoding=\"utf8\",\n errors=\"surrogateescape\",\n )\n )\n self.bash_stdin.flush()\n\n if capture_output:\n output_io: IO[bytes] = io.BytesIO()\n else:\n output_io = sys.stdout.buffer\n\n while True:\n line = self.bash_stdout.readline()\n\n if line.endswith(bytes(end_of_message, encoding=\"utf8\") + b\"\\n\"):\n # fmt: off\n footer_offset = (\n len(line)\n - 1 # newline character\n - len(end_of_message) # delimiter\n - 4 # 4 return code decimals\n )\n # fmt: on\n return_code_str = line[footer_offset : footer_offset + 4]\n return_code = int(return_code_str)\n # add the last line to output, without the footer\n output_io.write(line[0:footer_offset])\n break\n else:\n output_io.write(line)\n\n if isinstance(output_io, io.BytesIO):\n output = str(output_io.getvalue(), encoding=\"utf8\", errors=\"surrogateescape\")\n else:\n output = \"\"\n\n if return_code != 0:\n raise subprocess.CalledProcessError(return_code, args, output)\n\n return output\n\n def get_environment(self) -> Dict[str, str]:\n env = json.loads(\n self.call(\n [\n self.UTILITY_PYTHON,\n \"-c\",\n \"import sys, json, os; json.dump(os.environ.copy(), sys.stdout)\",\n ],\n capture_output=True,\n )\n )\n return cast(Dict[str, str], env)\n\n def environment_executor(self, command: List[str], environment: Dict[str, str]) -> str:\n # used as an EnvironmentExecutor to evaluate commands and capture output\n return self.call(command, env=environment, capture_output=True)\n\n\ndef shell_quote(path: PurePath) -> str:\n return shlex.quote(str(path))\n", "path": "cibuildwheel/docker_container.py"}], "after_files": [{"content": "import io\nimport json\nimport os\nimport platform\nimport shlex\nimport subprocess\nimport sys\nimport uuid\nfrom pathlib import Path, PurePath\nfrom types import TracebackType\nfrom typing import IO, Dict, List, Optional, Sequence, Type, cast\n\nfrom cibuildwheel.util import CIProvider, detect_ci_provider\n\nfrom .typing import PathOrStr, PopenBytes\n\n\nclass DockerContainer:\n \"\"\"\n An object that 
represents a running Docker container.\n\n Intended for use as a context manager e.g.\n `with DockerContainer(docker_image = 'ubuntu') as docker:`\n\n A bash shell is running in the remote container. When `call()` is invoked,\n the command is relayed to the remote shell, and the results are streamed\n back to cibuildwheel.\n \"\"\"\n\n UTILITY_PYTHON = \"/opt/python/cp38-cp38/bin/python\"\n\n process: PopenBytes\n bash_stdin: IO[bytes]\n bash_stdout: IO[bytes]\n\n def __init__(\n self, *, docker_image: str, simulate_32_bit: bool = False, cwd: Optional[PathOrStr] = None\n ):\n if not docker_image:\n raise ValueError(\"Must have a non-empty docker image to run.\")\n\n self.docker_image = docker_image\n self.simulate_32_bit = simulate_32_bit\n self.cwd = cwd\n self.name: Optional[str] = None\n\n def __enter__(self) -> \"DockerContainer\":\n self.name = f\"cibuildwheel-{uuid.uuid4()}\"\n cwd_args = [\"-w\", str(self.cwd)] if self.cwd else []\n\n # work-around for Travis-CI PPC64le Docker runs since 2021:\n # this avoids network splits\n # https://github.com/pypa/cibuildwheel/issues/904\n # https://github.com/conda-forge/conda-smithy/pull/1520\n network_args = []\n if detect_ci_provider() == CIProvider.travis_ci and platform.machine() == \"ppc64le\":\n network_args = [\"--network=host\"]\n\n shell_args = [\"linux32\", \"/bin/bash\"] if self.simulate_32_bit else [\"/bin/bash\"]\n subprocess.run(\n [\n \"docker\",\n \"create\",\n \"--env=CIBUILDWHEEL\",\n f\"--name={self.name}\",\n \"--interactive\",\n \"--volume=/:/host\", # ignored on CircleCI\n *network_args,\n *cwd_args,\n self.docker_image,\n *shell_args,\n ],\n check=True,\n )\n self.process = subprocess.Popen(\n [\n \"docker\",\n \"start\",\n \"--attach\",\n \"--interactive\",\n self.name,\n ],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n )\n\n assert self.process.stdin and self.process.stdout\n self.bash_stdin = self.process.stdin\n self.bash_stdout = self.process.stdout\n\n # run a noop command to block until the container is responding\n self.call([\"/bin/true\"])\n\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n\n self.bash_stdin.close()\n self.process.terminate()\n self.process.wait()\n\n assert isinstance(self.name, str)\n\n subprocess.run([\"docker\", \"rm\", \"--force\", \"-v\", self.name], stdout=subprocess.DEVNULL)\n self.name = None\n\n def copy_into(self, from_path: Path, to_path: PurePath) -> None:\n # `docker cp` causes 'no space left on device' error when\n # a container is running and the host filesystem is\n # mounted. https://github.com/moby/moby/issues/38995\n # Use `docker exec` instead.\n\n if from_path.is_dir():\n self.call([\"mkdir\", \"-p\", to_path])\n subprocess.run(\n f\"tar cf - . | docker exec -i {self.name} tar -xC {shell_quote(to_path)} -f -\",\n shell=True,\n check=True,\n cwd=from_path,\n )\n else:\n subprocess.run(\n f'cat {shell_quote(from_path)} | docker exec -i {self.name} sh -c \"cat > {shell_quote(to_path)}\"',\n shell=True,\n check=True,\n )\n\n def copy_out(self, from_path: PurePath, to_path: Path) -> None:\n # note: we assume from_path is a dir\n to_path.mkdir(parents=True, exist_ok=True)\n\n subprocess.run(\n f\"docker exec -i {self.name} tar -cC {shell_quote(from_path)} -f - . 
| tar -xf -\",\n shell=True,\n check=True,\n cwd=to_path,\n )\n\n def glob(self, path: PurePath, pattern: str) -> List[PurePath]:\n glob_pattern = os.path.join(str(path), pattern)\n\n path_strs = json.loads(\n self.call(\n [\n self.UTILITY_PYTHON,\n \"-c\",\n f\"import sys, json, glob; json.dump(glob.glob({glob_pattern!r}), sys.stdout)\",\n ],\n capture_output=True,\n )\n )\n\n return [PurePath(p) for p in path_strs]\n\n def call(\n self,\n args: Sequence[PathOrStr],\n env: Optional[Dict[str, str]] = None,\n capture_output: bool = False,\n cwd: Optional[PathOrStr] = None,\n ) -> str:\n\n chdir = f\"cd {cwd}\" if cwd else \"\"\n env_assignments = (\n \" \".join(f\"{shlex.quote(k)}={shlex.quote(v)}\" for k, v in env.items())\n if env is not None\n else \"\"\n )\n command = \" \".join(shlex.quote(str(a)) for a in args)\n end_of_message = str(uuid.uuid4())\n\n # log the command we're executing\n print(f\" + {command}\")\n\n # Write a command to the remote shell. First we change the\n # cwd, if that's required. Then, we use the `env` utility to run\n # `command` inside the specified environment. We use `env` because it\n # can cope with spaces and strange characters in the name or value.\n # Finally, the remote shell is told to write a footer - this will show\n # up in the output so we know when to stop reading, and will include\n # the returncode of `command`.\n self.bash_stdin.write(\n bytes(\n f\"\"\"(\n {chdir}\n env {env_assignments} {command}\n printf \"%04d%s\\n\" $? {end_of_message}\n )\n \"\"\",\n encoding=\"utf8\",\n errors=\"surrogateescape\",\n )\n )\n self.bash_stdin.flush()\n\n if capture_output:\n output_io: IO[bytes] = io.BytesIO()\n else:\n output_io = sys.stdout.buffer\n\n while True:\n line = self.bash_stdout.readline()\n\n if line.endswith(bytes(end_of_message, encoding=\"utf8\") + b\"\\n\"):\n # fmt: off\n footer_offset = (\n len(line)\n - 1 # newline character\n - len(end_of_message) # delimiter\n - 4 # 4 returncode decimals\n )\n # fmt: on\n returncode_str = line[footer_offset : footer_offset + 4]\n returncode = int(returncode_str)\n # add the last line to output, without the footer\n output_io.write(line[0:footer_offset])\n break\n else:\n output_io.write(line)\n\n if isinstance(output_io, io.BytesIO):\n output = str(output_io.getvalue(), encoding=\"utf8\", errors=\"surrogateescape\")\n else:\n output = \"\"\n\n if returncode != 0:\n raise subprocess.CalledProcessError(returncode, args, output)\n\n return output\n\n def get_environment(self) -> Dict[str, str]:\n env = json.loads(\n self.call(\n [\n self.UTILITY_PYTHON,\n \"-c\",\n \"import sys, json, os; json.dump(os.environ.copy(), sys.stdout)\",\n ],\n capture_output=True,\n )\n )\n return cast(Dict[str, str], env)\n\n def environment_executor(self, command: List[str], environment: Dict[str, str]) -> str:\n # used as an EnvironmentExecutor to evaluate commands and capture output\n return self.call(command, env=environment, capture_output=True)\n\n\ndef shell_quote(path: PurePath) -> str:\n return shlex.quote(str(path))\n", "path": "cibuildwheel/docker_container.py"}]} | 2,858 | 406 |
gh_patches_debug_24880 | rasdani/github-patches | git_diff | nextcloud__appstore-441 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
At least a single resource for support or contact should be required
This is simply asking us for trouble. No repo. No website. Nothing. The app was provided only over an archive on Google Drive (https://github.com/nextcloud/app-certificate-requests/pull/52) ... this is not offending developers, google drive or anything 😅 But - you know what I mean I hope 😢
<img width="919" alt="bildschirmfoto 2017-03-01 um 11 44 38" src="https://cloud.githubusercontent.com/assets/19711361/23456641/8c1a9b1c-fe74-11e6-9894-e0fec0f622e7.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nextcloudappstore/core/scaffolding/forms.py`
Content:
```
1 import re
2 from os import listdir
3
4 from django.core.exceptions import ValidationError
5 from django.forms import Textarea, Form, URLField, MultipleChoiceField
6 from django.utils.translation import ugettext_lazy as _ # type: ignore
7 from django.forms.fields import EmailField, CharField, ChoiceField
8 from django.conf import settings
9
10 from nextcloudappstore.core.facades import resolve_file_relative_path
11 from nextcloudappstore.core.models import Category
12 from django.utils.functional import lazy
13
14
15 def get_categories():
16 return [(cat.id, cat.name) for cat in Category.objects.all()]
17
18
19 def get_versions():
20 tpls = listdir(resolve_file_relative_path(__file__, 'app-templates'))
21 return [(v, v) for v in tpls]
22
23
24 def validate_id(input: str) -> str:
25 regex = r'^([A-Z][a-z]*)+$'
26 if not re.match(regex, input):
27 raise ValidationError(_('The app name must be camel case e.g. MyApp'))
28
29
30 class AppScaffoldingForm(Form):
31 name = CharField(max_length=80, label=_('App name'),
32 validators=[validate_id],
33 help_text=_('The app name must be camel case e.g. MyApp'))
34 platform = ChoiceField(choices=lazy(get_versions, list), required=True,
35 label=_('Nextcloud version'))
36 author_name = CharField(max_length=80, label=_('Author\'s full name'))
37 author_email = EmailField(label=_('Author\'s e-mail'))
38 author_homepage = URLField(label=_('Author\'s homepage'), required=False)
39 categories = MultipleChoiceField(required=True, label=_('Categories'),
40 choices=lazy(get_categories, list),
41 help_text=_('Hold down CTRL and click to '
42 'select multiple entries'))
43 summary = CharField(max_length=256, label=_('Summary'), help_text=_(
44 'Short description of your app that will be rendered as short teaser'))
45 description = CharField(widget=Textarea, label=_('Description'),
46 help_text=_('Full description of what your app '
47 'does. Can contain Markdown.'))
48
```
Path: `nextcloudappstore/core/scaffolding/archive.py`
Content:
```
1 import re
2 import tarfile
3 from io import BytesIO, StringIO
4 from typing import Dict
5 from os.path import join, isdir, relpath
6 from os import walk
7
8 from django.template import Context
9 from django.template import Template
10
11 from nextcloudappstore.core.facades import resolve_file_relative_path
12 from nextcloudappstore.settings.base import APP_SCAFFOLDING_PROFILES
13
14
15 def build_files(args: Dict[str, str]) -> Dict[str, str]:
16 platform = int(args['platform']) # prevent path traversal
17 vars = {
18 'id': args['name'].lower(),
19 'summary': args['summary'],
20 'description': args['description'],
21 'name': ' '.join(re.findall(r'[A-Z][^A-Z]*', args['name'])),
22 'namespace': args['name'],
23 'author_name': args['author_name'],
24 'author_mail': args['author_email'],
25 'author_homepage': args['author_homepage'],
26 'categories': args['categories'],
27 'nextcloud_version': platform
28 }
29 vars.update(APP_SCAFFOLDING_PROFILES.get(platform, {}))
30 relative_base = 'app-templates/%i/app/' % platform
31 base = resolve_file_relative_path(__file__, relative_base)
32
33 context = Context({'app': vars})
34 result = {}
35 if isdir(base):
36 for root, dirs, files in walk(base):
37 for file in files:
38 file_path = join(root, file)
39 rel_file_path = '%s/%s' % (
40 vars['id'], relpath(file_path, base)
41 )
42 with open(file_path) as f:
43 t = Template(f.read())
44 result[rel_file_path] = t.render(context)
45
46 return result
47
48
49 def build_archive(parameters: Dict[str, str]) -> BytesIO:
50 buffer = BytesIO()
51 with tarfile.open(fileobj=buffer, mode='w:gz') as f:
52 files = build_files(parameters)
53 for path, contents in files.items():
54 info = tarfile.TarInfo(path)
55 info.size = len(contents)
56 f.addfile(info, BytesIO(contents.encode()))
57 buffer.seek(0)
58 return buffer
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nextcloudappstore/core/scaffolding/archive.py b/nextcloudappstore/core/scaffolding/archive.py
--- a/nextcloudappstore/core/scaffolding/archive.py
+++ b/nextcloudappstore/core/scaffolding/archive.py
@@ -23,6 +23,7 @@
'author_name': args['author_name'],
'author_mail': args['author_email'],
'author_homepage': args['author_homepage'],
+ 'issue_tracker': args['issue_tracker'],
'categories': args['categories'],
'nextcloud_version': platform
}
diff --git a/nextcloudappstore/core/scaffolding/forms.py b/nextcloudappstore/core/scaffolding/forms.py
--- a/nextcloudappstore/core/scaffolding/forms.py
+++ b/nextcloudappstore/core/scaffolding/forms.py
@@ -36,6 +36,8 @@
author_name = CharField(max_length=80, label=_('Author\'s full name'))
author_email = EmailField(label=_('Author\'s e-mail'))
author_homepage = URLField(label=_('Author\'s homepage'), required=False)
+ issue_tracker = URLField(label=_('Issue tracker URL'), required=True,
+ help_text=_('Bug reports and feature requests'))
categories = MultipleChoiceField(required=True, label=_('Categories'),
choices=lazy(get_categories, list),
help_text=_('Hold down CTRL and click to '
| {"golden_diff": "diff --git a/nextcloudappstore/core/scaffolding/archive.py b/nextcloudappstore/core/scaffolding/archive.py\n--- a/nextcloudappstore/core/scaffolding/archive.py\n+++ b/nextcloudappstore/core/scaffolding/archive.py\n@@ -23,6 +23,7 @@\n 'author_name': args['author_name'],\n 'author_mail': args['author_email'],\n 'author_homepage': args['author_homepage'],\n+ 'issue_tracker': args['issue_tracker'],\n 'categories': args['categories'],\n 'nextcloud_version': platform\n }\ndiff --git a/nextcloudappstore/core/scaffolding/forms.py b/nextcloudappstore/core/scaffolding/forms.py\n--- a/nextcloudappstore/core/scaffolding/forms.py\n+++ b/nextcloudappstore/core/scaffolding/forms.py\n@@ -36,6 +36,8 @@\n author_name = CharField(max_length=80, label=_('Author\\'s full name'))\n author_email = EmailField(label=_('Author\\'s e-mail'))\n author_homepage = URLField(label=_('Author\\'s homepage'), required=False)\n+ issue_tracker = URLField(label=_('Issue tracker URL'), required=True,\n+ help_text=_('Bug reports and feature requests'))\n categories = MultipleChoiceField(required=True, label=_('Categories'),\n choices=lazy(get_categories, list),\n help_text=_('Hold down CTRL and click to '\n", "issue": "At least a single resource for support or contact should be required\nThis is simply asking us for trouble. No repo. No website. Nothing. The app was provided only over an archive on Google Drive (https://github.com/nextcloud/app-certificate-requests/pull/52) ... this is not offending developers, google drive or anything \ud83d\ude05 But - you know what I mean I hope \ud83d\ude22 \r\n\r\n<img width=\"919\" alt=\"bildschirmfoto 2017-03-01 um 11 44 38\" src=\"https://cloud.githubusercontent.com/assets/19711361/23456641/8c1a9b1c-fe74-11e6-9894-e0fec0f622e7.png\">\nAt least a single resource for support or contact should be required\nThis is simply asking us for trouble. No repo. No website. Nothing. The app was provided only over an archive on Google Drive (https://github.com/nextcloud/app-certificate-requests/pull/52) ... this is not offending developers, google drive or anything \ud83d\ude05 But - you know what I mean I hope \ud83d\ude22 \r\n\r\n<img width=\"919\" alt=\"bildschirmfoto 2017-03-01 um 11 44 38\" src=\"https://cloud.githubusercontent.com/assets/19711361/23456641/8c1a9b1c-fe74-11e6-9894-e0fec0f622e7.png\">\n", "before_files": [{"content": "import re\nfrom os import listdir\n\nfrom django.core.exceptions import ValidationError\nfrom django.forms import Textarea, Form, URLField, MultipleChoiceField\nfrom django.utils.translation import ugettext_lazy as _ # type: ignore\nfrom django.forms.fields import EmailField, CharField, ChoiceField\nfrom django.conf import settings\n\nfrom nextcloudappstore.core.facades import resolve_file_relative_path\nfrom nextcloudappstore.core.models import Category\nfrom django.utils.functional import lazy\n\n\ndef get_categories():\n return [(cat.id, cat.name) for cat in Category.objects.all()]\n\n\ndef get_versions():\n tpls = listdir(resolve_file_relative_path(__file__, 'app-templates'))\n return [(v, v) for v in tpls]\n\n\ndef validate_id(input: str) -> str:\n regex = r'^([A-Z][a-z]*)+$'\n if not re.match(regex, input):\n raise ValidationError(_('The app name must be camel case e.g. MyApp'))\n\n\nclass AppScaffoldingForm(Form):\n name = CharField(max_length=80, label=_('App name'),\n validators=[validate_id],\n help_text=_('The app name must be camel case e.g. 
MyApp'))\n platform = ChoiceField(choices=lazy(get_versions, list), required=True,\n label=_('Nextcloud version'))\n author_name = CharField(max_length=80, label=_('Author\\'s full name'))\n author_email = EmailField(label=_('Author\\'s e-mail'))\n author_homepage = URLField(label=_('Author\\'s homepage'), required=False)\n categories = MultipleChoiceField(required=True, label=_('Categories'),\n choices=lazy(get_categories, list),\n help_text=_('Hold down CTRL and click to '\n 'select multiple entries'))\n summary = CharField(max_length=256, label=_('Summary'), help_text=_(\n 'Short description of your app that will be rendered as short teaser'))\n description = CharField(widget=Textarea, label=_('Description'),\n help_text=_('Full description of what your app '\n 'does. Can contain Markdown.'))\n", "path": "nextcloudappstore/core/scaffolding/forms.py"}, {"content": "import re\nimport tarfile\nfrom io import BytesIO, StringIO\nfrom typing import Dict\nfrom os.path import join, isdir, relpath\nfrom os import walk\n\nfrom django.template import Context\nfrom django.template import Template\n\nfrom nextcloudappstore.core.facades import resolve_file_relative_path\nfrom nextcloudappstore.settings.base import APP_SCAFFOLDING_PROFILES\n\n\ndef build_files(args: Dict[str, str]) -> Dict[str, str]:\n platform = int(args['platform']) # prevent path traversal\n vars = {\n 'id': args['name'].lower(),\n 'summary': args['summary'],\n 'description': args['description'],\n 'name': ' '.join(re.findall(r'[A-Z][^A-Z]*', args['name'])),\n 'namespace': args['name'],\n 'author_name': args['author_name'],\n 'author_mail': args['author_email'],\n 'author_homepage': args['author_homepage'],\n 'categories': args['categories'],\n 'nextcloud_version': platform\n }\n vars.update(APP_SCAFFOLDING_PROFILES.get(platform, {}))\n relative_base = 'app-templates/%i/app/' % platform\n base = resolve_file_relative_path(__file__, relative_base)\n\n context = Context({'app': vars})\n result = {}\n if isdir(base):\n for root, dirs, files in walk(base):\n for file in files:\n file_path = join(root, file)\n rel_file_path = '%s/%s' % (\n vars['id'], relpath(file_path, base)\n )\n with open(file_path) as f:\n t = Template(f.read())\n result[rel_file_path] = t.render(context)\n\n return result\n\n\ndef build_archive(parameters: Dict[str, str]) -> BytesIO:\n buffer = BytesIO()\n with tarfile.open(fileobj=buffer, mode='w:gz') as f:\n files = build_files(parameters)\n for path, contents in files.items():\n info = tarfile.TarInfo(path)\n info.size = len(contents)\n f.addfile(info, BytesIO(contents.encode()))\n buffer.seek(0)\n return buffer\n", "path": "nextcloudappstore/core/scaffolding/archive.py"}], "after_files": [{"content": "import re\nfrom os import listdir\n\nfrom django.core.exceptions import ValidationError\nfrom django.forms import Textarea, Form, URLField, MultipleChoiceField\nfrom django.utils.translation import ugettext_lazy as _ # type: ignore\nfrom django.forms.fields import EmailField, CharField, ChoiceField\nfrom django.conf import settings\n\nfrom nextcloudappstore.core.facades import resolve_file_relative_path\nfrom nextcloudappstore.core.models import Category\nfrom django.utils.functional import lazy\n\n\ndef get_categories():\n return [(cat.id, cat.name) for cat in Category.objects.all()]\n\n\ndef get_versions():\n tpls = listdir(resolve_file_relative_path(__file__, 'app-templates'))\n return [(v, v) for v in tpls]\n\n\ndef validate_id(input: str) -> str:\n regex = r'^([A-Z][a-z]*)+$'\n if not re.match(regex, 
input):\n raise ValidationError(_('The app name must be camel case e.g. MyApp'))\n\n\nclass AppScaffoldingForm(Form):\n name = CharField(max_length=80, label=_('App name'),\n validators=[validate_id],\n help_text=_('The app name must be camel case e.g. MyApp'))\n platform = ChoiceField(choices=lazy(get_versions, list), required=True,\n label=_('Nextcloud version'))\n author_name = CharField(max_length=80, label=_('Author\\'s full name'))\n author_email = EmailField(label=_('Author\\'s e-mail'))\n author_homepage = URLField(label=_('Author\\'s homepage'), required=False)\n issue_tracker = URLField(label=_('Issue tracker URL'), required=True,\n help_text=_('Bug reports and feature requests'))\n categories = MultipleChoiceField(required=True, label=_('Categories'),\n choices=lazy(get_categories, list),\n help_text=_('Hold down CTRL and click to '\n 'select multiple entries'))\n summary = CharField(max_length=256, label=_('Summary'), help_text=_(\n 'Short description of your app that will be rendered as short teaser'))\n description = CharField(widget=Textarea, label=_('Description'),\n help_text=_('Full description of what your app '\n 'does. Can contain Markdown.'))\n", "path": "nextcloudappstore/core/scaffolding/forms.py"}, {"content": "import re\nimport tarfile\nfrom io import BytesIO, StringIO\nfrom typing import Dict\nfrom os.path import join, isdir, relpath\nfrom os import walk\n\nfrom django.template import Context\nfrom django.template import Template\n\nfrom nextcloudappstore.core.facades import resolve_file_relative_path\nfrom nextcloudappstore.settings.base import APP_SCAFFOLDING_PROFILES\n\n\ndef build_files(args: Dict[str, str]) -> Dict[str, str]:\n platform = int(args['platform']) # prevent path traversal\n vars = {\n 'id': args['name'].lower(),\n 'summary': args['summary'],\n 'description': args['description'],\n 'name': ' '.join(re.findall(r'[A-Z][^A-Z]*', args['name'])),\n 'namespace': args['name'],\n 'author_name': args['author_name'],\n 'author_mail': args['author_email'],\n 'author_homepage': args['author_homepage'],\n 'issue_tracker': args['issue_tracker'],\n 'categories': args['categories'],\n 'nextcloud_version': platform\n }\n vars.update(APP_SCAFFOLDING_PROFILES.get(platform, {}))\n relative_base = 'app-templates/%i/app/' % platform\n base = resolve_file_relative_path(__file__, relative_base)\n\n context = Context({'app': vars})\n result = {}\n if isdir(base):\n for root, dirs, files in walk(base):\n for file in files:\n file_path = join(root, file)\n rel_file_path = '%s/%s' % (\n vars['id'], relpath(file_path, base)\n )\n with open(file_path) as f:\n t = Template(f.read())\n result[rel_file_path] = t.render(context)\n\n return result\n\n\ndef build_archive(parameters: Dict[str, str]) -> BytesIO:\n buffer = BytesIO()\n with tarfile.open(fileobj=buffer, mode='w:gz') as f:\n files = build_files(parameters)\n for path, contents in files.items():\n info = tarfile.TarInfo(path)\n info.size = len(contents)\n f.addfile(info, BytesIO(contents.encode()))\n buffer.seek(0)\n return buffer\n", "path": "nextcloudappstore/core/scaffolding/archive.py"}]} | 1,749 | 308 |
gh_patches_debug_18788 | rasdani/github-patches | git_diff | easybuilders__easybuild-framework-4377 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OneAPI ifort: '-fiopenmp' not supported
I am using the `intel-2022b` toolchain with the traditional `ifort` compiler. EasyBuild is adding the `-fiopenmp` flag which, according to [here](https://www.intel.com/content/www/us/en/docs/fortran-compiler/developer-guide-reference/2023-2/fiopenmp-qiopenmp.html), is only supported for `ifx` but not `ifort`. From discussions on Slack it appears that the problem is in `intel_compilers.py`, and @bartoldeman suggested this workaround in `toolchainopts`:
```
toolchainopts = {'openmp': False, 'extra_fflags': '-fopenmp' }
```
which seems to do the trick. Maybe that could be fixed for the upcoming 4.9 release?
Thanks.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `easybuild/toolchains/compiler/intel_compilers.py`
Content:
```
1 ##
2 # Copyright 2021-2023 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 Support for Intel compilers (icc, ifort) as toolchain compilers, version 2021.x and newer (oneAPI).
27
28 Authors:
29
30 * Kenneth Hoste (Ghent University)
31 """
32 import os
33
34 import easybuild.tools.systemtools as systemtools
35 from easybuild.toolchains.compiler.inteliccifort import IntelIccIfort
36 from easybuild.tools import LooseVersion
37 from easybuild.tools.toolchain.compiler import Compiler
38
39
40 class IntelCompilers(IntelIccIfort):
41 """
42 Compiler class for Intel oneAPI compilers
43 """
44
45 COMPILER_MODULE_NAME = ['intel-compilers']
46 COMPILER_UNIQUE_OPTS = dict(IntelIccIfort.COMPILER_UNIQUE_OPTS)
47 COMPILER_UNIQUE_OPTS.update({
48 'oneapi': (None, "Use oneAPI compilers icx/icpx/ifx instead of classic compilers"),
49 'oneapi_c_cxx': (None, "Use oneAPI C/C++ compilers icx/icpx instead of classic Intel C/C++ compilers "
50 "(auto-enabled for Intel compilers version 2022.2.0, or newer)"),
51 'oneapi_fortran': (False, "Use oneAPI Fortran compiler ifx instead of classic Intel Fortran compiler"),
52 })
53
54 def _set_compiler_vars(self):
55 """Intel compilers-specific adjustments after setting compiler variables."""
56
57 # skip IntelIccIfort._set_compiler_vars (no longer relevant for recent versions)
58 Compiler._set_compiler_vars(self)
59
60 root = self.get_software_root(self.COMPILER_MODULE_NAME)[0]
61 version = self.get_software_version(self.COMPILER_MODULE_NAME)[0]
62
63 libbase = os.path.join('compiler', version, 'linux')
64 libpaths = [
65 os.path.join(libbase, 'compiler', 'lib', 'intel64'),
66 ]
67
68 self.variables.append_subdirs("LDFLAGS", root, subdirs=libpaths)
69
70 def set_variables(self):
71 """Set the variables."""
72
73 oneapi = False
74
75 # auto-enable use of oneAPI C/C++ compilers for sufficiently recent versions of Intel compilers
76 comp_ver = self.get_software_version(self.COMPILER_MODULE_NAME)[0]
77 if LooseVersion(comp_ver) >= LooseVersion('2022.2.0'):
78 if self.options.get('oneapi_c_cxx', None) is None:
79 self.options['oneapi_c_cxx'] = True
80
81 oneapi_tcopt = self.options.get('oneapi')
82 if oneapi_tcopt:
83 oneapi = True
84 self.COMPILER_CXX = 'icpx'
85 self.COMPILER_CC = 'icx'
86 self.COMPILER_F77 = 'ifx'
87 self.COMPILER_F90 = 'ifx'
88 self.COMPILER_FC = 'ifx'
89
90 # if both 'oneapi' and 'oneapi_*' are set, the latter are ignored
91 elif oneapi_tcopt is None:
92 if self.options.get('oneapi_c_cxx', False):
93 oneapi = True
94 self.COMPILER_CC = 'icx'
95 self.COMPILER_CXX = 'icpx'
96
97 if self.options.get('oneapi_fortran', False):
98 oneapi = True
99 self.COMPILER_F77 = 'ifx'
100 self.COMPILER_F90 = 'ifx'
101 self.COMPILER_FC = 'ifx'
102
103 if oneapi:
104 # fp-model source is not supported by icx but is equivalent to precise
105 self.options.options_map['defaultprec'] = ['fp-speculation=safe', 'fp-model precise']
106 if LooseVersion(comp_ver) >= LooseVersion('2022'):
107 self.options.options_map['defaultprec'].insert(0, 'ftz')
108 # icx doesn't like -fp-model fast=1; fp-model fast is equivalent
109 self.options.options_map['loose'] = ['fp-model fast']
110 # fp-model fast=2 gives "warning: overriding '-ffp-model=fast=2' option with '-ffp-model=fast'"
111 self.options.options_map['veryloose'] = ['fp-model fast']
112 # recommended in porting guide
113 self.options.options_map['openmp'] = ['fiopenmp']
114
115 # -xSSE2 is not supported by Intel oneAPI compilers,
116 # so use -march=x86-64 -mtune=generic when using optarch=GENERIC
117 self.COMPILER_GENERIC_OPTION = {
118 (systemtools.X86_64, systemtools.AMD): 'march=x86-64 -mtune=generic',
119 (systemtools.X86_64, systemtools.INTEL): 'march=x86-64 -mtune=generic',
120 }
121
122 # skip IntelIccIfort.set_variables (no longer relevant for recent versions)
123 Compiler.set_variables(self)
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/easybuild/toolchains/compiler/intel_compilers.py b/easybuild/toolchains/compiler/intel_compilers.py
--- a/easybuild/toolchains/compiler/intel_compilers.py
+++ b/easybuild/toolchains/compiler/intel_compilers.py
@@ -109,8 +109,9 @@
self.options.options_map['loose'] = ['fp-model fast']
# fp-model fast=2 gives "warning: overriding '-ffp-model=fast=2' option with '-ffp-model=fast'"
self.options.options_map['veryloose'] = ['fp-model fast']
- # recommended in porting guide
- self.options.options_map['openmp'] = ['fiopenmp']
+ # recommended in porting guide: qopenmp, unlike fiopenmp, works for both classic and oneapi compilers
+ # https://www.intel.com/content/www/us/en/developer/articles/guide/porting-guide-for-ifort-to-ifx.html
+ self.options.options_map['openmp'] = ['qopenmp']
# -xSSE2 is not supported by Intel oneAPI compilers,
# so use -march=x86-64 -mtune=generic when using optarch=GENERIC
| {"golden_diff": "diff --git a/easybuild/toolchains/compiler/intel_compilers.py b/easybuild/toolchains/compiler/intel_compilers.py\n--- a/easybuild/toolchains/compiler/intel_compilers.py\n+++ b/easybuild/toolchains/compiler/intel_compilers.py\n@@ -109,8 +109,9 @@\n self.options.options_map['loose'] = ['fp-model fast']\n # fp-model fast=2 gives \"warning: overriding '-ffp-model=fast=2' option with '-ffp-model=fast'\"\n self.options.options_map['veryloose'] = ['fp-model fast']\n- # recommended in porting guide\n- self.options.options_map['openmp'] = ['fiopenmp']\n+ # recommended in porting guide: qopenmp, unlike fiopenmp, works for both classic and oneapi compilers\n+ # https://www.intel.com/content/www/us/en/developer/articles/guide/porting-guide-for-ifort-to-ifx.html\n+ self.options.options_map['openmp'] = ['qopenmp']\n \n # -xSSE2 is not supported by Intel oneAPI compilers,\n # so use -march=x86-64 -mtune=generic when using optarch=GENERIC\n", "issue": "OneAPI ifort: '-fiopenmp' not supported\nI am using `intel-2022b` toolchain with the traditional `ifort` compiler. EasyBuild is adding the `-fiopenmp` flag which, according to [here](https://www.intel.com/content/www/us/en/docs/fortran-compiler/developer-guide-reference/2023-2/fiopenmp-qiopenmp.html) is only supported for `ifx` but not `ifort`. From discussions on Slack it appears that the problem is in `intel_compilers.py` and @bartoldeman suggested this workaround in `toolchainopts`:\r\n```\r\ntoolchainopts = {'openmp': False, 'extra_fflags': '-fopenmp' }\r\n```\r\nwhich seems to do the trick. Maybe that could be fixed for the upcoming 4.9 release?\r\nThanks.\n", "before_files": [{"content": "##\n# Copyright 2021-2023 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nSupport for Intel compilers (icc, ifort) as toolchain compilers, version 2021.x and newer (oneAPI).\n\nAuthors:\n\n* Kenneth Hoste (Ghent University)\n\"\"\"\nimport os\n\nimport easybuild.tools.systemtools as systemtools\nfrom easybuild.toolchains.compiler.inteliccifort import IntelIccIfort\nfrom easybuild.tools import LooseVersion\nfrom easybuild.tools.toolchain.compiler import Compiler\n\n\nclass IntelCompilers(IntelIccIfort):\n \"\"\"\n Compiler class for Intel oneAPI compilers\n \"\"\"\n\n COMPILER_MODULE_NAME = ['intel-compilers']\n COMPILER_UNIQUE_OPTS = dict(IntelIccIfort.COMPILER_UNIQUE_OPTS)\n COMPILER_UNIQUE_OPTS.update({\n 'oneapi': (None, \"Use oneAPI compilers icx/icpx/ifx instead of classic compilers\"),\n 'oneapi_c_cxx': (None, \"Use oneAPI C/C++ compilers icx/icpx instead of classic Intel C/C++ compilers \"\n \"(auto-enabled for Intel compilers version 2022.2.0, or newer)\"),\n 'oneapi_fortran': (False, \"Use oneAPI Fortran compiler ifx instead of classic Intel Fortran compiler\"),\n })\n\n def _set_compiler_vars(self):\n \"\"\"Intel compilers-specific adjustments after setting compiler variables.\"\"\"\n\n # skip IntelIccIfort._set_compiler_vars (no longer relevant for recent versions)\n Compiler._set_compiler_vars(self)\n\n root = self.get_software_root(self.COMPILER_MODULE_NAME)[0]\n version = self.get_software_version(self.COMPILER_MODULE_NAME)[0]\n\n libbase = os.path.join('compiler', version, 'linux')\n libpaths = [\n os.path.join(libbase, 'compiler', 'lib', 'intel64'),\n ]\n\n self.variables.append_subdirs(\"LDFLAGS\", root, subdirs=libpaths)\n\n def set_variables(self):\n \"\"\"Set the variables.\"\"\"\n\n oneapi = False\n\n # auto-enable use of oneAPI C/C++ compilers for sufficiently recent versions of Intel compilers\n comp_ver = self.get_software_version(self.COMPILER_MODULE_NAME)[0]\n if LooseVersion(comp_ver) >= LooseVersion('2022.2.0'):\n if self.options.get('oneapi_c_cxx', None) is None:\n self.options['oneapi_c_cxx'] = True\n\n oneapi_tcopt = self.options.get('oneapi')\n if oneapi_tcopt:\n oneapi = True\n self.COMPILER_CXX = 'icpx'\n self.COMPILER_CC = 'icx'\n self.COMPILER_F77 = 'ifx'\n self.COMPILER_F90 = 'ifx'\n self.COMPILER_FC = 'ifx'\n\n # if both 'oneapi' and 'oneapi_*' are set, the latter are ignored\n elif oneapi_tcopt is None:\n if self.options.get('oneapi_c_cxx', False):\n oneapi = True\n self.COMPILER_CC = 'icx'\n self.COMPILER_CXX = 'icpx'\n\n if self.options.get('oneapi_fortran', False):\n oneapi = True\n self.COMPILER_F77 = 'ifx'\n self.COMPILER_F90 = 'ifx'\n self.COMPILER_FC = 'ifx'\n\n if oneapi:\n # fp-model source is not supported by icx but is equivalent to precise\n self.options.options_map['defaultprec'] = ['fp-speculation=safe', 'fp-model precise']\n if LooseVersion(comp_ver) >= LooseVersion('2022'):\n self.options.options_map['defaultprec'].insert(0, 'ftz')\n # icx doesn't like -fp-model fast=1; fp-model fast is equivalent\n self.options.options_map['loose'] = ['fp-model fast']\n # fp-model fast=2 gives \"warning: overriding '-ffp-model=fast=2' option with '-ffp-model=fast'\"\n self.options.options_map['veryloose'] = ['fp-model fast']\n # recommended in porting guide\n self.options.options_map['openmp'] = ['fiopenmp']\n\n # -xSSE2 is not supported by Intel oneAPI compilers,\n # so use -march=x86-64 -mtune=generic when using optarch=GENERIC\n self.COMPILER_GENERIC_OPTION = {\n (systemtools.X86_64, systemtools.AMD): 'march=x86-64 -mtune=generic',\n (systemtools.X86_64, 
systemtools.INTEL): 'march=x86-64 -mtune=generic',\n }\n\n # skip IntelIccIfort.set_variables (no longer relevant for recent versions)\n Compiler.set_variables(self)\n", "path": "easybuild/toolchains/compiler/intel_compilers.py"}], "after_files": [{"content": "##\n# Copyright 2021-2023 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nSupport for Intel compilers (icc, ifort) as toolchain compilers, version 2021.x and newer (oneAPI).\n\nAuthors:\n\n* Kenneth Hoste (Ghent University)\n\"\"\"\nimport os\n\nimport easybuild.tools.systemtools as systemtools\nfrom easybuild.toolchains.compiler.inteliccifort import IntelIccIfort\nfrom easybuild.tools import LooseVersion\nfrom easybuild.tools.toolchain.compiler import Compiler\n\n\nclass IntelCompilers(IntelIccIfort):\n \"\"\"\n Compiler class for Intel oneAPI compilers\n \"\"\"\n\n COMPILER_MODULE_NAME = ['intel-compilers']\n COMPILER_UNIQUE_OPTS = dict(IntelIccIfort.COMPILER_UNIQUE_OPTS)\n COMPILER_UNIQUE_OPTS.update({\n 'oneapi': (None, \"Use oneAPI compilers icx/icpx/ifx instead of classic compilers\"),\n 'oneapi_c_cxx': (None, \"Use oneAPI C/C++ compilers icx/icpx instead of classic Intel C/C++ compilers \"\n \"(auto-enabled for Intel compilers version 2022.2.0, or newer)\"),\n 'oneapi_fortran': (False, \"Use oneAPI Fortran compiler ifx instead of classic Intel Fortran compiler\"),\n })\n\n def _set_compiler_vars(self):\n \"\"\"Intel compilers-specific adjustments after setting compiler variables.\"\"\"\n\n # skip IntelIccIfort._set_compiler_vars (no longer relevant for recent versions)\n Compiler._set_compiler_vars(self)\n\n root = self.get_software_root(self.COMPILER_MODULE_NAME)[0]\n version = self.get_software_version(self.COMPILER_MODULE_NAME)[0]\n\n libbase = os.path.join('compiler', version, 'linux')\n libpaths = [\n os.path.join(libbase, 'compiler', 'lib', 'intel64'),\n ]\n\n self.variables.append_subdirs(\"LDFLAGS\", root, subdirs=libpaths)\n\n def set_variables(self):\n \"\"\"Set the variables.\"\"\"\n\n oneapi = False\n\n # auto-enable use of oneAPI C/C++ compilers for sufficiently recent versions of Intel compilers\n comp_ver = self.get_software_version(self.COMPILER_MODULE_NAME)[0]\n if LooseVersion(comp_ver) >= LooseVersion('2022.2.0'):\n if self.options.get('oneapi_c_cxx', None) is None:\n self.options['oneapi_c_cxx'] = True\n\n oneapi_tcopt = self.options.get('oneapi')\n if oneapi_tcopt:\n oneapi = True\n self.COMPILER_CXX = 'icpx'\n self.COMPILER_CC = 'icx'\n self.COMPILER_F77 = 'ifx'\n self.COMPILER_F90 = 'ifx'\n self.COMPILER_FC = 'ifx'\n\n 
# if both 'oneapi' and 'oneapi_*' are set, the latter are ignored\n elif oneapi_tcopt is None:\n if self.options.get('oneapi_c_cxx', False):\n oneapi = True\n self.COMPILER_CC = 'icx'\n self.COMPILER_CXX = 'icpx'\n\n if self.options.get('oneapi_fortran', False):\n oneapi = True\n self.COMPILER_F77 = 'ifx'\n self.COMPILER_F90 = 'ifx'\n self.COMPILER_FC = 'ifx'\n\n if oneapi:\n # fp-model source is not supported by icx but is equivalent to precise\n self.options.options_map['defaultprec'] = ['fp-speculation=safe', 'fp-model precise']\n if LooseVersion(comp_ver) >= LooseVersion('2022'):\n self.options.options_map['defaultprec'].insert(0, 'ftz')\n # icx doesn't like -fp-model fast=1; fp-model fast is equivalent\n self.options.options_map['loose'] = ['fp-model fast']\n # fp-model fast=2 gives \"warning: overriding '-ffp-model=fast=2' option with '-ffp-model=fast'\"\n self.options.options_map['veryloose'] = ['fp-model fast']\n # recommended in porting guide: qopenmp, unlike fiopenmp, works for both classic and oneapi compilers\n # https://www.intel.com/content/www/us/en/developer/articles/guide/porting-guide-for-ifort-to-ifx.html\n self.options.options_map['openmp'] = ['qopenmp']\n\n # -xSSE2 is not supported by Intel oneAPI compilers,\n # so use -march=x86-64 -mtune=generic when using optarch=GENERIC\n self.COMPILER_GENERIC_OPTION = {\n (systemtools.X86_64, systemtools.AMD): 'march=x86-64 -mtune=generic',\n (systemtools.X86_64, systemtools.INTEL): 'march=x86-64 -mtune=generic',\n }\n\n # skip IntelIccIfort.set_variables (no longer relevant for recent versions)\n Compiler.set_variables(self)\n", "path": "easybuild/toolchains/compiler/intel_compilers.py"}]} | 2,034 | 271 |
gh_patches_debug_41210 | rasdani/github-patches | git_diff | DDMAL__CantusDB-941 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Source Detail page should not display link to "Manage source editors" to all logged-in users
Currently, in the Source Detail view, if a user is logged in, we display "add new chant", "full text & volpiano editor", "edit source description" and "manage source editors" links in the sidebar. We need to check that the user has permission to edit the source before displaying these links; otherwise, users will click through into 403 errors. An extra check will be required for the "manage source editors" link - only people with "staff status" should see this link, as it leads to the admin area.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/views/source.py`
Content:
```
1 from django.views.generic import DetailView, ListView, CreateView, UpdateView
2 from django.db.models import Q, Prefetch
3 from main_app.models import Source, Provenance, Century
4 from main_app.forms import SourceCreateForm, SourceEditForm
5 from django.contrib import messages
6 from django.urls import reverse
7 from django.contrib.auth.mixins import LoginRequiredMixin
8 from django.http import HttpResponseRedirect
9 from django.contrib.auth.mixins import UserPassesTestMixin
10 from django.core.exceptions import PermissionDenied
11 from django.shortcuts import get_object_or_404
12 from main_app.views.chant import get_feast_selector_options
13
14
15 class SourceDetailView(DetailView):
16 model = Source
17 context_object_name = "source"
18 template_name = "source_detail.html"
19
20 def get_context_data(self, **kwargs):
21 source = self.get_object()
22 display_unpublished = self.request.user.is_authenticated
23 if (source.published is False) and (not display_unpublished):
24 raise PermissionDenied()
25
26 context = super().get_context_data(**kwargs)
27
28 if source.segment and source.segment.id == 4064:
29 # if this is a sequence source
30 context["sequences"] = source.sequence_set.order_by("s_sequence")
31 context["folios"] = (
32 source.sequence_set.values_list("folio", flat=True)
33 .distinct()
34 .order_by("folio")
35 )
36 else:
37 # if this is a chant source
38 folios = (
39 source.chant_set.values_list("folio", flat=True)
40 .distinct()
41 .order_by("folio")
42 )
43 context["folios"] = folios
44 # the options for the feast selector on the right, only chant sources have this
45 context["feasts_with_folios"] = get_feast_selector_options(source, folios)
46 return context
47
48
49 class SourceListView(ListView):
50 paginate_by = 100
51 context_object_name = "sources"
52 template_name = "source_list.html"
53
54 def get_context_data(self, **kwargs):
55 context = super().get_context_data(**kwargs)
56 context["provenances"] = (
57 Provenance.objects.all().order_by("name").values("id", "name")
58 )
59 context["centuries"] = (
60 Century.objects.all().order_by("name").values("id", "name")
61 )
62 return context
63
64 def get_queryset(self):
65 # use select_related() for foreign keys to reduce DB queries
66 queryset = Source.objects.select_related(
67 "rism_siglum", "segment", "provenance"
68 ).order_by("siglum")
69
70 display_unpublished = self.request.user.is_authenticated
71 if display_unpublished:
72 q_obj_filter = Q()
73 else:
74 q_obj_filter = Q(published=True)
75
76 if self.request.GET.get("century"):
77 century_name = Century.objects.get(id=self.request.GET.get("century")).name
78 q_obj_filter &= Q(century__name__icontains=century_name)
79
80 if self.request.GET.get("provenance"):
81 provenance_id = int(self.request.GET.get("provenance"))
82 q_obj_filter &= Q(provenance__id=provenance_id)
83 if self.request.GET.get("segment"):
84 segment_id = int(self.request.GET.get("segment"))
85 q_obj_filter &= Q(segment__id=segment_id)
86 if self.request.GET.get("fullSource") in ["true", "false"]:
87 full_source_str = self.request.GET.get("fullSource")
88 if full_source_str == "true":
89 full_source_q = Q(full_source=True) | Q(full_source=None)
90 q_obj_filter &= full_source_q
91 else:
92 q_obj_filter &= Q(full_source=False)
93
94 if self.request.GET.get("general"):
95 # Strip spaces at the beginning and end. Then make list of terms split on spaces
96 general_search_terms = self.request.GET.get("general").strip(" ").split(" ")
97 # We need a Q Object for each field we're gonna look into
98 title_q = Q()
99 siglum_q = Q()
100 rism_siglum_q = Q()
101 description_q = Q()
102 # it seems that old cantus don't look into title and provenance for the general search terms
103 # cantus.uwaterloo.ca/source/123901 this source cannot be found by searching its provenance 'Kremsmünster' in the general search field
104 # provenance_q = Q()
105 summary_q = Q()
106
107 # For each term, add it to the Q object of each field with an OR operation.
108 # We split the terms so that the words can be separated in the actual
109 # field, allowing for a more flexible search, and a field needs
110 # to match only one of the terms
111 for term in general_search_terms:
112 title_q |= Q(title__icontains=term)
113 siglum_q |= Q(siglum__icontains=term)
114 rism_siglum_q |= Q(rism_siglum__name__icontains=term) | Q(
115 rism_siglum__description__icontains=term
116 )
117 description_q |= Q(description__icontains=term)
118 summary_q |= Q(summary__icontains=term)
119 # provenance_q |= Q(provenance__name__icontains=term)
120 # All the Q objects are put together with OR.
121 # The end result is that at least one term has to match in at least one
122 # field
123 # general_search_q = (
124 # title_q | siglum_q | rism_siglum_q | description_q | provenance_q
125 # )
126 general_search_q = (
127 title_q | siglum_q | rism_siglum_q | description_q | summary_q
128 )
129 q_obj_filter &= general_search_q
130
131 # For the indexing notes search we follow the same procedure as above but with
132 # different fields
133 if self.request.GET.get("indexing"):
134 # Make list of terms split on spaces
135 indexing_search_terms = self.request.GET.get("indexing").split(" ")
136 # We need a Q Object for each field we're gonna look into
137 inventoried_by_q = Q()
138 full_text_entered_by_q = Q()
139 melodies_entered_by_q = Q()
140 proofreaders_q = Q()
141 other_editors_q = Q()
142 indexing_notes_q = Q()
143 # For each term, add it to the Q object of each field with an OR operation.
144 # We split the terms so that the words can be separated in the actual
145 # field, allowing for a more flexible search, and a field needs
146 # to match only one of the terms
147 for term in indexing_search_terms:
148 inventoried_by_q |= Q(inventoried_by__full_name__icontains=term)
149 full_text_entered_by_q |= Q(
150 full_text_entered_by__full_name__icontains=term
151 )
152 melodies_entered_by_q |= Q(
153 melodies_entered_by__full_name__icontains=term
154 )
155 proofreaders_q |= Q(proofreaders__full_name__icontains=term)
156 other_editors_q |= Q(other_editors__full_name__icontains=term)
157 indexing_notes_q |= Q(indexing_notes__icontains=term)
158 # All the Q objects are put together with OR.
159 # The end result is that at least one term has to match in at least one
160 # field
161 indexing_search_q = (
162 inventoried_by_q
163 | full_text_entered_by_q
164 | melodies_entered_by_q
165 | proofreaders_q
166 | other_editors_q
167 | indexing_notes_q
168 )
169 q_obj_filter &= indexing_search_q
170
171 return queryset.filter(q_obj_filter).prefetch_related(
172 Prefetch("century", queryset=Century.objects.all().order_by("id"))
173 )
174
175
176 class SourceCreateView(LoginRequiredMixin, UserPassesTestMixin, CreateView):
177 model = Source
178 template_name = "source_create_form.html"
179 form_class = SourceCreateForm
180
181 def test_func(self):
182 user = self.request.user
183 # checks if the user is allowed to create sources
184 is_authorized = user.groups.filter(
185 Q(name="project manager") | Q(name="editor") | Q(name="contributor")
186 ).exists()
187
188 if is_authorized:
189 return True
190 else:
191 return False
192
193 def get_success_url(self):
194 return reverse("source-detail", args=[self.object.id])
195
196 def form_valid(self, form):
197 form.instance.created_by = self.request.user
198 self.object = form.save()
199
200 # assign this source to the "current_editors"
201 current_editors = self.object.current_editors.all()
202 self.request.user.sources_user_can_edit.add(self.object)
203
204 for editor in current_editors:
205 editor.sources_user_can_edit.add(self.object)
206
207 messages.success(
208 self.request,
209 "Source created successfully!",
210 )
211 return HttpResponseRedirect(self.get_success_url())
212
213
214 class SourceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
215 template_name = "source_edit.html"
216 model = Source
217 form_class = SourceEditForm
218 pk_url_kwarg = "source_id"
219
220 def get_context_data(self, **kwargs):
221 source = self.get_object()
222 context = super().get_context_data(**kwargs)
223
224 if source.segment and source.segment.id == 4064:
225 # if this is a sequence source
226 context["sequences"] = source.sequence_set.order_by("s_sequence")
227 context["folios"] = (
228 source.sequence_set.values_list("folio", flat=True)
229 .distinct()
230 .order_by("folio")
231 )
232 else:
233 # if this is a chant source
234 folios = (
235 source.chant_set.values_list("folio", flat=True)
236 .distinct()
237 .order_by("folio")
238 )
239 context["folios"] = folios
240 # the options for the feast selector on the right, only chant sources have this
241 context["feasts_with_folios"] = get_feast_selector_options(source, folios)
242 return context
243
244 def test_func(self):
245 user = self.request.user
246 source_id = self.kwargs.get(self.pk_url_kwarg)
247 source = get_object_or_404(Source, id=source_id)
248
249 assigned_to_source = user.sources_user_can_edit.filter(id=source_id)
250
251 # checks if the user is a project manager
252 is_project_manager = user.groups.filter(name="project manager").exists()
253 # checks if the user is an editor
254 is_editor = user.groups.filter(name="editor").exists()
255 # checks if the user is a contributor
256 is_contributor = user.groups.filter(name="contributor").exists()
257
258 if (
259 (is_project_manager)
260 or (is_editor and assigned_to_source)
261 or (is_editor and source.created_by == user)
262 or (is_contributor and source.created_by == user)
263 ):
264 return True
265 else:
266 return False
267
268 def form_valid(self, form):
269 form.instance.last_updated_by = self.request.user
270
271 # remove this source from the old "current_editors"
272 # assign this source to the new "current_editors"
273
274 old_current_editors = list(
275 Source.objects.get(id=form.instance.id).current_editors.all()
276 )
277 new_current_editors = form.cleaned_data["current_editors"]
278 source = form.save()
279
280 for old_editor in old_current_editors:
281 old_editor.sources_user_can_edit.remove(source)
282
283 for new_editor in new_current_editors:
284 new_editor.sources_user_can_edit.add(source)
285
286 return HttpResponseRedirect(self.get_success_url())
287
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/main_app/views/source.py b/django/cantusdb_project/main_app/views/source.py
--- a/django/cantusdb_project/main_app/views/source.py
+++ b/django/cantusdb_project/main_app/views/source.py
@@ -9,7 +9,10 @@
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404
-from main_app.views.chant import get_feast_selector_options
+from main_app.views.chant import (
+ get_feast_selector_options,
+ user_can_edit_chants_in_source,
+)
class SourceDetailView(DetailView):
@@ -19,6 +22,7 @@
def get_context_data(self, **kwargs):
source = self.get_object()
+ user = self.request.user
display_unpublished = self.request.user.is_authenticated
if (source.published is False) and (not display_unpublished):
raise PermissionDenied()
@@ -43,6 +47,9 @@
context["folios"] = folios
# the options for the feast selector on the right, only chant sources have this
context["feasts_with_folios"] = get_feast_selector_options(source, folios)
+
+ context["user_can_edit_chants"] = user_can_edit_chants_in_source(user, source)
+ context["user_can_edit_source"] = user_can_edit_source(user, source)
return context
@@ -246,24 +253,7 @@
source_id = self.kwargs.get(self.pk_url_kwarg)
source = get_object_or_404(Source, id=source_id)
- assigned_to_source = user.sources_user_can_edit.filter(id=source_id)
-
- # checks if the user is a project manager
- is_project_manager = user.groups.filter(name="project manager").exists()
- # checks if the user is an editor
- is_editor = user.groups.filter(name="editor").exists()
- # checks if the user is a contributor
- is_contributor = user.groups.filter(name="contributor").exists()
-
- if (
- (is_project_manager)
- or (is_editor and assigned_to_source)
- or (is_editor and source.created_by == user)
- or (is_contributor and source.created_by == user)
- ):
- return True
- else:
- return False
+ return user_can_edit_source(user, source)
def form_valid(self, form):
form.instance.last_updated_by = self.request.user
@@ -284,3 +274,25 @@
new_editor.sources_user_can_edit.add(source)
return HttpResponseRedirect(self.get_success_url())
+
+
+def user_can_edit_source(user, source):
+ source_id = source.id
+ assigned_to_source = user.sources_user_can_edit.filter(id=source_id)
+
+ # checks if the user is a project manager
+ is_project_manager = user.groups.filter(name="project manager").exists()
+ # checks if the user is an editor
+ is_editor = user.groups.filter(name="editor").exists()
+ # checks if the user is a contributor
+ is_contributor = user.groups.filter(name="contributor").exists()
+
+ if (
+ (is_project_manager)
+ or (is_editor and assigned_to_source)
+ or (is_editor and source.created_by == user)
+ or (is_contributor and source.created_by == user)
+ ):
+ return True
+ else:
+ return False
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/source.py b/django/cantusdb_project/main_app/views/source.py\n--- a/django/cantusdb_project/main_app/views/source.py\n+++ b/django/cantusdb_project/main_app/views/source.py\n@@ -9,7 +9,10 @@\n from django.contrib.auth.mixins import UserPassesTestMixin\n from django.core.exceptions import PermissionDenied\n from django.shortcuts import get_object_or_404\n-from main_app.views.chant import get_feast_selector_options\n+from main_app.views.chant import (\n+ get_feast_selector_options,\n+ user_can_edit_chants_in_source,\n+)\n \n \n class SourceDetailView(DetailView):\n@@ -19,6 +22,7 @@\n \n def get_context_data(self, **kwargs):\n source = self.get_object()\n+ user = self.request.user\n display_unpublished = self.request.user.is_authenticated\n if (source.published is False) and (not display_unpublished):\n raise PermissionDenied()\n@@ -43,6 +47,9 @@\n context[\"folios\"] = folios\n # the options for the feast selector on the right, only chant sources have this\n context[\"feasts_with_folios\"] = get_feast_selector_options(source, folios)\n+\n+ context[\"user_can_edit_chants\"] = user_can_edit_chants_in_source(user, source)\n+ context[\"user_can_edit_source\"] = user_can_edit_source(user, source)\n return context\n \n \n@@ -246,24 +253,7 @@\n source_id = self.kwargs.get(self.pk_url_kwarg)\n source = get_object_or_404(Source, id=source_id)\n \n- assigned_to_source = user.sources_user_can_edit.filter(id=source_id)\n-\n- # checks if the user is a project manager\n- is_project_manager = user.groups.filter(name=\"project manager\").exists()\n- # checks if the user is an editor\n- is_editor = user.groups.filter(name=\"editor\").exists()\n- # checks if the user is a contributor\n- is_contributor = user.groups.filter(name=\"contributor\").exists()\n-\n- if (\n- (is_project_manager)\n- or (is_editor and assigned_to_source)\n- or (is_editor and source.created_by == user)\n- or (is_contributor and source.created_by == user)\n- ):\n- return True\n- else:\n- return False\n+ return user_can_edit_source(user, source)\n \n def form_valid(self, form):\n form.instance.last_updated_by = self.request.user\n@@ -284,3 +274,25 @@\n new_editor.sources_user_can_edit.add(source)\n \n return HttpResponseRedirect(self.get_success_url())\n+\n+\n+def user_can_edit_source(user, source):\n+ source_id = source.id\n+ assigned_to_source = user.sources_user_can_edit.filter(id=source_id)\n+\n+ # checks if the user is a project manager\n+ is_project_manager = user.groups.filter(name=\"project manager\").exists()\n+ # checks if the user is an editor\n+ is_editor = user.groups.filter(name=\"editor\").exists()\n+ # checks if the user is a contributor\n+ is_contributor = user.groups.filter(name=\"contributor\").exists()\n+\n+ if (\n+ (is_project_manager)\n+ or (is_editor and assigned_to_source)\n+ or (is_editor and source.created_by == user)\n+ or (is_contributor and source.created_by == user)\n+ ):\n+ return True\n+ else:\n+ return False\n", "issue": "Source Detail page should not display link to \"Manage source editors\" to all logged-in users\nCurrently, in the Source Detail view, if a user is logged in, we display \"add new chant\", \"full text & volpiano editor\", \"edit source description\" and \"manage source editors\" links in the sidebar. We need to check that the user has permission to edit the source before displaying these links, otherwise, users will click through into 403 errors. 
An extra check will be required for the \"manage source editors\" link - only people with \"staff status\" should see this link, as it leads to the admin area.\n", "before_files": [{"content": "from django.views.generic import DetailView, ListView, CreateView, UpdateView\nfrom django.db.models import Q, Prefetch\nfrom main_app.models import Source, Provenance, Century\nfrom main_app.forms import SourceCreateForm, SourceEditForm\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404\nfrom main_app.views.chant import get_feast_selector_options\n\n\nclass SourceDetailView(DetailView):\n model = Source\n context_object_name = \"source\"\n template_name = \"source_detail.html\"\n\n def get_context_data(self, **kwargs):\n source = self.get_object()\n display_unpublished = self.request.user.is_authenticated\n if (source.published is False) and (not display_unpublished):\n raise PermissionDenied()\n\n context = super().get_context_data(**kwargs)\n\n if source.segment and source.segment.id == 4064:\n # if this is a sequence source\n context[\"sequences\"] = source.sequence_set.order_by(\"s_sequence\")\n context[\"folios\"] = (\n source.sequence_set.values_list(\"folio\", flat=True)\n .distinct()\n .order_by(\"folio\")\n )\n else:\n # if this is a chant source\n folios = (\n source.chant_set.values_list(\"folio\", flat=True)\n .distinct()\n .order_by(\"folio\")\n )\n context[\"folios\"] = folios\n # the options for the feast selector on the right, only chant sources have this\n context[\"feasts_with_folios\"] = get_feast_selector_options(source, folios)\n return context\n\n\nclass SourceListView(ListView):\n paginate_by = 100\n context_object_name = \"sources\"\n template_name = \"source_list.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"provenances\"] = (\n Provenance.objects.all().order_by(\"name\").values(\"id\", \"name\")\n )\n context[\"centuries\"] = (\n Century.objects.all().order_by(\"name\").values(\"id\", \"name\")\n )\n return context\n\n def get_queryset(self):\n # use select_related() for foreign keys to reduce DB queries\n queryset = Source.objects.select_related(\n \"rism_siglum\", \"segment\", \"provenance\"\n ).order_by(\"siglum\")\n\n display_unpublished = self.request.user.is_authenticated\n if display_unpublished:\n q_obj_filter = Q()\n else:\n q_obj_filter = Q(published=True)\n\n if self.request.GET.get(\"century\"):\n century_name = Century.objects.get(id=self.request.GET.get(\"century\")).name\n q_obj_filter &= Q(century__name__icontains=century_name)\n\n if self.request.GET.get(\"provenance\"):\n provenance_id = int(self.request.GET.get(\"provenance\"))\n q_obj_filter &= Q(provenance__id=provenance_id)\n if self.request.GET.get(\"segment\"):\n segment_id = int(self.request.GET.get(\"segment\"))\n q_obj_filter &= Q(segment__id=segment_id)\n if self.request.GET.get(\"fullSource\") in [\"true\", \"false\"]:\n full_source_str = self.request.GET.get(\"fullSource\")\n if full_source_str == \"true\":\n full_source_q = Q(full_source=True) | Q(full_source=None)\n q_obj_filter &= full_source_q\n else:\n q_obj_filter &= Q(full_source=False)\n\n if self.request.GET.get(\"general\"):\n # Strip spaces at the beginning and end. 
Then make list of terms split on spaces\n general_search_terms = self.request.GET.get(\"general\").strip(\" \").split(\" \")\n # We need a Q Object for each field we're gonna look into\n title_q = Q()\n siglum_q = Q()\n rism_siglum_q = Q()\n description_q = Q()\n # it seems that old cantus don't look into title and provenance for the general search terms\n # cantus.uwaterloo.ca/source/123901 this source cannot be found by searching its provenance 'Kremsm\u00fcnster' in the general search field\n # provenance_q = Q()\n summary_q = Q()\n\n # For each term, add it to the Q object of each field with an OR operation.\n # We split the terms so that the words can be separated in the actual\n # field, allowing for a more flexible search, and a field needs\n # to match only one of the terms\n for term in general_search_terms:\n title_q |= Q(title__icontains=term)\n siglum_q |= Q(siglum__icontains=term)\n rism_siglum_q |= Q(rism_siglum__name__icontains=term) | Q(\n rism_siglum__description__icontains=term\n )\n description_q |= Q(description__icontains=term)\n summary_q |= Q(summary__icontains=term)\n # provenance_q |= Q(provenance__name__icontains=term)\n # All the Q objects are put together with OR.\n # The end result is that at least one term has to match in at least one\n # field\n # general_search_q = (\n # title_q | siglum_q | rism_siglum_q | description_q | provenance_q\n # )\n general_search_q = (\n title_q | siglum_q | rism_siglum_q | description_q | summary_q\n )\n q_obj_filter &= general_search_q\n\n # For the indexing notes search we follow the same procedure as above but with\n # different fields\n if self.request.GET.get(\"indexing\"):\n # Make list of terms split on spaces\n indexing_search_terms = self.request.GET.get(\"indexing\").split(\" \")\n # We need a Q Object for each field we're gonna look into\n inventoried_by_q = Q()\n full_text_entered_by_q = Q()\n melodies_entered_by_q = Q()\n proofreaders_q = Q()\n other_editors_q = Q()\n indexing_notes_q = Q()\n # For each term, add it to the Q object of each field with an OR operation.\n # We split the terms so that the words can be separated in the actual\n # field, allowing for a more flexible search, and a field needs\n # to match only one of the terms\n for term in indexing_search_terms:\n inventoried_by_q |= Q(inventoried_by__full_name__icontains=term)\n full_text_entered_by_q |= Q(\n full_text_entered_by__full_name__icontains=term\n )\n melodies_entered_by_q |= Q(\n melodies_entered_by__full_name__icontains=term\n )\n proofreaders_q |= Q(proofreaders__full_name__icontains=term)\n other_editors_q |= Q(other_editors__full_name__icontains=term)\n indexing_notes_q |= Q(indexing_notes__icontains=term)\n # All the Q objects are put together with OR.\n # The end result is that at least one term has to match in at least one\n # field\n indexing_search_q = (\n inventoried_by_q\n | full_text_entered_by_q\n | melodies_entered_by_q\n | proofreaders_q\n | other_editors_q\n | indexing_notes_q\n )\n q_obj_filter &= indexing_search_q\n\n return queryset.filter(q_obj_filter).prefetch_related(\n Prefetch(\"century\", queryset=Century.objects.all().order_by(\"id\"))\n )\n\n\nclass SourceCreateView(LoginRequiredMixin, UserPassesTestMixin, CreateView):\n model = Source\n template_name = \"source_create_form.html\"\n form_class = SourceCreateForm\n\n def test_func(self):\n user = self.request.user\n # checks if the user is allowed to create sources\n is_authorized = user.groups.filter(\n Q(name=\"project manager\") | Q(name=\"editor\") | 
Q(name=\"contributor\")\n ).exists()\n\n if is_authorized:\n return True\n else:\n return False\n\n def get_success_url(self):\n return reverse(\"source-detail\", args=[self.object.id])\n\n def form_valid(self, form):\n form.instance.created_by = self.request.user\n self.object = form.save()\n\n # assign this source to the \"current_editors\"\n current_editors = self.object.current_editors.all()\n self.request.user.sources_user_can_edit.add(self.object)\n\n for editor in current_editors:\n editor.sources_user_can_edit.add(self.object)\n\n messages.success(\n self.request,\n \"Source created successfully!\",\n )\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass SourceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"source_edit.html\"\n model = Source\n form_class = SourceEditForm\n pk_url_kwarg = \"source_id\"\n\n def get_context_data(self, **kwargs):\n source = self.get_object()\n context = super().get_context_data(**kwargs)\n\n if source.segment and source.segment.id == 4064:\n # if this is a sequence source\n context[\"sequences\"] = source.sequence_set.order_by(\"s_sequence\")\n context[\"folios\"] = (\n source.sequence_set.values_list(\"folio\", flat=True)\n .distinct()\n .order_by(\"folio\")\n )\n else:\n # if this is a chant source\n folios = (\n source.chant_set.values_list(\"folio\", flat=True)\n .distinct()\n .order_by(\"folio\")\n )\n context[\"folios\"] = folios\n # the options for the feast selector on the right, only chant sources have this\n context[\"feasts_with_folios\"] = get_feast_selector_options(source, folios)\n return context\n\n def test_func(self):\n user = self.request.user\n source_id = self.kwargs.get(self.pk_url_kwarg)\n source = get_object_or_404(Source, id=source_id)\n\n assigned_to_source = user.sources_user_can_edit.filter(id=source_id)\n\n # checks if the user is a project manager\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n # checks if the user is an editor\n is_editor = user.groups.filter(name=\"editor\").exists()\n # checks if the user is a contributor\n is_contributor = user.groups.filter(name=\"contributor\").exists()\n\n if (\n (is_project_manager)\n or (is_editor and assigned_to_source)\n or (is_editor and source.created_by == user)\n or (is_contributor and source.created_by == user)\n ):\n return True\n else:\n return False\n\n def form_valid(self, form):\n form.instance.last_updated_by = self.request.user\n\n # remove this source from the old \"current_editors\"\n # assign this source to the new \"current_editors\"\n\n old_current_editors = list(\n Source.objects.get(id=form.instance.id).current_editors.all()\n )\n new_current_editors = form.cleaned_data[\"current_editors\"]\n source = form.save()\n\n for old_editor in old_current_editors:\n old_editor.sources_user_can_edit.remove(source)\n\n for new_editor in new_current_editors:\n new_editor.sources_user_can_edit.add(source)\n\n return HttpResponseRedirect(self.get_success_url())\n", "path": "django/cantusdb_project/main_app/views/source.py"}], "after_files": [{"content": "from django.views.generic import DetailView, ListView, CreateView, UpdateView\nfrom django.db.models import Q, Prefetch\nfrom main_app.models import Source, Provenance, Century\nfrom main_app.forms import SourceCreateForm, SourceEditForm\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.mixins 
import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404\nfrom main_app.views.chant import (\n get_feast_selector_options,\n user_can_edit_chants_in_source,\n)\n\n\nclass SourceDetailView(DetailView):\n model = Source\n context_object_name = \"source\"\n template_name = \"source_detail.html\"\n\n def get_context_data(self, **kwargs):\n source = self.get_object()\n user = self.request.user\n display_unpublished = self.request.user.is_authenticated\n if (source.published is False) and (not display_unpublished):\n raise PermissionDenied()\n\n context = super().get_context_data(**kwargs)\n\n if source.segment and source.segment.id == 4064:\n # if this is a sequence source\n context[\"sequences\"] = source.sequence_set.order_by(\"s_sequence\")\n context[\"folios\"] = (\n source.sequence_set.values_list(\"folio\", flat=True)\n .distinct()\n .order_by(\"folio\")\n )\n else:\n # if this is a chant source\n folios = (\n source.chant_set.values_list(\"folio\", flat=True)\n .distinct()\n .order_by(\"folio\")\n )\n context[\"folios\"] = folios\n # the options for the feast selector on the right, only chant sources have this\n context[\"feasts_with_folios\"] = get_feast_selector_options(source, folios)\n\n context[\"user_can_edit_chants\"] = user_can_edit_chants_in_source(user, source)\n context[\"user_can_edit_source\"] = user_can_edit_source(user, source)\n return context\n\n\nclass SourceListView(ListView):\n paginate_by = 100\n context_object_name = \"sources\"\n template_name = \"source_list.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"provenances\"] = (\n Provenance.objects.all().order_by(\"name\").values(\"id\", \"name\")\n )\n context[\"centuries\"] = (\n Century.objects.all().order_by(\"name\").values(\"id\", \"name\")\n )\n return context\n\n def get_queryset(self):\n # use select_related() for foreign keys to reduce DB queries\n queryset = Source.objects.select_related(\n \"rism_siglum\", \"segment\", \"provenance\"\n ).order_by(\"siglum\")\n\n display_unpublished = self.request.user.is_authenticated\n if display_unpublished:\n q_obj_filter = Q()\n else:\n q_obj_filter = Q(published=True)\n\n if self.request.GET.get(\"century\"):\n century_name = Century.objects.get(id=self.request.GET.get(\"century\")).name\n q_obj_filter &= Q(century__name__icontains=century_name)\n\n if self.request.GET.get(\"provenance\"):\n provenance_id = int(self.request.GET.get(\"provenance\"))\n q_obj_filter &= Q(provenance__id=provenance_id)\n if self.request.GET.get(\"segment\"):\n segment_id = int(self.request.GET.get(\"segment\"))\n q_obj_filter &= Q(segment__id=segment_id)\n if self.request.GET.get(\"fullSource\") in [\"true\", \"false\"]:\n full_source_str = self.request.GET.get(\"fullSource\")\n if full_source_str == \"true\":\n full_source_q = Q(full_source=True) | Q(full_source=None)\n q_obj_filter &= full_source_q\n else:\n q_obj_filter &= Q(full_source=False)\n\n if self.request.GET.get(\"general\"):\n # Strip spaces at the beginning and end. 
Then make list of terms split on spaces\n general_search_terms = self.request.GET.get(\"general\").strip(\" \").split(\" \")\n # We need a Q Object for each field we're gonna look into\n title_q = Q()\n siglum_q = Q()\n rism_siglum_q = Q()\n description_q = Q()\n # it seems that old cantus don't look into title and provenance for the general search terms\n # cantus.uwaterloo.ca/source/123901 this source cannot be found by searching its provenance 'Kremsm\u00fcnster' in the general search field\n # provenance_q = Q()\n summary_q = Q()\n\n # For each term, add it to the Q object of each field with an OR operation.\n # We split the terms so that the words can be separated in the actual\n # field, allowing for a more flexible search, and a field needs\n # to match only one of the terms\n for term in general_search_terms:\n title_q |= Q(title__icontains=term)\n siglum_q |= Q(siglum__icontains=term)\n rism_siglum_q |= Q(rism_siglum__name__icontains=term) | Q(\n rism_siglum__description__icontains=term\n )\n description_q |= Q(description__icontains=term)\n summary_q |= Q(summary__icontains=term)\n # provenance_q |= Q(provenance__name__icontains=term)\n # All the Q objects are put together with OR.\n # The end result is that at least one term has to match in at least one\n # field\n # general_search_q = (\n # title_q | siglum_q | rism_siglum_q | description_q | provenance_q\n # )\n general_search_q = (\n title_q | siglum_q | rism_siglum_q | description_q | summary_q\n )\n q_obj_filter &= general_search_q\n\n # For the indexing notes search we follow the same procedure as above but with\n # different fields\n if self.request.GET.get(\"indexing\"):\n # Make list of terms split on spaces\n indexing_search_terms = self.request.GET.get(\"indexing\").split(\" \")\n # We need a Q Object for each field we're gonna look into\n inventoried_by_q = Q()\n full_text_entered_by_q = Q()\n melodies_entered_by_q = Q()\n proofreaders_q = Q()\n other_editors_q = Q()\n indexing_notes_q = Q()\n # For each term, add it to the Q object of each field with an OR operation.\n # We split the terms so that the words can be separated in the actual\n # field, allowing for a more flexible search, and a field needs\n # to match only one of the terms\n for term in indexing_search_terms:\n inventoried_by_q |= Q(inventoried_by__full_name__icontains=term)\n full_text_entered_by_q |= Q(\n full_text_entered_by__full_name__icontains=term\n )\n melodies_entered_by_q |= Q(\n melodies_entered_by__full_name__icontains=term\n )\n proofreaders_q |= Q(proofreaders__full_name__icontains=term)\n other_editors_q |= Q(other_editors__full_name__icontains=term)\n indexing_notes_q |= Q(indexing_notes__icontains=term)\n # All the Q objects are put together with OR.\n # The end result is that at least one term has to match in at least one\n # field\n indexing_search_q = (\n inventoried_by_q\n | full_text_entered_by_q\n | melodies_entered_by_q\n | proofreaders_q\n | other_editors_q\n | indexing_notes_q\n )\n q_obj_filter &= indexing_search_q\n\n return queryset.filter(q_obj_filter).prefetch_related(\n Prefetch(\"century\", queryset=Century.objects.all().order_by(\"id\"))\n )\n\n\nclass SourceCreateView(LoginRequiredMixin, UserPassesTestMixin, CreateView):\n model = Source\n template_name = \"source_create_form.html\"\n form_class = SourceCreateForm\n\n def test_func(self):\n user = self.request.user\n # checks if the user is allowed to create sources\n is_authorized = user.groups.filter(\n Q(name=\"project manager\") | Q(name=\"editor\") | 
Q(name=\"contributor\")\n ).exists()\n\n if is_authorized:\n return True\n else:\n return False\n\n def get_success_url(self):\n return reverse(\"source-detail\", args=[self.object.id])\n\n def form_valid(self, form):\n form.instance.created_by = self.request.user\n self.object = form.save()\n\n # assign this source to the \"current_editors\"\n current_editors = self.object.current_editors.all()\n self.request.user.sources_user_can_edit.add(self.object)\n\n for editor in current_editors:\n editor.sources_user_can_edit.add(self.object)\n\n messages.success(\n self.request,\n \"Source created successfully!\",\n )\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass SourceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"source_edit.html\"\n model = Source\n form_class = SourceEditForm\n pk_url_kwarg = \"source_id\"\n\n def get_context_data(self, **kwargs):\n source = self.get_object()\n context = super().get_context_data(**kwargs)\n\n if source.segment and source.segment.id == 4064:\n # if this is a sequence source\n context[\"sequences\"] = source.sequence_set.order_by(\"s_sequence\")\n context[\"folios\"] = (\n source.sequence_set.values_list(\"folio\", flat=True)\n .distinct()\n .order_by(\"folio\")\n )\n else:\n # if this is a chant source\n folios = (\n source.chant_set.values_list(\"folio\", flat=True)\n .distinct()\n .order_by(\"folio\")\n )\n context[\"folios\"] = folios\n # the options for the feast selector on the right, only chant sources have this\n context[\"feasts_with_folios\"] = get_feast_selector_options(source, folios)\n return context\n\n def test_func(self):\n user = self.request.user\n source_id = self.kwargs.get(self.pk_url_kwarg)\n source = get_object_or_404(Source, id=source_id)\n\n return user_can_edit_source(user, source)\n\n def form_valid(self, form):\n form.instance.last_updated_by = self.request.user\n\n # remove this source from the old \"current_editors\"\n # assign this source to the new \"current_editors\"\n\n old_current_editors = list(\n Source.objects.get(id=form.instance.id).current_editors.all()\n )\n new_current_editors = form.cleaned_data[\"current_editors\"]\n source = form.save()\n\n for old_editor in old_current_editors:\n old_editor.sources_user_can_edit.remove(source)\n\n for new_editor in new_current_editors:\n new_editor.sources_user_can_edit.add(source)\n\n return HttpResponseRedirect(self.get_success_url())\n\n\ndef user_can_edit_source(user, source):\n source_id = source.id\n assigned_to_source = user.sources_user_can_edit.filter(id=source_id)\n\n # checks if the user is a project manager\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n # checks if the user is an editor\n is_editor = user.groups.filter(name=\"editor\").exists()\n # checks if the user is a contributor\n is_contributor = user.groups.filter(name=\"contributor\").exists()\n\n if (\n (is_project_manager)\n or (is_editor and assigned_to_source)\n or (is_editor and source.created_by == user)\n or (is_contributor and source.created_by == user)\n ):\n return True\n else:\n return False\n", "path": "django/cantusdb_project/main_app/views/source.py"}]} | 3,674 | 790 |
gh_patches_debug_2248 | rasdani/github-patches | git_diff | googleapis__python-bigquery-942 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
chore: update system tests and samples to use an `@google.com` email address
Re: https://github.com/googleapis/python-bigquery/pull/935#issuecomment-911791623
It may be some time before we can reconfigure our test project(s) to allow permissions for non-google.com folks.
I propose we use `[email protected]`, which is intended for interacting with the public and most Googlers who support this client are members of but don't have email enabled, so it should be less of a spam risk.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `samples/update_dataset_access.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 def update_dataset_access(dataset_id):
17
18 # [START bigquery_update_dataset_access]
19 from google.cloud import bigquery
20
21 # Construct a BigQuery client object.
22 client = bigquery.Client()
23
24 # TODO(developer): Set dataset_id to the ID of the dataset to fetch.
25 # dataset_id = 'your-project.your_dataset'
26
27 dataset = client.get_dataset(dataset_id) # Make an API request.
28
29 entry = bigquery.AccessEntry(
30 role="READER",
31 entity_type="userByEmail",
32 entity_id="[email protected]",
33 )
34
35 entries = list(dataset.access_entries)
36 entries.append(entry)
37 dataset.access_entries = entries
38
39 dataset = client.update_dataset(dataset, ["access_entries"]) # Make an API request.
40
41 full_dataset_id = "{}.{}".format(dataset.project, dataset.dataset_id)
42 print(
43 "Updated dataset '{}' with modified user permissions.".format(full_dataset_id)
44 )
45 # [END bigquery_update_dataset_access]
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/samples/update_dataset_access.py b/samples/update_dataset_access.py
--- a/samples/update_dataset_access.py
+++ b/samples/update_dataset_access.py
@@ -28,8 +28,8 @@
entry = bigquery.AccessEntry(
role="READER",
- entity_type="userByEmail",
- entity_id="[email protected]",
+ entity_type="groupByEmail",
+ entity_id="[email protected]",
)
entries = list(dataset.access_entries)
| {"golden_diff": "diff --git a/samples/update_dataset_access.py b/samples/update_dataset_access.py\n--- a/samples/update_dataset_access.py\n+++ b/samples/update_dataset_access.py\n@@ -28,8 +28,8 @@\n \n entry = bigquery.AccessEntry(\n role=\"READER\",\n- entity_type=\"userByEmail\",\n- entity_id=\"[email protected]\",\n+ entity_type=\"groupByEmail\",\n+ entity_id=\"[email protected]\",\n )\n \n entries = list(dataset.access_entries)\n", "issue": "chore: update system tests and samples to use and `@google.com` email address\nRe: https://github.com/googleapis/python-bigquery/pull/935#issuecomment-911791623\r\n\r\nIt may be some time before we can reconfigure our test project(s) to allow permissions for non-google.com folks.\r\n\r\nI propose we use `[email protected]`, which is intended for interacting with the public and most Googlers who support this client are members of but don't have email enabled, so it should be less of a spam risk.\r\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef update_dataset_access(dataset_id):\n\n # [START bigquery_update_dataset_access]\n from google.cloud import bigquery\n\n # Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set dataset_id to the ID of the dataset to fetch.\n # dataset_id = 'your-project.your_dataset'\n\n dataset = client.get_dataset(dataset_id) # Make an API request.\n\n entry = bigquery.AccessEntry(\n role=\"READER\",\n entity_type=\"userByEmail\",\n entity_id=\"[email protected]\",\n )\n\n entries = list(dataset.access_entries)\n entries.append(entry)\n dataset.access_entries = entries\n\n dataset = client.update_dataset(dataset, [\"access_entries\"]) # Make an API request.\n\n full_dataset_id = \"{}.{}\".format(dataset.project, dataset.dataset_id)\n print(\n \"Updated dataset '{}' with modified user permissions.\".format(full_dataset_id)\n )\n # [END bigquery_update_dataset_access]\n", "path": "samples/update_dataset_access.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef update_dataset_access(dataset_id):\n\n # [START bigquery_update_dataset_access]\n from google.cloud import bigquery\n\n # Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set dataset_id to the ID of the dataset to fetch.\n # dataset_id = 'your-project.your_dataset'\n\n dataset = client.get_dataset(dataset_id) # Make an API request.\n\n entry = 
bigquery.AccessEntry(\n role=\"READER\",\n entity_type=\"groupByEmail\",\n entity_id=\"[email protected]\",\n )\n\n entries = list(dataset.access_entries)\n entries.append(entry)\n dataset.access_entries = entries\n\n dataset = client.update_dataset(dataset, [\"access_entries\"]) # Make an API request.\n\n full_dataset_id = \"{}.{}\".format(dataset.project, dataset.dataset_id)\n print(\n \"Updated dataset '{}' with modified user permissions.\".format(full_dataset_id)\n )\n # [END bigquery_update_dataset_access]\n", "path": "samples/update_dataset_access.py"}]} | 815 | 116 |
gh_patches_debug_27274 | rasdani/github-patches | git_diff | open-mmlab__mmpose-465 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a README file to explain the settings in the config files
A good example can be found [here](https://github.com/open-mmlab/mmaction2/blob/master/docs/config.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/train.py`
Content:
```
1 import argparse
2 import copy
3 import os
4 import os.path as osp
5 import time
6
7 import mmcv
8 import torch
9 from mmcv import Config, DictAction
10 from mmcv.runner import init_dist, set_random_seed
11 from mmcv.utils import get_git_hash
12
13 from mmpose import __version__
14 from mmpose.apis import train_model
15 from mmpose.datasets import build_dataset
16 from mmpose.models import build_posenet
17 from mmpose.utils import collect_env, get_root_logger
18
19
20 def parse_args():
21 parser = argparse.ArgumentParser(description='Train a pose model')
22 parser.add_argument('config', help='train config file path')
23 parser.add_argument('--work-dir', help='the dir to save logs and models')
24 parser.add_argument(
25 '--resume-from', help='the checkpoint file to resume from')
26 parser.add_argument(
27 '--no-validate',
28 action='store_true',
29 help='whether not to evaluate the checkpoint during training')
30 group_gpus = parser.add_mutually_exclusive_group()
31 group_gpus.add_argument(
32 '--gpus',
33 type=int,
34 help='number of gpus to use '
35 '(only applicable to non-distributed training)')
36 group_gpus.add_argument(
37 '--gpu-ids',
38 type=int,
39 nargs='+',
40 help='ids of gpus to use '
41 '(only applicable to non-distributed training)')
42 parser.add_argument('--seed', type=int, default=None, help='random seed')
43 parser.add_argument(
44 '--deterministic',
45 action='store_true',
46 help='whether to set deterministic options for CUDNN backend.')
47 parser.add_argument(
48 '--options', nargs='+', action=DictAction, help='arguments in dict')
49 parser.add_argument(
50 '--launcher',
51 choices=['none', 'pytorch', 'slurm', 'mpi'],
52 default='none',
53 help='job launcher')
54 parser.add_argument('--local_rank', type=int, default=0)
55 parser.add_argument(
56 '--autoscale-lr',
57 action='store_true',
58 help='automatically scale lr with the number of gpus')
59 args = parser.parse_args()
60 if 'LOCAL_RANK' not in os.environ:
61 os.environ['LOCAL_RANK'] = str(args.local_rank)
62
63 return args
64
65
66 def main():
67 args = parse_args()
68 cfg = Config.fromfile(args.config)
69 if args.options is not None:
70 cfg.merge_from_dict(args.options)
71 # set cudnn_benchmark
72 if cfg.get('cudnn_benchmark', False):
73 torch.backends.cudnn.benchmark = True
74 # work_dir is determined in this priority: CLI > segment in file > filename
75 if args.work_dir is not None:
76 # update configs according to CLI args if args.work_dir is not None
77 cfg.work_dir = args.work_dir
78 elif cfg.get('work_dir', None) is None:
79 # use config filename as default work_dir if cfg.work_dir is None
80 cfg.work_dir = osp.join('./work_dirs',
81 osp.splitext(osp.basename(args.config))[0])
82 if args.resume_from is not None:
83 cfg.resume_from = args.resume_from
84 if args.gpu_ids is not None:
85 cfg.gpu_ids = args.gpu_ids
86 else:
87 cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
88
89 if args.autoscale_lr:
90 # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
91 cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
92
93 # init distributed env first, since logger depends on the dist info.
94 if args.launcher == 'none':
95 distributed = False
96 else:
97 distributed = True
98 init_dist(args.launcher, **cfg.dist_params)
99
100 # create work_dir
101 mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
102 # init the logger before other steps
103 timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
104 log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
105 logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
106
107 # init the meta dict to record some important information such as
108 # environment info and seed, which will be logged
109 meta = dict()
110 # log env info
111 env_info_dict = collect_env()
112 env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
113 dash_line = '-' * 60 + '\n'
114 logger.info('Environment info:\n' + dash_line + env_info + '\n' +
115 dash_line)
116 meta['env_info'] = env_info
117
118 # log some basic info
119 logger.info(f'Distributed training: {distributed}')
120 logger.info(f'Config:\n{cfg.pretty_text}')
121
122 # set random seeds
123 if args.seed is not None:
124 logger.info(f'Set random seed to {args.seed}, '
125 f'deterministic: {args.deterministic}')
126 set_random_seed(args.seed, deterministic=args.deterministic)
127 cfg.seed = args.seed
128 meta['seed'] = args.seed
129
130 model = build_posenet(cfg.model)
131 datasets = [build_dataset(cfg.data.train)]
132
133 if len(cfg.workflow) == 2:
134 val_dataset = copy.deepcopy(cfg.data.val)
135 val_dataset.pipeline = cfg.data.train.pipeline
136 datasets.append(build_dataset(val_dataset))
137
138 if cfg.checkpoint_config is not None:
139 # save mmpose version, config file content
140 # checkpoints as meta data
141 cfg.checkpoint_config.meta = dict(
142 mmpose_version=__version__ + get_git_hash(digits=7),
143 config=cfg.pretty_text,
144 )
145 train_model(
146 model,
147 datasets,
148 cfg,
149 distributed=distributed,
150 validate=(not args.no_validate),
151 timestamp=timestamp,
152 meta=meta)
153
154
155 if __name__ == '__main__':
156 main()
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/train.py b/tools/train.py
--- a/tools/train.py
+++ b/tools/train.py
@@ -45,7 +45,13 @@
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
- '--options', nargs='+', action=DictAction, help='arguments in dict')
+ '--cfg-options',
+ nargs='+',
+ action=DictAction,
+ default={},
+ help='override some settings in the used config, the key-value pair '
+ 'in xxx=yyy format will be merged into config file. For example, '
+ "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
@@ -65,12 +71,16 @@
def main():
args = parse_args()
+
cfg = Config.fromfile(args.config)
- if args.options is not None:
- cfg.merge_from_dict(args.options)
+
+ if args.cfg_options is not None:
+ cfg.merge_from_dict(args.cfg_options)
+
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
+
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
| {"golden_diff": "diff --git a/tools/train.py b/tools/train.py\n--- a/tools/train.py\n+++ b/tools/train.py\n@@ -45,7 +45,13 @@\n action='store_true',\n help='whether to set deterministic options for CUDNN backend.')\n parser.add_argument(\n- '--options', nargs='+', action=DictAction, help='arguments in dict')\n+ '--cfg-options',\n+ nargs='+',\n+ action=DictAction,\n+ default={},\n+ help='override some settings in the used config, the key-value pair '\n+ 'in xxx=yyy format will be merged into config file. For example, '\n+ \"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'\")\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n@@ -65,12 +71,16 @@\n \n def main():\n args = parse_args()\n+\n cfg = Config.fromfile(args.config)\n- if args.options is not None:\n- cfg.merge_from_dict(args.options)\n+\n+ if args.cfg_options is not None:\n+ cfg.merge_from_dict(args.cfg_options)\n+\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n+\n # work_dir is determined in this priority: CLI > segment in file > filename\n if args.work_dir is not None:\n # update configs according to CLI args if args.work_dir is not None\n", "issue": "Add a README file to explain the settings in the config files\nA good example can be found [here](https://github.com/open-mmlab/mmaction2/blob/master/docs/config.md)\n", "before_files": [{"content": "import argparse\nimport copy\nimport os\nimport os.path as osp\nimport time\n\nimport mmcv\nimport torch\nfrom mmcv import Config, DictAction\nfrom mmcv.runner import init_dist, set_random_seed\nfrom mmcv.utils import get_git_hash\n\nfrom mmpose import __version__\nfrom mmpose.apis import train_model\nfrom mmpose.datasets import build_dataset\nfrom mmpose.models import build_posenet\nfrom mmpose.utils import collect_env, get_root_logger\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a pose model')\n parser.add_argument('config', help='train config file path')\n parser.add_argument('--work-dir', help='the dir to save logs and models')\n parser.add_argument(\n '--resume-from', help='the checkpoint file to resume from')\n parser.add_argument(\n '--no-validate',\n action='store_true',\n help='whether not to evaluate the checkpoint during training')\n group_gpus = parser.add_mutually_exclusive_group()\n group_gpus.add_argument(\n '--gpus',\n type=int,\n help='number of gpus to use '\n '(only applicable to non-distributed training)')\n group_gpus.add_argument(\n '--gpu-ids',\n type=int,\n nargs='+',\n help='ids of gpus to use '\n '(only applicable to non-distributed training)')\n parser.add_argument('--seed', type=int, default=None, help='random seed')\n parser.add_argument(\n '--deterministic',\n action='store_true',\n help='whether to set deterministic options for CUDNN backend.')\n parser.add_argument(\n '--options', nargs='+', action=DictAction, help='arguments in dict')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument(\n '--autoscale-lr',\n action='store_true',\n help='automatically scale lr with the number of gpus')\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n\n return args\n\n\ndef main():\n args = parse_args()\n cfg = Config.fromfile(args.config)\n if args.options is not None:\n cfg.merge_from_dict(args.options)\n # 
set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n # work_dir is determined in this priority: CLI > segment in file > filename\n if args.work_dir is not None:\n # update configs according to CLI args if args.work_dir is not None\n cfg.work_dir = args.work_dir\n elif cfg.get('work_dir', None) is None:\n # use config filename as default work_dir if cfg.work_dir is None\n cfg.work_dir = osp.join('./work_dirs',\n osp.splitext(osp.basename(args.config))[0])\n if args.resume_from is not None:\n cfg.resume_from = args.resume_from\n if args.gpu_ids is not None:\n cfg.gpu_ids = args.gpu_ids\n else:\n cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)\n\n if args.autoscale_lr:\n # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)\n cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n\n # create work_dir\n mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))\n # init the logger before other steps\n timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n log_file = osp.join(cfg.work_dir, f'{timestamp}.log')\n logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)\n\n # init the meta dict to record some important information such as\n # environment info and seed, which will be logged\n meta = dict()\n # log env info\n env_info_dict = collect_env()\n env_info = '\\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])\n dash_line = '-' * 60 + '\\n'\n logger.info('Environment info:\\n' + dash_line + env_info + '\\n' +\n dash_line)\n meta['env_info'] = env_info\n\n # log some basic info\n logger.info(f'Distributed training: {distributed}')\n logger.info(f'Config:\\n{cfg.pretty_text}')\n\n # set random seeds\n if args.seed is not None:\n logger.info(f'Set random seed to {args.seed}, '\n f'deterministic: {args.deterministic}')\n set_random_seed(args.seed, deterministic=args.deterministic)\n cfg.seed = args.seed\n meta['seed'] = args.seed\n\n model = build_posenet(cfg.model)\n datasets = [build_dataset(cfg.data.train)]\n\n if len(cfg.workflow) == 2:\n val_dataset = copy.deepcopy(cfg.data.val)\n val_dataset.pipeline = cfg.data.train.pipeline\n datasets.append(build_dataset(val_dataset))\n\n if cfg.checkpoint_config is not None:\n # save mmpose version, config file content\n # checkpoints as meta data\n cfg.checkpoint_config.meta = dict(\n mmpose_version=__version__ + get_git_hash(digits=7),\n config=cfg.pretty_text,\n )\n train_model(\n model,\n datasets,\n cfg,\n distributed=distributed,\n validate=(not args.no_validate),\n timestamp=timestamp,\n meta=meta)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/train.py"}], "after_files": [{"content": "import argparse\nimport copy\nimport os\nimport os.path as osp\nimport time\n\nimport mmcv\nimport torch\nfrom mmcv import Config, DictAction\nfrom mmcv.runner import init_dist, set_random_seed\nfrom mmcv.utils import get_git_hash\n\nfrom mmpose import __version__\nfrom mmpose.apis import train_model\nfrom mmpose.datasets import build_dataset\nfrom mmpose.models import build_posenet\nfrom mmpose.utils import collect_env, get_root_logger\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a pose model')\n parser.add_argument('config', help='train config file path')\n parser.add_argument('--work-dir', help='the dir to save 
logs and models')\n parser.add_argument(\n '--resume-from', help='the checkpoint file to resume from')\n parser.add_argument(\n '--no-validate',\n action='store_true',\n help='whether not to evaluate the checkpoint during training')\n group_gpus = parser.add_mutually_exclusive_group()\n group_gpus.add_argument(\n '--gpus',\n type=int,\n help='number of gpus to use '\n '(only applicable to non-distributed training)')\n group_gpus.add_argument(\n '--gpu-ids',\n type=int,\n nargs='+',\n help='ids of gpus to use '\n '(only applicable to non-distributed training)')\n parser.add_argument('--seed', type=int, default=None, help='random seed')\n parser.add_argument(\n '--deterministic',\n action='store_true',\n help='whether to set deterministic options for CUDNN backend.')\n parser.add_argument(\n '--cfg-options',\n nargs='+',\n action=DictAction,\n default={},\n help='override some settings in the used config, the key-value pair '\n 'in xxx=yyy format will be merged into config file. For example, '\n \"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'\")\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument(\n '--autoscale-lr',\n action='store_true',\n help='automatically scale lr with the number of gpus')\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n\n return args\n\n\ndef main():\n args = parse_args()\n\n cfg = Config.fromfile(args.config)\n\n if args.cfg_options is not None:\n cfg.merge_from_dict(args.cfg_options)\n\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n\n # work_dir is determined in this priority: CLI > segment in file > filename\n if args.work_dir is not None:\n # update configs according to CLI args if args.work_dir is not None\n cfg.work_dir = args.work_dir\n elif cfg.get('work_dir', None) is None:\n # use config filename as default work_dir if cfg.work_dir is None\n cfg.work_dir = osp.join('./work_dirs',\n osp.splitext(osp.basename(args.config))[0])\n if args.resume_from is not None:\n cfg.resume_from = args.resume_from\n if args.gpu_ids is not None:\n cfg.gpu_ids = args.gpu_ids\n else:\n cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)\n\n if args.autoscale_lr:\n # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)\n cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n\n # create work_dir\n mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))\n # init the logger before other steps\n timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n log_file = osp.join(cfg.work_dir, f'{timestamp}.log')\n logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)\n\n # init the meta dict to record some important information such as\n # environment info and seed, which will be logged\n meta = dict()\n # log env info\n env_info_dict = collect_env()\n env_info = '\\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])\n dash_line = '-' * 60 + '\\n'\n logger.info('Environment info:\\n' + dash_line + env_info + '\\n' +\n dash_line)\n meta['env_info'] = env_info\n\n # log some basic info\n logger.info(f'Distributed training: {distributed}')\n 
logger.info(f'Config:\\n{cfg.pretty_text}')\n\n # set random seeds\n if args.seed is not None:\n logger.info(f'Set random seed to {args.seed}, '\n f'deterministic: {args.deterministic}')\n set_random_seed(args.seed, deterministic=args.deterministic)\n cfg.seed = args.seed\n meta['seed'] = args.seed\n\n model = build_posenet(cfg.model)\n datasets = [build_dataset(cfg.data.train)]\n\n if len(cfg.workflow) == 2:\n val_dataset = copy.deepcopy(cfg.data.val)\n val_dataset.pipeline = cfg.data.train.pipeline\n datasets.append(build_dataset(val_dataset))\n\n if cfg.checkpoint_config is not None:\n # save mmpose version, config file content\n # checkpoints as meta data\n cfg.checkpoint_config.meta = dict(\n mmpose_version=__version__ + get_git_hash(digits=7),\n config=cfg.pretty_text,\n )\n train_model(\n model,\n datasets,\n cfg,\n distributed=distributed,\n validate=(not args.no_validate),\n timestamp=timestamp,\n meta=meta)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/train.py"}]} | 1,956 | 335 |
gh_patches_debug_39741 | rasdani/github-patches | git_diff | ipython__ipython-3220 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
race conditions in profile directory creation
Hi- I've been using IPython cluster for a bit now and have recently run into some race conditions when I start up multiple engines at once on a new machine. Specifically, in `IPython.core.profiledir.ProfileDir`, all the `check_*_dir` functions first check whether the directory exists and then create it if not. This will throw an exception if two engines are running the same code at once and both try to create the directory after they each see that it doesn't exist.
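A minimal sketch of the failure mode and an EEXIST-tolerant alternative (helper names here are illustrative, not IPython's actual API):

```python
import errno
import os

def racy_mkdir(path):
    # check-then-create: another engine can create `path` between
    # the isdir() check and mkdir(), so mkdir() raises OSError (EEXIST)
    if not os.path.isdir(path):
        os.mkdir(path)

def tolerant_mkdir(path, mode=0o700):
    # attempt the mkdir and ignore the race where the directory
    # already exists by the time we try to create it
    try:
        os.mkdir(path, mode)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
```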
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/core/profiledir.py`
Content:
```
1 # encoding: utf-8
2 """
3 An object for managing IPython profile directories.
4
5 Authors:
6
7 * Brian Granger
8 * Fernando Perez
9 * Min RK
10
11 """
12
13 #-----------------------------------------------------------------------------
14 # Copyright (C) 2008-2011 The IPython Development Team
15 #
16 # Distributed under the terms of the BSD License. The full license is in
17 # the file COPYING, distributed as part of this software.
18 #-----------------------------------------------------------------------------
19
20 #-----------------------------------------------------------------------------
21 # Imports
22 #-----------------------------------------------------------------------------
23
24 import os
25 import shutil
26
27 from IPython.config.configurable import LoggingConfigurable
28 from IPython.utils.path import get_ipython_package_dir, expand_path
29 from IPython.utils.traitlets import Unicode, Bool
30
31 #-----------------------------------------------------------------------------
32 # Classes and functions
33 #-----------------------------------------------------------------------------
34
35
36 #-----------------------------------------------------------------------------
37 # Module errors
38 #-----------------------------------------------------------------------------
39
40 class ProfileDirError(Exception):
41 pass
42
43
44 #-----------------------------------------------------------------------------
45 # Class for managing profile directories
46 #-----------------------------------------------------------------------------
47
48 class ProfileDir(LoggingConfigurable):
49 """An object to manage the profile directory and its resources.
50
51 The profile directory is used by all IPython applications, to manage
52 configuration, logging and security.
53
54 This object knows how to find, create and manage these directories. This
55 should be used by any code that wants to handle profiles.
56 """
57
58 security_dir_name = Unicode('security')
59 log_dir_name = Unicode('log')
60 startup_dir_name = Unicode('startup')
61 pid_dir_name = Unicode('pid')
62 security_dir = Unicode(u'')
63 log_dir = Unicode(u'')
64 startup_dir = Unicode(u'')
65 pid_dir = Unicode(u'')
66
67 location = Unicode(u'', config=True,
68 help="""Set the profile location directly. This overrides the logic used by the
69 `profile` option.""",
70 )
71
72 _location_isset = Bool(False) # flag for detecting multiply set location
73
74 def _location_changed(self, name, old, new):
75 if self._location_isset:
76 raise RuntimeError("Cannot set profile location more than once.")
77 self._location_isset = True
78 if not os.path.isdir(new):
79 os.makedirs(new)
80
81 # ensure config files exist:
82 self.security_dir = os.path.join(new, self.security_dir_name)
83 self.log_dir = os.path.join(new, self.log_dir_name)
84 self.startup_dir = os.path.join(new, self.startup_dir_name)
85 self.pid_dir = os.path.join(new, self.pid_dir_name)
86 self.check_dirs()
87
88 def _log_dir_changed(self, name, old, new):
89 self.check_log_dir()
90
91 def check_log_dir(self):
92 if not os.path.isdir(self.log_dir):
93 os.mkdir(self.log_dir)
94
95 def _startup_dir_changed(self, name, old, new):
96 self.check_startup_dir()
97
98 def check_startup_dir(self):
99 if not os.path.isdir(self.startup_dir):
100 os.mkdir(self.startup_dir)
101 readme = os.path.join(self.startup_dir, 'README')
102 src = os.path.join(get_ipython_package_dir(), u'config', u'profile', u'README_STARTUP')
103
104 if not os.path.exists(src):
105 self.log.warn("Could not copy README_STARTUP to startup dir. Source file %s does not exist." % src)
106
107 if os.path.exists(src) and not os.path.exists(readme):
108 shutil.copy(src, readme)
109
110 def _security_dir_changed(self, name, old, new):
111 self.check_security_dir()
112
113 def check_security_dir(self):
114 if not os.path.isdir(self.security_dir):
115 os.mkdir(self.security_dir, 0o700)
116 else:
117 try:
118 os.chmod(self.security_dir, 0o700)
119 except OSError:
120 self.log.warn("Could not set security dir permissions to private.")
121
122 def _pid_dir_changed(self, name, old, new):
123 self.check_pid_dir()
124
125 def check_pid_dir(self):
126 if not os.path.isdir(self.pid_dir):
127 os.mkdir(self.pid_dir, 0o700)
128 else:
129 try:
130 os.chmod(self.pid_dir, 0o700)
131 except OSError:
132 self.log.warn("Could not set pid dir permissions to private.")
133
134 def check_dirs(self):
135 self.check_security_dir()
136 self.check_log_dir()
137 self.check_pid_dir()
138 self.check_startup_dir()
139
140 def copy_config_file(self, config_file, path=None, overwrite=False):
141 """Copy a default config file into the active profile directory.
142
143 Default configuration files are kept in :mod:`IPython.config.default`.
144 This function moves these from that location to the working profile
145 directory.
146 """
147 dst = os.path.join(self.location, config_file)
148 if os.path.isfile(dst) and not overwrite:
149 return False
150 if path is None:
151 path = os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
152 src = os.path.join(path, config_file)
153 shutil.copy(src, dst)
154 return True
155
156 @classmethod
157 def create_profile_dir(cls, profile_dir, config=None):
158 """Create a new profile directory given a full path.
159
160 Parameters
161 ----------
162 profile_dir : str
163 The full path to the profile directory. If it does exist, it will
164 be used. If not, it will be created.
165 """
166 return cls(location=profile_dir, config=config)
167
168 @classmethod
169 def create_profile_dir_by_name(cls, path, name=u'default', config=None):
170 """Create a profile dir by profile name and path.
171
172 Parameters
173 ----------
174 path : unicode
175 The path (directory) to put the profile directory in.
176 name : unicode
177 The name of the profile. The name of the profile directory will
178 be "profile_<profile>".
179 """
180 if not os.path.isdir(path):
181 raise ProfileDirError('Directory not found: %s' % path)
182 profile_dir = os.path.join(path, u'profile_' + name)
183 return cls(location=profile_dir, config=config)
184
185 @classmethod
186 def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
187 """Find an existing profile dir by profile name, return its ProfileDir.
188
189 This searches through a sequence of paths for a profile dir. If it
190 is not found, a :class:`ProfileDirError` exception will be raised.
191
192 The search path algorithm is:
193 1. ``os.getcwdu()``
194 2. ``ipython_dir``
195
196 Parameters
197 ----------
198 ipython_dir : unicode or str
199 The IPython directory to use.
200 name : unicode or str
201 The name of the profile. The name of the profile directory
202 will be "profile_<profile>".
203 """
204 dirname = u'profile_' + name
205 paths = [os.getcwdu(), ipython_dir]
206 for p in paths:
207 profile_dir = os.path.join(p, dirname)
208 if os.path.isdir(profile_dir):
209 return cls(location=profile_dir, config=config)
210 else:
211 raise ProfileDirError('Profile directory not found in paths: %s' % dirname)
212
213 @classmethod
214 def find_profile_dir(cls, profile_dir, config=None):
215 """Find/create a profile dir and return its ProfileDir.
216
217 This will create the profile directory if it doesn't exist.
218
219 Parameters
220 ----------
221 profile_dir : unicode or str
222 The path of the profile directory. This is expanded using
223 :func:`IPython.utils.genutils.expand_path`.
224 """
225 profile_dir = expand_path(profile_dir)
226 if not os.path.isdir(profile_dir):
227 raise ProfileDirError('Profile directory not found: %s' % profile_dir)
228 return cls(location=profile_dir, config=config)
229
230
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/core/profiledir.py b/IPython/core/profiledir.py
--- a/IPython/core/profiledir.py
+++ b/IPython/core/profiledir.py
@@ -87,22 +87,55 @@
def _log_dir_changed(self, name, old, new):
self.check_log_dir()
+
+ def _mkdir(self, path, mode=None):
+ """ensure a directory exists at a given path
+
+ This is a version of os.mkdir, with the following differences:
+
+ - returns True if it created the directory, False otherwise
+ - ignores EEXIST, protecting against race conditions where
+ the dir may have been created in between the check and
+ the creation
+ - sets permissions if requested and the dir already exists
+ """
+ if os.path.exists(path):
+ if mode and os.stat(path).st_mode != mode:
+ try:
+ os.chmod(path, mode)
+ except OSError:
+ self.log.warn(
+ "Could not set permissions on %s",
+ path
+ )
+ return False
+ try:
+ if mode:
+ os.mkdir(path, mode)
+ else:
+ os.mkdir(path)
+ except OSError as e:
+ if e.errno == errno.EEXIST:
+ return False
+ else:
+ raise
+
+ return True
def check_log_dir(self):
- if not os.path.isdir(self.log_dir):
- os.mkdir(self.log_dir)
+ self._mkdir(self.log_dir)
def _startup_dir_changed(self, name, old, new):
self.check_startup_dir()
def check_startup_dir(self):
- if not os.path.isdir(self.startup_dir):
- os.mkdir(self.startup_dir)
+ self._mkdir(self.startup_dir)
+
readme = os.path.join(self.startup_dir, 'README')
src = os.path.join(get_ipython_package_dir(), u'config', u'profile', u'README_STARTUP')
if not os.path.exists(src):
- self.log.warn("Could not copy README_STARTUP to startup dir. Source file %s does not exist." % src)
+ self.log.warn("Could not copy README_STARTUP to startup dir. Source file %s does not exist.", src)
if os.path.exists(src) and not os.path.exists(readme):
shutil.copy(src, readme)
@@ -111,25 +144,13 @@
self.check_security_dir()
def check_security_dir(self):
- if not os.path.isdir(self.security_dir):
- os.mkdir(self.security_dir, 0o700)
- else:
- try:
- os.chmod(self.security_dir, 0o700)
- except OSError:
- self.log.warn("Could not set security dir permissions to private.")
+ self._mkdir(self.security_dir, 0o40700)
def _pid_dir_changed(self, name, old, new):
self.check_pid_dir()
def check_pid_dir(self):
- if not os.path.isdir(self.pid_dir):
- os.mkdir(self.pid_dir, 0o700)
- else:
- try:
- os.chmod(self.pid_dir, 0o700)
- except OSError:
- self.log.warn("Could not set pid dir permissions to private.")
+ self._mkdir(self.pid_dir, 0o40700)
def check_dirs(self):
self.check_security_dir()
| {"golden_diff": "diff --git a/IPython/core/profiledir.py b/IPython/core/profiledir.py\n--- a/IPython/core/profiledir.py\n+++ b/IPython/core/profiledir.py\n@@ -87,22 +87,55 @@\n \n def _log_dir_changed(self, name, old, new):\n self.check_log_dir()\n+ \n+ def _mkdir(self, path, mode=None):\n+ \"\"\"ensure a directory exists at a given path\n+ \n+ This is a version of os.mkdir, with the following differences:\n+ \n+ - returns True if it created the directory, False otherwise\n+ - ignores EEXIST, protecting against race conditions where\n+ the dir may have been created in between the check and\n+ the creation\n+ - sets permissions if requested and the dir already exists\n+ \"\"\"\n+ if os.path.exists(path):\n+ if mode and os.stat(path).st_mode != mode:\n+ try:\n+ os.chmod(path, mode)\n+ except OSError:\n+ self.log.warn(\n+ \"Could not set permissions on %s\",\n+ path\n+ )\n+ return False\n+ try:\n+ if mode:\n+ os.mkdir(path, mode)\n+ else:\n+ os.mkdir(path)\n+ except OSError as e:\n+ if e.errno == errno.EEXIST:\n+ return False\n+ else:\n+ raise\n+ \n+ return True\n \n def check_log_dir(self):\n- if not os.path.isdir(self.log_dir):\n- os.mkdir(self.log_dir)\n+ self._mkdir(self.log_dir)\n \n def _startup_dir_changed(self, name, old, new):\n self.check_startup_dir()\n \n def check_startup_dir(self):\n- if not os.path.isdir(self.startup_dir):\n- os.mkdir(self.startup_dir)\n+ self._mkdir(self.startup_dir)\n+\n readme = os.path.join(self.startup_dir, 'README')\n src = os.path.join(get_ipython_package_dir(), u'config', u'profile', u'README_STARTUP')\n \n if not os.path.exists(src):\n- self.log.warn(\"Could not copy README_STARTUP to startup dir. Source file %s does not exist.\" % src)\n+ self.log.warn(\"Could not copy README_STARTUP to startup dir. Source file %s does not exist.\", src)\n \n if os.path.exists(src) and not os.path.exists(readme):\n shutil.copy(src, readme)\n@@ -111,25 +144,13 @@\n self.check_security_dir()\n \n def check_security_dir(self):\n- if not os.path.isdir(self.security_dir):\n- os.mkdir(self.security_dir, 0o700)\n- else:\n- try:\n- os.chmod(self.security_dir, 0o700)\n- except OSError:\n- self.log.warn(\"Could not set security dir permissions to private.\")\n+ self._mkdir(self.security_dir, 0o40700)\n \n def _pid_dir_changed(self, name, old, new):\n self.check_pid_dir()\n \n def check_pid_dir(self):\n- if not os.path.isdir(self.pid_dir):\n- os.mkdir(self.pid_dir, 0o700)\n- else:\n- try:\n- os.chmod(self.pid_dir, 0o700)\n- except OSError:\n- self.log.warn(\"Could not set pid dir permissions to private.\")\n+ self._mkdir(self.pid_dir, 0o40700)\n \n def check_dirs(self):\n self.check_security_dir()\n", "issue": "race conditions in profile directory creation\nHi- I've been using IPython cluster for a bit now and have recently run into some race conditions when I start up multiple engines at once on a new machine. Specifically, in `IPython.core.profiledir.ProfileDir`, all the `check_*_dir` functions first check if the directory exists then create them if not. This will throw an exception if two engines are running the same code at once and try to create the directory after they both see that it doesn't exist.\n\n", "before_files": [{"content": "# encoding: utf-8\n\"\"\"\nAn object for managing IPython profile directories.\n\nAuthors:\n\n* Brian Granger\n* Fernando Perez\n* Min RK\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. 
The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport os\nimport shutil\n\nfrom IPython.config.configurable import LoggingConfigurable\nfrom IPython.utils.path import get_ipython_package_dir, expand_path\nfrom IPython.utils.traitlets import Unicode, Bool\n\n#-----------------------------------------------------------------------------\n# Classes and functions\n#-----------------------------------------------------------------------------\n\n\n#-----------------------------------------------------------------------------\n# Module errors\n#-----------------------------------------------------------------------------\n\nclass ProfileDirError(Exception):\n pass\n\n\n#-----------------------------------------------------------------------------\n# Class for managing profile directories\n#-----------------------------------------------------------------------------\n\nclass ProfileDir(LoggingConfigurable):\n \"\"\"An object to manage the profile directory and its resources.\n\n The profile directory is used by all IPython applications, to manage\n configuration, logging and security.\n\n This object knows how to find, create and manage these directories. This\n should be used by any code that wants to handle profiles.\n \"\"\"\n\n security_dir_name = Unicode('security')\n log_dir_name = Unicode('log')\n startup_dir_name = Unicode('startup')\n pid_dir_name = Unicode('pid')\n security_dir = Unicode(u'')\n log_dir = Unicode(u'')\n startup_dir = Unicode(u'')\n pid_dir = Unicode(u'')\n\n location = Unicode(u'', config=True,\n help=\"\"\"Set the profile location directly. This overrides the logic used by the\n `profile` option.\"\"\",\n )\n\n _location_isset = Bool(False) # flag for detecting multiply set location\n\n def _location_changed(self, name, old, new):\n if self._location_isset:\n raise RuntimeError(\"Cannot set profile location more than once.\")\n self._location_isset = True\n if not os.path.isdir(new):\n os.makedirs(new)\n \n # ensure config files exist:\n self.security_dir = os.path.join(new, self.security_dir_name)\n self.log_dir = os.path.join(new, self.log_dir_name)\n self.startup_dir = os.path.join(new, self.startup_dir_name)\n self.pid_dir = os.path.join(new, self.pid_dir_name)\n self.check_dirs()\n\n def _log_dir_changed(self, name, old, new):\n self.check_log_dir()\n\n def check_log_dir(self):\n if not os.path.isdir(self.log_dir):\n os.mkdir(self.log_dir)\n\n def _startup_dir_changed(self, name, old, new):\n self.check_startup_dir()\n\n def check_startup_dir(self):\n if not os.path.isdir(self.startup_dir):\n os.mkdir(self.startup_dir)\n readme = os.path.join(self.startup_dir, 'README')\n src = os.path.join(get_ipython_package_dir(), u'config', u'profile', u'README_STARTUP')\n\n if not os.path.exists(src):\n self.log.warn(\"Could not copy README_STARTUP to startup dir. 
Source file %s does not exist.\" % src)\n\n if os.path.exists(src) and not os.path.exists(readme):\n shutil.copy(src, readme)\n\n def _security_dir_changed(self, name, old, new):\n self.check_security_dir()\n\n def check_security_dir(self):\n if not os.path.isdir(self.security_dir):\n os.mkdir(self.security_dir, 0o700)\n else:\n try:\n os.chmod(self.security_dir, 0o700)\n except OSError:\n self.log.warn(\"Could not set security dir permissions to private.\")\n\n def _pid_dir_changed(self, name, old, new):\n self.check_pid_dir()\n\n def check_pid_dir(self):\n if not os.path.isdir(self.pid_dir):\n os.mkdir(self.pid_dir, 0o700)\n else:\n try:\n os.chmod(self.pid_dir, 0o700)\n except OSError:\n self.log.warn(\"Could not set pid dir permissions to private.\")\n\n def check_dirs(self):\n self.check_security_dir()\n self.check_log_dir()\n self.check_pid_dir()\n self.check_startup_dir()\n\n def copy_config_file(self, config_file, path=None, overwrite=False):\n \"\"\"Copy a default config file into the active profile directory.\n\n Default configuration files are kept in :mod:`IPython.config.default`.\n This function moves these from that location to the working profile\n directory.\n \"\"\"\n dst = os.path.join(self.location, config_file)\n if os.path.isfile(dst) and not overwrite:\n return False\n if path is None:\n path = os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')\n src = os.path.join(path, config_file)\n shutil.copy(src, dst)\n return True\n\n @classmethod\n def create_profile_dir(cls, profile_dir, config=None):\n \"\"\"Create a new profile directory given a full path.\n\n Parameters\n ----------\n profile_dir : str\n The full path to the profile directory. If it does exist, it will\n be used. If not, it will be created.\n \"\"\"\n return cls(location=profile_dir, config=config)\n\n @classmethod\n def create_profile_dir_by_name(cls, path, name=u'default', config=None):\n \"\"\"Create a profile dir by profile name and path.\n\n Parameters\n ----------\n path : unicode\n The path (directory) to put the profile directory in.\n name : unicode\n The name of the profile. The name of the profile directory will\n be \"profile_<profile>\".\n \"\"\"\n if not os.path.isdir(path):\n raise ProfileDirError('Directory not found: %s' % path)\n profile_dir = os.path.join(path, u'profile_' + name)\n return cls(location=profile_dir, config=config)\n\n @classmethod\n def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):\n \"\"\"Find an existing profile dir by profile name, return its ProfileDir.\n\n This searches through a sequence of paths for a profile dir. If it\n is not found, a :class:`ProfileDirError` exception will be raised.\n\n The search path algorithm is:\n 1. ``os.getcwdu()``\n 2. ``ipython_dir``\n\n Parameters\n ----------\n ipython_dir : unicode or str\n The IPython directory to use.\n name : unicode or str\n The name of the profile. 
The name of the profile directory\n will be \"profile_<profile>\".\n \"\"\"\n dirname = u'profile_' + name\n paths = [os.getcwdu(), ipython_dir]\n for p in paths:\n profile_dir = os.path.join(p, dirname)\n if os.path.isdir(profile_dir):\n return cls(location=profile_dir, config=config)\n else:\n raise ProfileDirError('Profile directory not found in paths: %s' % dirname)\n\n @classmethod\n def find_profile_dir(cls, profile_dir, config=None):\n \"\"\"Find/create a profile dir and return its ProfileDir.\n\n This will create the profile directory if it doesn't exist.\n\n Parameters\n ----------\n profile_dir : unicode or str\n The path of the profile directory. This is expanded using\n :func:`IPython.utils.genutils.expand_path`.\n \"\"\"\n profile_dir = expand_path(profile_dir)\n if not os.path.isdir(profile_dir):\n raise ProfileDirError('Profile directory not found: %s' % profile_dir)\n return cls(location=profile_dir, config=config)\n\n\n", "path": "IPython/core/profiledir.py"}], "after_files": [{"content": "# encoding: utf-8\n\"\"\"\nAn object for managing IPython profile directories.\n\nAuthors:\n\n* Brian Granger\n* Fernando Perez\n* Min RK\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport os\nimport shutil\n\nfrom IPython.config.configurable import LoggingConfigurable\nfrom IPython.utils.path import get_ipython_package_dir, expand_path\nfrom IPython.utils.traitlets import Unicode, Bool\n\n#-----------------------------------------------------------------------------\n# Classes and functions\n#-----------------------------------------------------------------------------\n\n\n#-----------------------------------------------------------------------------\n# Module errors\n#-----------------------------------------------------------------------------\n\nclass ProfileDirError(Exception):\n pass\n\n\n#-----------------------------------------------------------------------------\n# Class for managing profile directories\n#-----------------------------------------------------------------------------\n\nclass ProfileDir(LoggingConfigurable):\n \"\"\"An object to manage the profile directory and its resources.\n\n The profile directory is used by all IPython applications, to manage\n configuration, logging and security.\n\n This object knows how to find, create and manage these directories. This\n should be used by any code that wants to handle profiles.\n \"\"\"\n\n security_dir_name = Unicode('security')\n log_dir_name = Unicode('log')\n startup_dir_name = Unicode('startup')\n pid_dir_name = Unicode('pid')\n security_dir = Unicode(u'')\n log_dir = Unicode(u'')\n startup_dir = Unicode(u'')\n pid_dir = Unicode(u'')\n\n location = Unicode(u'', config=True,\n help=\"\"\"Set the profile location directly. 
This overrides the logic used by the\n `profile` option.\"\"\",\n )\n\n _location_isset = Bool(False) # flag for detecting multiply set location\n\n def _location_changed(self, name, old, new):\n if self._location_isset:\n raise RuntimeError(\"Cannot set profile location more than once.\")\n self._location_isset = True\n if not os.path.isdir(new):\n os.makedirs(new)\n \n # ensure config files exist:\n self.security_dir = os.path.join(new, self.security_dir_name)\n self.log_dir = os.path.join(new, self.log_dir_name)\n self.startup_dir = os.path.join(new, self.startup_dir_name)\n self.pid_dir = os.path.join(new, self.pid_dir_name)\n self.check_dirs()\n\n def _log_dir_changed(self, name, old, new):\n self.check_log_dir()\n \n def _mkdir(self, path, mode=None):\n \"\"\"ensure a directory exists at a given path\n \n This is a version of os.mkdir, with the following differences:\n \n - returns True if it created the directory, False otherwise\n - ignores EEXIST, protecting against race conditions where\n the dir may have been created in between the check and\n the creation\n - sets permissions if requested and the dir already exists\n \"\"\"\n if os.path.exists(path):\n if mode and os.stat(path).st_mode != mode:\n try:\n os.chmod(path, mode)\n except OSError:\n self.log.warn(\n \"Could not set permissions on %s\",\n path\n )\n return False\n try:\n if mode:\n os.mkdir(path, mode)\n else:\n os.mkdir(path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n return False\n else:\n raise\n \n return True\n\n def check_log_dir(self):\n self._mkdir(self.log_dir)\n\n def _startup_dir_changed(self, name, old, new):\n self.check_startup_dir()\n\n def check_startup_dir(self):\n self._mkdir(self.startup_dir)\n\n readme = os.path.join(self.startup_dir, 'README')\n src = os.path.join(get_ipython_package_dir(), u'config', u'profile', u'README_STARTUP')\n\n if not os.path.exists(src):\n self.log.warn(\"Could not copy README_STARTUP to startup dir. Source file %s does not exist.\", src)\n\n if os.path.exists(src) and not os.path.exists(readme):\n shutil.copy(src, readme)\n\n def _security_dir_changed(self, name, old, new):\n self.check_security_dir()\n\n def check_security_dir(self):\n self._mkdir(self.security_dir, 0o40700)\n\n def _pid_dir_changed(self, name, old, new):\n self.check_pid_dir()\n\n def check_pid_dir(self):\n self._mkdir(self.pid_dir, 0o40700)\n\n def check_dirs(self):\n self.check_security_dir()\n self.check_log_dir()\n self.check_pid_dir()\n self.check_startup_dir()\n\n def copy_config_file(self, config_file, path=None, overwrite=False):\n \"\"\"Copy a default config file into the active profile directory.\n\n Default configuration files are kept in :mod:`IPython.config.default`.\n This function moves these from that location to the working profile\n directory.\n \"\"\"\n dst = os.path.join(self.location, config_file)\n if os.path.isfile(dst) and not overwrite:\n return False\n if path is None:\n path = os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')\n src = os.path.join(path, config_file)\n shutil.copy(src, dst)\n return True\n\n @classmethod\n def create_profile_dir(cls, profile_dir, config=None):\n \"\"\"Create a new profile directory given a full path.\n\n Parameters\n ----------\n profile_dir : str\n The full path to the profile directory. If it does exist, it will\n be used. 
If not, it will be created.\n \"\"\"\n return cls(location=profile_dir, config=config)\n\n @classmethod\n def create_profile_dir_by_name(cls, path, name=u'default', config=None):\n \"\"\"Create a profile dir by profile name and path.\n\n Parameters\n ----------\n path : unicode\n The path (directory) to put the profile directory in.\n name : unicode\n The name of the profile. The name of the profile directory will\n be \"profile_<profile>\".\n \"\"\"\n if not os.path.isdir(path):\n raise ProfileDirError('Directory not found: %s' % path)\n profile_dir = os.path.join(path, u'profile_' + name)\n return cls(location=profile_dir, config=config)\n\n @classmethod\n def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):\n \"\"\"Find an existing profile dir by profile name, return its ProfileDir.\n\n This searches through a sequence of paths for a profile dir. If it\n is not found, a :class:`ProfileDirError` exception will be raised.\n\n The search path algorithm is:\n 1. ``os.getcwdu()``\n 2. ``ipython_dir``\n\n Parameters\n ----------\n ipython_dir : unicode or str\n The IPython directory to use.\n name : unicode or str\n The name of the profile. The name of the profile directory\n will be \"profile_<profile>\".\n \"\"\"\n dirname = u'profile_' + name\n paths = [os.getcwdu(), ipython_dir]\n for p in paths:\n profile_dir = os.path.join(p, dirname)\n if os.path.isdir(profile_dir):\n return cls(location=profile_dir, config=config)\n else:\n raise ProfileDirError('Profile directory not found in paths: %s' % dirname)\n\n @classmethod\n def find_profile_dir(cls, profile_dir, config=None):\n \"\"\"Find/create a profile dir and return its ProfileDir.\n\n This will create the profile directory if it doesn't exist.\n\n Parameters\n ----------\n profile_dir : unicode or str\n The path of the profile directory. This is expanded using\n :func:`IPython.utils.genutils.expand_path`.\n \"\"\"\n profile_dir = expand_path(profile_dir)\n if not os.path.isdir(profile_dir):\n raise ProfileDirError('Profile directory not found: %s' % profile_dir)\n return cls(location=profile_dir, config=config)\n\n\n", "path": "IPython/core/profiledir.py"}]} | 2,643 | 782 |
gh_patches_debug_4389 | rasdani/github-patches | git_diff | aio-libs__aiohttp-3434 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Design good distribution extras support
Follow-up https://github.com/aio-libs/aiohttp/pull/2312#issuecomment-339335603: this issue is intended to encourage discussion about adding extras support.
There are several optional dependencies mentioned in the docs, and they instruct users to run `pip install {{dep_name}}`.
Common practice is to add feature flags that let users depend on additional capabilities of the framework without having to install those dependencies manually.
For example, aiohttp could have a `speedups` extra, and then users could just run `pip install aiohttp[speedups]` instead of `pip install aiodns aiohttp cchardet`.
Also, it is possible to use environment markers as extra names; in this case certain dependencies get installed automatically if the environment matches a specific expression, which might be useful for Windows-only crutches, etc.
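To make the proposal concrete, here is a minimal sketch of what such metadata could look like in a `setup.py` — the package name, extra names, and dependency lists are purely illustrative, not aiohttp's actual packaging configuration:

```python
# Illustrative only: a feature-flag extra plus an environment-marker extra.
# None of these names are taken from aiohttp's real setup.py.
from setuptools import setup

setup(
    name="example-package",
    install_requires=["attrs>=17.3.0"],
    extras_require={
        # Feature flag: enabled with `pip install example-package[speedups]`.
        "speedups": ["aiodns", "cchardet"],
        # Environment-marker extra: pulled in automatically on Windows.
        ':sys_platform == "win32"': ["colorama"],
    },
)
```

With such a declaration, `pip install example-package[speedups]` installs the optional accelerators in one step.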
Please consider treating extras as a **public API**, because they are exposed to end-users.
/cc: @asvetlov
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import codecs
2 import pathlib
3 import re
4 import sys
5 from distutils.command.build_ext import build_ext
6 from distutils.errors import (CCompilerError, DistutilsExecError,
7 DistutilsPlatformError)
8
9 from setuptools import Extension, setup
10
11
12 if sys.version_info < (3, 5, 3):
13 raise RuntimeError("aiohttp 3.x requires Python 3.5.3+")
14
15 here = pathlib.Path(__file__).parent
16
17 try:
18 from Cython.Build import cythonize
19 USE_CYTHON = True
20 except ImportError:
21 USE_CYTHON = False
22
23 if (here / '.git').exists() and not USE_CYTHON:
24 print("Install cython when building from git clone", file=sys.stderr)
25 print("Hint:", file=sys.stderr)
26 print(" pip install cython", file=sys.stderr)
27 sys.exit(1)
28
29
30 if (here / '.git').exists() and not (here / 'vendor/http-parser/README.md'):
31 print("Install submodules when building from git clone", file=sys.stderr)
32 print("Hint:", file=sys.stderr)
33 print(" git submodule update --init", file=sys.stderr)
34 sys.exit(2)
35
36
37 ext = '.pyx' if USE_CYTHON else '.c'
38
39
40 extensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket' + ext]),
41 Extension('aiohttp._http_parser',
42 ['aiohttp/_http_parser' + ext,
43 'vendor/http-parser/http_parser.c',
44 'aiohttp/_find_header.c'],
45 define_macros=[('HTTP_PARSER_STRICT', 0)],
46 ),
47 Extension('aiohttp._frozenlist',
48 ['aiohttp/_frozenlist' + ext]),
49 Extension('aiohttp._helpers',
50 ['aiohttp/_helpers' + ext]),
51 Extension('aiohttp._http_writer',
52 ['aiohttp/_http_writer' + ext])]
53
54
55 if USE_CYTHON:
56 extensions = cythonize(extensions)
57
58
59 class BuildFailed(Exception):
60 pass
61
62
63 class ve_build_ext(build_ext):
64 # This class allows C extension building to fail.
65
66 def run(self):
67 try:
68 build_ext.run(self)
69 except (DistutilsPlatformError, FileNotFoundError):
70 raise BuildFailed()
71
72 def build_extension(self, ext):
73 try:
74 build_ext.build_extension(self, ext)
75 except (CCompilerError, DistutilsExecError,
76 DistutilsPlatformError, ValueError):
77 raise BuildFailed()
78
79
80
81 txt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')
82 try:
83 version = re.findall(r"^__version__ = '([^']+)'\r?$",
84 txt, re.M)[0]
85 except IndexError:
86 raise RuntimeError('Unable to determine version.')
87
88 install_requires = [
89 'attrs>=17.3.0',
90 'chardet>=2.0,<4.0',
91 'multidict>=4.0,<5.0',
92 'async_timeout>=3.0,<4.0',
93 'yarl>=1.0,<2.0',
94 'idna-ssl>=1.0; python_version<"3.7"',
95 'typing_extensions>=3.6.5; python_version<"3.7"',
96 ]
97
98
99 def read(f):
100 return (here / f).read_text('utf-8').strip()
101
102
103 NEEDS_PYTEST = {'pytest', 'test'}.intersection(sys.argv)
104 pytest_runner = ['pytest-runner'] if NEEDS_PYTEST else []
105
106 tests_require = [
107 'pytest', 'gunicorn',
108 'pytest-timeout', 'async-generator',
109 'pytest-xdist',
110 ]
111
112
113 args = dict(
114 name='aiohttp',
115 version=version,
116 description='Async http client/server framework (asyncio)',
117 long_description='\n\n'.join((read('README.rst'), read('CHANGES.rst'))),
118 classifiers=[
119 'License :: OSI Approved :: Apache Software License',
120 'Intended Audience :: Developers',
121 'Programming Language :: Python',
122 'Programming Language :: Python :: 3',
123 'Programming Language :: Python :: 3.5',
124 'Programming Language :: Python :: 3.6',
125 'Programming Language :: Python :: 3.7',
126 'Development Status :: 5 - Production/Stable',
127 'Operating System :: POSIX',
128 'Operating System :: MacOS :: MacOS X',
129 'Operating System :: Microsoft :: Windows',
130 'Topic :: Internet :: WWW/HTTP',
131 'Framework :: AsyncIO',
132 ],
133 author='Nikolay Kim',
134 author_email='[email protected]',
135 maintainer=', '.join(('Nikolay Kim <[email protected]>',
136 'Andrew Svetlov <[email protected]>')),
137 maintainer_email='[email protected]',
138 url='https://github.com/aio-libs/aiohttp',
139 project_urls={
140 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',
141 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',
142 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',
143 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',
144 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',
145 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',
146 'Docs: RTD': 'https://docs.aiohttp.org',
147 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',
148 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',
149 },
150 license='Apache 2',
151 packages=['aiohttp'],
152 python_requires='>=3.5.3',
153 install_requires=install_requires,
154 tests_require=tests_require,
155 setup_requires=pytest_runner,
156 include_package_data=True,
157 ext_modules=extensions,
158 cmdclass=dict(build_ext=ve_build_ext),
159 )
160
161 try:
162 setup(**args)
163 except BuildFailed:
164 print("************************************************************")
165 print("Cannot compile C accelerator module, use pure python version")
166 print("************************************************************")
167 del args['ext_modules']
168 del args['cmdclass']
169 setup(**args)
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -151,6 +151,13 @@
packages=['aiohttp'],
python_requires='>=3.5.3',
install_requires=install_requires,
+ extras_require={
+ 'speedups': [
+ 'aiodns',
+ 'brotlipy',
+ 'cchardet',
+ ],
+ },
tests_require=tests_require,
setup_requires=pytest_runner,
include_package_data=True,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -151,6 +151,13 @@\n packages=['aiohttp'],\n python_requires='>=3.5.3',\n install_requires=install_requires,\n+ extras_require={\n+ 'speedups': [\n+ 'aiodns',\n+ 'brotlipy',\n+ 'cchardet',\n+ ],\n+ },\n tests_require=tests_require,\n setup_requires=pytest_runner,\n include_package_data=True,\n", "issue": "Design good distribution extras support\nFollow-up https://github.com/aio-libs/aiohttp/pull/2312#issuecomment-339335603: this issue is intended to encourage discussion about adding extras support.\r\n\r\nThere's several optional dependencies mentioned in docs, and they instruct users to do `pip install {{dep_name}}`.\r\n\r\nCommon practice is to add feature-flags allowing to depend on certain additional ability of the framework w/o having to install those dependencies manually.\r\n\r\nFor example, aiohttp could have `speedups` extra and then user's could've just `pip install aiohttp[speedups]` instead of `pip install aiodns aiohttp cchardet`.\r\n\r\nAlso, it is possible to use environment markers as extra names, in this case certain dependencies get installed automatically if the environment matches specific expression, which might be useful for windows-only crutches etc.\r\n\r\nPlease consider treating extras as a **public API**, because they are exposed to end-users.\r\n\r\n/cc: @asvetlov \n", "before_files": [{"content": "import codecs\nimport pathlib\nimport re\nimport sys\nfrom distutils.command.build_ext import build_ext\nfrom distutils.errors import (CCompilerError, DistutilsExecError,\n DistutilsPlatformError)\n\nfrom setuptools import Extension, setup\n\n\nif sys.version_info < (3, 5, 3):\n raise RuntimeError(\"aiohttp 3.x requires Python 3.5.3+\")\n\nhere = pathlib.Path(__file__).parent\n\ntry:\n from Cython.Build import cythonize\n USE_CYTHON = True\nexcept ImportError:\n USE_CYTHON = False\n\nif (here / '.git').exists() and not USE_CYTHON:\n print(\"Install cython when building from git clone\", file=sys.stderr)\n print(\"Hint:\", file=sys.stderr)\n print(\" pip install cython\", file=sys.stderr)\n sys.exit(1)\n\n\nif (here / '.git').exists() and not (here / 'vendor/http-parser/README.md'):\n print(\"Install submodules when building from git clone\", file=sys.stderr)\n print(\"Hint:\", file=sys.stderr)\n print(\" git submodule update --init\", file=sys.stderr)\n sys.exit(2)\n\n\next = '.pyx' if USE_CYTHON else '.c'\n\n\nextensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket' + ext]),\n Extension('aiohttp._http_parser',\n ['aiohttp/_http_parser' + ext,\n 'vendor/http-parser/http_parser.c',\n 'aiohttp/_find_header.c'],\n define_macros=[('HTTP_PARSER_STRICT', 0)],\n ),\n Extension('aiohttp._frozenlist',\n ['aiohttp/_frozenlist' + ext]),\n Extension('aiohttp._helpers',\n ['aiohttp/_helpers' + ext]),\n Extension('aiohttp._http_writer',\n ['aiohttp/_http_writer' + ext])]\n\n\nif USE_CYTHON:\n extensions = cythonize(extensions)\n\n\nclass BuildFailed(Exception):\n pass\n\n\nclass ve_build_ext(build_ext):\n # This class allows C extension building to fail.\n\n def run(self):\n try:\n build_ext.run(self)\n except (DistutilsPlatformError, FileNotFoundError):\n raise BuildFailed()\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except (CCompilerError, DistutilsExecError,\n DistutilsPlatformError, ValueError):\n raise BuildFailed()\n\n\n\ntxt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')\ntry:\n version = re.findall(r\"^__version__ = 
'([^']+)'\\r?$\",\n txt, re.M)[0]\nexcept IndexError:\n raise RuntimeError('Unable to determine version.')\n\ninstall_requires = [\n 'attrs>=17.3.0',\n 'chardet>=2.0,<4.0',\n 'multidict>=4.0,<5.0',\n 'async_timeout>=3.0,<4.0',\n 'yarl>=1.0,<2.0',\n 'idna-ssl>=1.0; python_version<\"3.7\"',\n 'typing_extensions>=3.6.5; python_version<\"3.7\"',\n]\n\n\ndef read(f):\n return (here / f).read_text('utf-8').strip()\n\n\nNEEDS_PYTEST = {'pytest', 'test'}.intersection(sys.argv)\npytest_runner = ['pytest-runner'] if NEEDS_PYTEST else []\n\ntests_require = [\n 'pytest', 'gunicorn',\n 'pytest-timeout', 'async-generator',\n 'pytest-xdist',\n]\n\n\nargs = dict(\n name='aiohttp',\n version=version,\n description='Async http client/server framework (asyncio)',\n long_description='\\n\\n'.join((read('README.rst'), read('CHANGES.rst'))),\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Topic :: Internet :: WWW/HTTP',\n 'Framework :: AsyncIO',\n ],\n author='Nikolay Kim',\n author_email='[email protected]',\n maintainer=', '.join(('Nikolay Kim <[email protected]>',\n 'Andrew Svetlov <[email protected]>')),\n maintainer_email='[email protected]',\n url='https://github.com/aio-libs/aiohttp',\n project_urls={\n 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',\n 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',\n 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',\n 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',\n 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',\n 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',\n 'Docs: RTD': 'https://docs.aiohttp.org',\n 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',\n 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',\n },\n license='Apache 2',\n packages=['aiohttp'],\n python_requires='>=3.5.3',\n install_requires=install_requires,\n tests_require=tests_require,\n setup_requires=pytest_runner,\n include_package_data=True,\n ext_modules=extensions,\n cmdclass=dict(build_ext=ve_build_ext),\n)\n\ntry:\n setup(**args)\nexcept BuildFailed:\n print(\"************************************************************\")\n print(\"Cannot compile C accelerator module, use pure python version\")\n print(\"************************************************************\")\n del args['ext_modules']\n del args['cmdclass']\n setup(**args)\n", "path": "setup.py"}], "after_files": [{"content": "import codecs\nimport pathlib\nimport re\nimport sys\nfrom distutils.command.build_ext import build_ext\nfrom distutils.errors import (CCompilerError, DistutilsExecError,\n DistutilsPlatformError)\n\nfrom setuptools import Extension, setup\n\n\nif sys.version_info < (3, 5, 3):\n raise RuntimeError(\"aiohttp 3.x requires Python 3.5.3+\")\n\nhere = pathlib.Path(__file__).parent\n\ntry:\n from Cython.Build import cythonize\n USE_CYTHON = True\nexcept ImportError:\n USE_CYTHON = False\n\nif (here / '.git').exists() and not USE_CYTHON:\n print(\"Install cython when building from git clone\", file=sys.stderr)\n print(\"Hint:\", file=sys.stderr)\n print(\" pip install cython\", 
file=sys.stderr)\n sys.exit(1)\n\n\nif (here / '.git').exists() and not (here / 'vendor/http-parser/README.md'):\n print(\"Install submodules when building from git clone\", file=sys.stderr)\n print(\"Hint:\", file=sys.stderr)\n print(\" git submodule update --init\", file=sys.stderr)\n sys.exit(2)\n\n\next = '.pyx' if USE_CYTHON else '.c'\n\n\nextensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket' + ext]),\n Extension('aiohttp._http_parser',\n ['aiohttp/_http_parser' + ext,\n 'vendor/http-parser/http_parser.c',\n 'aiohttp/_find_header.c'],\n define_macros=[('HTTP_PARSER_STRICT', 0)],\n ),\n Extension('aiohttp._frozenlist',\n ['aiohttp/_frozenlist' + ext]),\n Extension('aiohttp._helpers',\n ['aiohttp/_helpers' + ext]),\n Extension('aiohttp._http_writer',\n ['aiohttp/_http_writer' + ext])]\n\n\nif USE_CYTHON:\n extensions = cythonize(extensions)\n\n\nclass BuildFailed(Exception):\n pass\n\n\nclass ve_build_ext(build_ext):\n # This class allows C extension building to fail.\n\n def run(self):\n try:\n build_ext.run(self)\n except (DistutilsPlatformError, FileNotFoundError):\n raise BuildFailed()\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except (CCompilerError, DistutilsExecError,\n DistutilsPlatformError, ValueError):\n raise BuildFailed()\n\n\n\ntxt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')\ntry:\n version = re.findall(r\"^__version__ = '([^']+)'\\r?$\",\n txt, re.M)[0]\nexcept IndexError:\n raise RuntimeError('Unable to determine version.')\n\ninstall_requires = [\n 'attrs>=17.3.0',\n 'chardet>=2.0,<4.0',\n 'multidict>=4.0,<5.0',\n 'async_timeout>=3.0,<4.0',\n 'yarl>=1.0,<2.0',\n 'idna-ssl>=1.0; python_version<\"3.7\"',\n 'typing_extensions>=3.6.5; python_version<\"3.7\"',\n]\n\n\ndef read(f):\n return (here / f).read_text('utf-8').strip()\n\n\nNEEDS_PYTEST = {'pytest', 'test'}.intersection(sys.argv)\npytest_runner = ['pytest-runner'] if NEEDS_PYTEST else []\n\ntests_require = [\n 'pytest', 'gunicorn',\n 'pytest-timeout', 'async-generator',\n 'pytest-xdist',\n]\n\n\nargs = dict(\n name='aiohttp',\n version=version,\n description='Async http client/server framework (asyncio)',\n long_description='\\n\\n'.join((read('README.rst'), read('CHANGES.rst'))),\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Topic :: Internet :: WWW/HTTP',\n 'Framework :: AsyncIO',\n ],\n author='Nikolay Kim',\n author_email='[email protected]',\n maintainer=', '.join(('Nikolay Kim <[email protected]>',\n 'Andrew Svetlov <[email protected]>')),\n maintainer_email='[email protected]',\n url='https://github.com/aio-libs/aiohttp',\n project_urls={\n 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',\n 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',\n 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',\n 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',\n 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',\n 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',\n 'Docs: RTD': 'https://docs.aiohttp.org',\n 'GitHub: issues': 
'https://github.com/aio-libs/aiohttp/issues',\n 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',\n },\n license='Apache 2',\n packages=['aiohttp'],\n python_requires='>=3.5.3',\n install_requires=install_requires,\n extras_require={\n 'speedups': [\n 'aiodns',\n 'brotlipy',\n 'cchardet',\n ],\n },\n tests_require=tests_require,\n setup_requires=pytest_runner,\n include_package_data=True,\n ext_modules=extensions,\n cmdclass=dict(build_ext=ve_build_ext),\n)\n\ntry:\n setup(**args)\nexcept BuildFailed:\n print(\"************************************************************\")\n print(\"Cannot compile C accelerator module, use pure python version\")\n print(\"************************************************************\")\n del args['ext_modules']\n del args['cmdclass']\n setup(**args)\n", "path": "setup.py"}]} | 2,262 | 121 |
gh_patches_debug_17048 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10073 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Concatenate/consolidate all algorithms with different implementations
### Feature description
There are lots of algorithms with the same concept but different implementations/methods in different files. All of these should be consolidated into one file.
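As an illustration of what such a consolidation could converge on, a single Kadane-style helper might replace the scattered maximum-subarray variants — the function name and signature below are hypothetical, not the repository's final API:

```python
# Hypothetical consolidated implementation of the maximum-subarray problem
# (Kadane's algorithm); intended only to illustrate the proposal.
def max_subarray_sum(nums: list) -> int:
    """Return the maximum sum of any contiguous subarray of ``nums``.

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    """
    if not nums:
        raise ValueError("nums must not be empty")
    best = current = nums[0]
    for value in nums[1:]:
        current = max(value, current + value)
        best = max(best, current)
    return best
```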
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynamic_programming/longest_sub_array.py`
Content:
```
1 """
2 Author : Yvonne
3
4 This is a pure Python implementation of Dynamic Programming solution to the
5 longest_sub_array problem.
6
7 The problem is :
8 Given an array, to find the longest and continuous sub array and get the max sum of the
9 sub array in the given array.
10 """
11
12
13 class SubArray:
14 def __init__(self, arr):
15 # we need a list not a string, so do something to change the type
16 self.array = arr.split(",")
17
18 def solve_sub_array(self):
19 rear = [int(self.array[0])] * len(self.array)
20 sum_value = [int(self.array[0])] * len(self.array)
21 for i in range(1, len(self.array)):
22 sum_value[i] = max(
23 int(self.array[i]) + sum_value[i - 1], int(self.array[i])
24 )
25 rear[i] = max(sum_value[i], rear[i - 1])
26 return rear[len(self.array) - 1]
27
28
29 if __name__ == "__main__":
30 whole_array = input("please input some numbers:")
31 array = SubArray(whole_array)
32 re = array.solve_sub_array()
33 print(("the results is:", re))
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dynamic_programming/longest_sub_array.py b/dynamic_programming/longest_sub_array.py
deleted file mode 100644
--- a/dynamic_programming/longest_sub_array.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
-Author : Yvonne
-
-This is a pure Python implementation of Dynamic Programming solution to the
- longest_sub_array problem.
-
-The problem is :
-Given an array, to find the longest and continuous sub array and get the max sum of the
- sub array in the given array.
-"""
-
-
-class SubArray:
- def __init__(self, arr):
- # we need a list not a string, so do something to change the type
- self.array = arr.split(",")
-
- def solve_sub_array(self):
- rear = [int(self.array[0])] * len(self.array)
- sum_value = [int(self.array[0])] * len(self.array)
- for i in range(1, len(self.array)):
- sum_value[i] = max(
- int(self.array[i]) + sum_value[i - 1], int(self.array[i])
- )
- rear[i] = max(sum_value[i], rear[i - 1])
- return rear[len(self.array) - 1]
-
-
-if __name__ == "__main__":
- whole_array = input("please input some numbers:")
- array = SubArray(whole_array)
- re = array.solve_sub_array()
- print(("the results is:", re))
| {"golden_diff": "diff --git a/dynamic_programming/longest_sub_array.py b/dynamic_programming/longest_sub_array.py\ndeleted file mode 100644\n--- a/dynamic_programming/longest_sub_array.py\n+++ /dev/null\n@@ -1,33 +0,0 @@\n-\"\"\"\n-Author : Yvonne\n-\n-This is a pure Python implementation of Dynamic Programming solution to the\n- longest_sub_array problem.\n-\n-The problem is :\n-Given an array, to find the longest and continuous sub array and get the max sum of the\n- sub array in the given array.\n-\"\"\"\n-\n-\n-class SubArray:\n- def __init__(self, arr):\n- # we need a list not a string, so do something to change the type\n- self.array = arr.split(\",\")\n-\n- def solve_sub_array(self):\n- rear = [int(self.array[0])] * len(self.array)\n- sum_value = [int(self.array[0])] * len(self.array)\n- for i in range(1, len(self.array)):\n- sum_value[i] = max(\n- int(self.array[i]) + sum_value[i - 1], int(self.array[i])\n- )\n- rear[i] = max(sum_value[i], rear[i - 1])\n- return rear[len(self.array) - 1]\n-\n-\n-if __name__ == \"__main__\":\n- whole_array = input(\"please input some numbers:\")\n- array = SubArray(whole_array)\n- re = array.solve_sub_array()\n- print((\"the results is:\", re))\n", "issue": "Concatenate/consolidate all algorithms with different implementations\n### Feature description\n\nThere are lots of algorithms with the same concept but different implementations/methods in different files. All these should be moved into one file\n", "before_files": [{"content": "\"\"\"\nAuthor : Yvonne\n\nThis is a pure Python implementation of Dynamic Programming solution to the\n longest_sub_array problem.\n\nThe problem is :\nGiven an array, to find the longest and continuous sub array and get the max sum of the\n sub array in the given array.\n\"\"\"\n\n\nclass SubArray:\n def __init__(self, arr):\n # we need a list not a string, so do something to change the type\n self.array = arr.split(\",\")\n\n def solve_sub_array(self):\n rear = [int(self.array[0])] * len(self.array)\n sum_value = [int(self.array[0])] * len(self.array)\n for i in range(1, len(self.array)):\n sum_value[i] = max(\n int(self.array[i]) + sum_value[i - 1], int(self.array[i])\n )\n rear[i] = max(sum_value[i], rear[i - 1])\n return rear[len(self.array) - 1]\n\n\nif __name__ == \"__main__\":\n whole_array = input(\"please input some numbers:\")\n array = SubArray(whole_array)\n re = array.solve_sub_array()\n print((\"the results is:\", re))\n", "path": "dynamic_programming/longest_sub_array.py"}], "after_files": [{"content": null, "path": "dynamic_programming/longest_sub_array.py"}]} | 622 | 346 |
gh_patches_debug_33172 | rasdani/github-patches | git_diff | pytorch__text-58 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include Moses Tokenizer
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchtext/data/example.py`
Content:
```
1 import csv
2 import json
3
4
5 class Example(object):
6 """Defines a single training or test example.
7
8 Stores each column of the example as an attribute.
9 """
10
11 @classmethod
12 def fromJSON(cls, data, fields):
13 return cls.fromdict(json.loads(data), fields)
14
15 @classmethod
16 def fromdict(cls, data, fields):
17 ex = cls()
18 for key, vals in fields.items():
19 if key in data and vals is not None:
20 if not isinstance(vals, list):
21 vals = [vals]
22 for val in vals:
23 name, field = val
24 setattr(ex, name, field.preprocess(data[key]))
25 return ex
26
27 @classmethod
28 def fromTSV(cls, data, fields):
29 if data[-1] == '\n':
30 data = data[:-1]
31 return cls.fromlist(data.split('\t'), fields)
32
33 @classmethod
34 def fromCSV(cls, data, fields):
35 if data[-1] == '\n':
36 data = data[:-1]
37 return cls.fromlist(list(csv.reader([data]))[0], fields)
38
39 @classmethod
40 def fromlist(cls, data, fields):
41 ex = cls()
42 for (name, field), val in zip(fields, data):
43 if field is not None:
44 setattr(ex, name, field.preprocess(val))
45 return ex
46
47 @classmethod
48 def fromtree(cls, data, fields, subtrees=False):
49 try:
50 from nltk.tree import Tree
51 except ImportError:
52 print('''Please install NLTK:
53 $ pip install nltk''')
54 raise
55 tree = Tree.fromstring(data)
56 if subtrees:
57 return [cls.fromlist(
58 [t.leaves(), t.label()], fields) for t in tree.subtrees()]
59 return cls.fromlist([tree.leaves(), tree.label()], fields)
60
```
Path: `torchtext/data/utils.py`
Content:
```
1 def get_tokenizer(tokenizer):
2 if callable(tokenizer):
3 return tokenizer
4 if tokenizer == 'spacy':
5 try:
6 import spacy
7 spacy_en = spacy.load('en')
8 return lambda s: [tok.text for tok in spacy_en.tokenizer(s)]
9 except ImportError:
10 print("Please install SpaCy and the SpaCy English tokenizer. "
11 "See the docs at https://spacy.io for more information.")
12 raise
13 except AttributeError:
14 print("Please install SpaCy and the SpaCy English tokenizer. "
15 "See the docs at https://spacy.io for more information.")
16 raise
17 raise ValueError("Requested tokenizer {}, valid choices are a "
18 "callable that takes a single string as input "
19 "and \"spacy\" for the SpaCy English "
20 "tokenizer.".format(tokenizer))
21
22
23 def interleave_keys(a, b):
24 """Interleave bits from two sort keys to form a joint sort key.
25
26 Examples that are similar in both of the provided keys will have similar
27 values for the key defined by this function. Useful for tasks with two
28 text fields like machine translation or natural language inference.
29 """
30 def interleave(args):
31 return ''.join([x for t in zip(*args) for x in t])
32 return int(''.join(interleave(format(x, '016b') for x in (a, b))), base=2)
33
```
--- END FILES ---
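(For background on the requested feature: NLTK has shipped a Python port of the Moses tokenizer, which is what a `"moses"` option in `get_tokenizer` would most naturally wrap. The snippet below is a usage sketch only — it assumes an NLTK release that still provides `nltk.tokenize.moses`, functionality that was later spun out into the separate `sacremoses` package.)

```python
# Usage sketch of NLTK's Moses tokenizer port; requires an NLTK version
# that still bundles nltk.tokenize.moses and its data files.
from nltk.tokenize.moses import MosesTokenizer

moses_tokenizer = MosesTokenizer()
tokens = moses_tokenizer.tokenize("Hello World! This, too, gets split.")
print(tokens)
```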
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchtext/data/example.py b/torchtext/data/example.py
--- a/torchtext/data/example.py
+++ b/torchtext/data/example.py
@@ -49,8 +49,8 @@
try:
from nltk.tree import Tree
except ImportError:
- print('''Please install NLTK:
- $ pip install nltk''')
+ print("Please install NLTK. "
+ "See the docs at http://nltk.org for more information.")
raise
tree = Tree.fromstring(data)
if subtrees:
diff --git a/torchtext/data/utils.py b/torchtext/data/utils.py
--- a/torchtext/data/utils.py
+++ b/torchtext/data/utils.py
@@ -1,7 +1,7 @@
def get_tokenizer(tokenizer):
if callable(tokenizer):
return tokenizer
- if tokenizer == 'spacy':
+ if tokenizer == "spacy":
try:
import spacy
spacy_en = spacy.load('en')
@@ -14,10 +14,24 @@
print("Please install SpaCy and the SpaCy English tokenizer. "
"See the docs at https://spacy.io for more information.")
raise
+ elif tokenizer == "moses":
+ try:
+ from nltk.tokenize.moses import MosesTokenizer
+ moses_tokenizer = MosesTokenizer()
+ return moses_tokenizer.tokenize
+ except ImportError:
+ print("Please install NLTK. "
+ "See the docs at http://nltk.org for more information.")
+ raise
+ except LookupError:
+ print("Please install the necessary NLTK corpora. "
+ "See the docs at http://nltk.org for more information.")
+ raise
raise ValueError("Requested tokenizer {}, valid choices are a "
- "callable that takes a single string as input "
- "and \"spacy\" for the SpaCy English "
- "tokenizer.".format(tokenizer))
+ "callable that takes a single string as input, "
+ "\"spacy\" for the SpaCy English tokenizer, or "
+ "\"moses\" for the NLTK port of the Moses tokenization "
+ "script.".format(tokenizer))
def interleave_keys(a, b):
| {"golden_diff": "diff --git a/torchtext/data/example.py b/torchtext/data/example.py\n--- a/torchtext/data/example.py\n+++ b/torchtext/data/example.py\n@@ -49,8 +49,8 @@\n try:\n from nltk.tree import Tree\n except ImportError:\n- print('''Please install NLTK:\n- $ pip install nltk''')\n+ print(\"Please install NLTK. \"\n+ \"See the docs at http://nltk.org for more information.\")\n raise\n tree = Tree.fromstring(data)\n if subtrees:\ndiff --git a/torchtext/data/utils.py b/torchtext/data/utils.py\n--- a/torchtext/data/utils.py\n+++ b/torchtext/data/utils.py\n@@ -1,7 +1,7 @@\n def get_tokenizer(tokenizer):\n if callable(tokenizer):\n return tokenizer\n- if tokenizer == 'spacy':\n+ if tokenizer == \"spacy\":\n try:\n import spacy\n spacy_en = spacy.load('en')\n@@ -14,10 +14,24 @@\n print(\"Please install SpaCy and the SpaCy English tokenizer. \"\n \"See the docs at https://spacy.io for more information.\")\n raise\n+ elif tokenizer == \"moses\":\n+ try:\n+ from nltk.tokenize.moses import MosesTokenizer\n+ moses_tokenizer = MosesTokenizer()\n+ return moses_tokenizer.tokenize\n+ except ImportError:\n+ print(\"Please install NLTK. \"\n+ \"See the docs at http://nltk.org for more information.\")\n+ raise\n+ except LookupError:\n+ print(\"Please install the necessary NLTK corpora. \"\n+ \"See the docs at http://nltk.org for more information.\")\n+ raise\n raise ValueError(\"Requested tokenizer {}, valid choices are a \"\n- \"callable that takes a single string as input \"\n- \"and \\\"spacy\\\" for the SpaCy English \"\n- \"tokenizer.\".format(tokenizer))\n+ \"callable that takes a single string as input, \"\n+ \"\\\"spacy\\\" for the SpaCy English tokenizer, or \"\n+ \"\\\"moses\\\" for the NLTK port of the Moses tokenization \"\n+ \"script.\".format(tokenizer))\n \n \n def interleave_keys(a, b):\n", "issue": "Include Moses Tokenizer\n\n", "before_files": [{"content": "import csv\nimport json\n\n\nclass Example(object):\n \"\"\"Defines a single training or test example.\n\n Stores each column of the example as an attribute.\n \"\"\"\n\n @classmethod\n def fromJSON(cls, data, fields):\n return cls.fromdict(json.loads(data), fields)\n\n @classmethod\n def fromdict(cls, data, fields):\n ex = cls()\n for key, vals in fields.items():\n if key in data and vals is not None:\n if not isinstance(vals, list):\n vals = [vals]\n for val in vals:\n name, field = val\n setattr(ex, name, field.preprocess(data[key]))\n return ex\n\n @classmethod\n def fromTSV(cls, data, fields):\n if data[-1] == '\\n':\n data = data[:-1]\n return cls.fromlist(data.split('\\t'), fields)\n\n @classmethod\n def fromCSV(cls, data, fields):\n if data[-1] == '\\n':\n data = data[:-1]\n return cls.fromlist(list(csv.reader([data]))[0], fields)\n\n @classmethod\n def fromlist(cls, data, fields):\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n setattr(ex, name, field.preprocess(val))\n return ex\n\n @classmethod\n def fromtree(cls, data, fields, subtrees=False):\n try:\n from nltk.tree import Tree\n except ImportError:\n print('''Please install NLTK:\n $ pip install nltk''')\n raise\n tree = Tree.fromstring(data)\n if subtrees:\n return [cls.fromlist(\n [t.leaves(), t.label()], fields) for t in tree.subtrees()]\n return cls.fromlist([tree.leaves(), tree.label()], fields)\n", "path": "torchtext/data/example.py"}, {"content": "def get_tokenizer(tokenizer):\n if callable(tokenizer):\n return tokenizer\n if tokenizer == 'spacy':\n try:\n import spacy\n spacy_en = spacy.load('en')\n return lambda s: 
[tok.text for tok in spacy_en.tokenizer(s)]\n except ImportError:\n print(\"Please install SpaCy and the SpaCy English tokenizer. \"\n \"See the docs at https://spacy.io for more information.\")\n raise\n except AttributeError:\n print(\"Please install SpaCy and the SpaCy English tokenizer. \"\n \"See the docs at https://spacy.io for more information.\")\n raise\n raise ValueError(\"Requested tokenizer {}, valid choices are a \"\n \"callable that takes a single string as input \"\n \"and \\\"spacy\\\" for the SpaCy English \"\n \"tokenizer.\".format(tokenizer))\n\n\ndef interleave_keys(a, b):\n \"\"\"Interleave bits from two sort keys to form a joint sort key.\n\n Examples that are similar in both of the provided keys will have similar\n values for the key defined by this function. Useful for tasks with two\n text fields like machine translation or natural language inference.\n \"\"\"\n def interleave(args):\n return ''.join([x for t in zip(*args) for x in t])\n return int(''.join(interleave(format(x, '016b') for x in (a, b))), base=2)\n", "path": "torchtext/data/utils.py"}], "after_files": [{"content": "import csv\nimport json\n\n\nclass Example(object):\n \"\"\"Defines a single training or test example.\n\n Stores each column of the example as an attribute.\n \"\"\"\n\n @classmethod\n def fromJSON(cls, data, fields):\n return cls.fromdict(json.loads(data), fields)\n\n @classmethod\n def fromdict(cls, data, fields):\n ex = cls()\n for key, vals in fields.items():\n if key in data and vals is not None:\n if not isinstance(vals, list):\n vals = [vals]\n for val in vals:\n name, field = val\n setattr(ex, name, field.preprocess(data[key]))\n return ex\n\n @classmethod\n def fromTSV(cls, data, fields):\n if data[-1] == '\\n':\n data = data[:-1]\n return cls.fromlist(data.split('\\t'), fields)\n\n @classmethod\n def fromCSV(cls, data, fields):\n if data[-1] == '\\n':\n data = data[:-1]\n return cls.fromlist(list(csv.reader([data]))[0], fields)\n\n @classmethod\n def fromlist(cls, data, fields):\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n setattr(ex, name, field.preprocess(val))\n return ex\n\n @classmethod\n def fromtree(cls, data, fields, subtrees=False):\n try:\n from nltk.tree import Tree\n except ImportError:\n print(\"Please install NLTK. \"\n \"See the docs at http://nltk.org for more information.\")\n raise\n tree = Tree.fromstring(data)\n if subtrees:\n return [cls.fromlist(\n [t.leaves(), t.label()], fields) for t in tree.subtrees()]\n return cls.fromlist([tree.leaves(), tree.label()], fields)\n", "path": "torchtext/data/example.py"}, {"content": "def get_tokenizer(tokenizer):\n if callable(tokenizer):\n return tokenizer\n if tokenizer == \"spacy\":\n try:\n import spacy\n spacy_en = spacy.load('en')\n return lambda s: [tok.text for tok in spacy_en.tokenizer(s)]\n except ImportError:\n print(\"Please install SpaCy and the SpaCy English tokenizer. \"\n \"See the docs at https://spacy.io for more information.\")\n raise\n except AttributeError:\n print(\"Please install SpaCy and the SpaCy English tokenizer. \"\n \"See the docs at https://spacy.io for more information.\")\n raise\n elif tokenizer == \"moses\":\n try:\n from nltk.tokenize.moses import MosesTokenizer\n moses_tokenizer = MosesTokenizer()\n return moses_tokenizer.tokenize\n except ImportError:\n print(\"Please install NLTK. \"\n \"See the docs at http://nltk.org for more information.\")\n raise\n except LookupError:\n print(\"Please install the necessary NLTK corpora. 
\"\n \"See the docs at http://nltk.org for more information.\")\n raise\n raise ValueError(\"Requested tokenizer {}, valid choices are a \"\n \"callable that takes a single string as input, \"\n \"\\\"spacy\\\" for the SpaCy English tokenizer, or \"\n \"\\\"moses\\\" for the NLTK port of the Moses tokenization \"\n \"script.\".format(tokenizer))\n\n\ndef interleave_keys(a, b):\n \"\"\"Interleave bits from two sort keys to form a joint sort key.\n\n Examples that are similar in both of the provided keys will have similar\n values for the key defined by this function. Useful for tasks with two\n text fields like machine translation or natural language inference.\n \"\"\"\n def interleave(args):\n return ''.join([x for t in zip(*args) for x in t])\n return int(''.join(interleave(format(x, '016b') for x in (a, b))), base=2)\n", "path": "torchtext/data/utils.py"}]} | 1,150 | 496 |
gh_patches_debug_14448 | rasdani/github-patches | git_diff | oppia__oppia-9270 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeScript checks don't work on CircleCI
<!--
- Thanks for taking the time to report a bug in the Oppia project.
- Before filing a new issue, please do a quick search to check that it hasn't
 - already been filed on the [issue tracker](https://github.com/oppia/oppia/issues).
-->
**Describe the bug**
TypeScript checks don't work on CircleCI
**To Reproduce**
Steps to reproduce the behavior:
1. See logs on any pull request.
**Observed behavior**
```
Compiling and testing typescript...
/usr/bin/env: ‘node’: No such file or directory
Compilation successful!
```
**Expected behavior**
They should run.
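(Editorial note: the log above suggests that the `node` binary the script expects under `oppia_tools` is simply not on `PATH` in the CI container. A small, purely illustrative pre-check like the following makes that precondition explicit; the directory name mirrors the hard-coded value in `typescript_checks.py` and is not part of any actual fix.)

```python
# Illustrative diagnostic only: verify the expected Node.js binary exists
# before invoking tsc, rather than letting the run report success anyway.
import os

node_bin = os.path.join(os.pardir, 'oppia_tools/node-10.18.0', 'bin', 'node')
if not os.path.exists(node_bin):
    raise Exception('node binary not found at %s' % node_bin)
```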
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/typescript_checks.py`
Content:
```
1 # Copyright 2019 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS-IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """File for compiling and checking typescript."""
16
17 from __future__ import absolute_import # pylint: disable=import-only-modules
18 from __future__ import unicode_literals # pylint: disable=import-only-modules
19
20 import json
21 import os
22 import shutil
23 import subprocess
24 import sys
25
26 import python_utils
27
28 COMPILED_JS_DIR = os.path.join('local_compiled_js_for_test', '')
29 TSCONFIG_FILEPATH = 'tsconfig.json'
30
31
32 def validate_compiled_js_dir():
33 """Validates that compiled js dir matches out dir in tsconfig."""
34 with python_utils.open_file(TSCONFIG_FILEPATH, 'r') as f:
35 config_data = json.load(f)
36 out_dir = os.path.join(config_data['compilerOptions']['outDir'], '')
37 if out_dir != COMPILED_JS_DIR:
38 raise Exception(
39 'COMPILED_JS_DIR: %s does not match the output directory '
40 'in %s: %s' % (COMPILED_JS_DIR, TSCONFIG_FILEPATH, out_dir))
41
42
43 def compile_and_check_typescript():
44 """Compiles typescript files and checks the compilation errors."""
45 node_path = os.path.join(os.pardir, 'oppia_tools/node-10.18.0')
46 os.environ['PATH'] = '%s/bin:' % node_path + os.environ['PATH']
47
48 validate_compiled_js_dir()
49
50 if os.path.exists(COMPILED_JS_DIR):
51 shutil.rmtree(COMPILED_JS_DIR)
52
53 python_utils.PRINT('Compiling and testing typescript...')
54 cmd = [
55 './node_modules/typescript/bin/tsc', '--project',
56 TSCONFIG_FILEPATH]
57 process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
58 error_messages = []
59 for line in iter(process.stdout.readline, ''):
60 error_messages.append(line)
61 if os.path.exists(COMPILED_JS_DIR):
62 shutil.rmtree(COMPILED_JS_DIR)
63 if error_messages:
64 python_utils.PRINT('Errors found during compilation\n')
65 for message in error_messages:
66 python_utils.PRINT(message)
67 sys.exit(1)
68 else:
69 python_utils.PRINT('Compilation successful!')
70
71
72 # The 'no coverage' pragma is used as this line is un-testable. This is because
73 # it will only be called when typescript_checks.py is used as a script.
74 if __name__ == '__main__': # pragma: no cover
75 compile_and_check_typescript()
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/typescript_checks.py b/scripts/typescript_checks.py
--- a/scripts/typescript_checks.py
+++ b/scripts/typescript_checks.py
@@ -24,6 +24,7 @@
import sys
import python_utils
+from . import common
COMPILED_JS_DIR = os.path.join('local_compiled_js_for_test', '')
TSCONFIG_FILEPATH = 'tsconfig.json'
@@ -42,7 +43,7 @@
def compile_and_check_typescript():
"""Compiles typescript files and checks the compilation errors."""
- node_path = os.path.join(os.pardir, 'oppia_tools/node-10.18.0')
+ node_path = common.NODE_PATH
os.environ['PATH'] = '%s/bin:' % node_path + os.environ['PATH']
validate_compiled_js_dir()
| {"golden_diff": "diff --git a/scripts/typescript_checks.py b/scripts/typescript_checks.py\n--- a/scripts/typescript_checks.py\n+++ b/scripts/typescript_checks.py\n@@ -24,6 +24,7 @@\n import sys\n \n import python_utils\n+from . import common\n \n COMPILED_JS_DIR = os.path.join('local_compiled_js_for_test', '')\n TSCONFIG_FILEPATH = 'tsconfig.json'\n@@ -42,7 +43,7 @@\n \n def compile_and_check_typescript():\n \"\"\"Compiles typescript files and checks the compilation errors.\"\"\"\n- node_path = os.path.join(os.pardir, 'oppia_tools/node-10.18.0')\n+ node_path = common.NODE_PATH\n os.environ['PATH'] = '%s/bin:' % node_path + os.environ['PATH']\n \n validate_compiled_js_dir()\n", "issue": "Typescript checks don't work on circle CI\n<!--\r\n - Thanks for taking the time to report a bug in the Oppia project.\r\n - Before filing a new issue, please do a quick search to check that it hasn't\r\n - already been filed on the [issue tracker](https://github.com/oppia/oppia/issues)._\r\n -->\r\n\r\n**Describe the bug**\r\nTypescript checks don't work on circle CI\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. See logs on any pull request.\r\n\r\n**Observed behavior**\r\n```\r\nCompiling and testing typescript...\r\n/usr/bin/env: \u2018node\u2019: No such file or directory\r\nCompilation successful!\r\n```\r\n\r\n**Expected behavior**\r\nThey should run.\r\n\n", "before_files": [{"content": "# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"File for compiling and checking typescript.\"\"\"\n\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nimport json\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nimport python_utils\n\nCOMPILED_JS_DIR = os.path.join('local_compiled_js_for_test', '')\nTSCONFIG_FILEPATH = 'tsconfig.json'\n\n\ndef validate_compiled_js_dir():\n \"\"\"Validates that compiled js dir matches out dir in tsconfig.\"\"\"\n with python_utils.open_file(TSCONFIG_FILEPATH, 'r') as f:\n config_data = json.load(f)\n out_dir = os.path.join(config_data['compilerOptions']['outDir'], '')\n if out_dir != COMPILED_JS_DIR:\n raise Exception(\n 'COMPILED_JS_DIR: %s does not match the output directory '\n 'in %s: %s' % (COMPILED_JS_DIR, TSCONFIG_FILEPATH, out_dir))\n\n\ndef compile_and_check_typescript():\n \"\"\"Compiles typescript files and checks the compilation errors.\"\"\"\n node_path = os.path.join(os.pardir, 'oppia_tools/node-10.18.0')\n os.environ['PATH'] = '%s/bin:' % node_path + os.environ['PATH']\n\n validate_compiled_js_dir()\n\n if os.path.exists(COMPILED_JS_DIR):\n shutil.rmtree(COMPILED_JS_DIR)\n\n python_utils.PRINT('Compiling and testing typescript...')\n cmd = [\n './node_modules/typescript/bin/tsc', '--project',\n TSCONFIG_FILEPATH]\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n error_messages = []\n for line in iter(process.stdout.readline, ''):\n 
error_messages.append(line)\n if os.path.exists(COMPILED_JS_DIR):\n shutil.rmtree(COMPILED_JS_DIR)\n if error_messages:\n python_utils.PRINT('Errors found during compilation\\n')\n for message in error_messages:\n python_utils.PRINT(message)\n sys.exit(1)\n else:\n python_utils.PRINT('Compilation successful!')\n\n\n# The 'no coverage' pragma is used as this line is un-testable. This is because\n# it will only be called when typescript_checks.py is used as a script.\nif __name__ == '__main__': # pragma: no cover\n compile_and_check_typescript()\n", "path": "scripts/typescript_checks.py"}], "after_files": [{"content": "# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"File for compiling and checking typescript.\"\"\"\n\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nimport json\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nimport python_utils\nfrom . import common\n\nCOMPILED_JS_DIR = os.path.join('local_compiled_js_for_test', '')\nTSCONFIG_FILEPATH = 'tsconfig.json'\n\n\ndef validate_compiled_js_dir():\n \"\"\"Validates that compiled js dir matches out dir in tsconfig.\"\"\"\n with python_utils.open_file(TSCONFIG_FILEPATH, 'r') as f:\n config_data = json.load(f)\n out_dir = os.path.join(config_data['compilerOptions']['outDir'], '')\n if out_dir != COMPILED_JS_DIR:\n raise Exception(\n 'COMPILED_JS_DIR: %s does not match the output directory '\n 'in %s: %s' % (COMPILED_JS_DIR, TSCONFIG_FILEPATH, out_dir))\n\n\ndef compile_and_check_typescript():\n \"\"\"Compiles typescript files and checks the compilation errors.\"\"\"\n node_path = common.NODE_PATH\n os.environ['PATH'] = '%s/bin:' % node_path + os.environ['PATH']\n\n validate_compiled_js_dir()\n\n if os.path.exists(COMPILED_JS_DIR):\n shutil.rmtree(COMPILED_JS_DIR)\n\n python_utils.PRINT('Compiling and testing typescript...')\n cmd = [\n './node_modules/typescript/bin/tsc', '--project',\n TSCONFIG_FILEPATH]\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n error_messages = []\n for line in iter(process.stdout.readline, ''):\n error_messages.append(line)\n if os.path.exists(COMPILED_JS_DIR):\n shutil.rmtree(COMPILED_JS_DIR)\n if error_messages:\n python_utils.PRINT('Errors found during compilation\\n')\n for message in error_messages:\n python_utils.PRINT(message)\n sys.exit(1)\n else:\n python_utils.PRINT('Compilation successful!')\n\n\n# The 'no coverage' pragma is used as this line is un-testable. This is because\n# it will only be called when typescript_checks.py is used as a script.\nif __name__ == '__main__': # pragma: no cover\n compile_and_check_typescript()\n", "path": "scripts/typescript_checks.py"}]} | 1,215 | 184 |
gh_patches_debug_30010 | rasdani/github-patches | git_diff | holoviz__panel-688 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Panel transforms plotly timestamps to numbers
Create a plotly time series curve:
```python
import numpy as np
import pandas as pd
import panel as pn
import plotly.offline as py
import plotly.graph_objs as go
pn.extension("plotly")
py.init_notebook_mode()
index = pd.date_range(start="2019-01-01", end="2019-02-01", freq="1D")
data = np.random.randn(index.size)
traces = [go.Scatter(x=index, y=data)]
```
`py.iplot(traces)` correctly displays it:

`pn.panel(traces)` converts the timestamps to numbers:

Tested with panel 0.5.1 and plotly 3.7.1
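A plausible reading of this (an assumption about the mechanism, not a confirmed diagnosis) is that the datetime values lose their dtype when the trace arrays are extracted for transport, so the front end receives plain epoch numbers instead of dates. The snippet below only mirrors that symptom with NumPy/pandas; it does not touch Panel's internals:

```python
# Rough illustration of the symptom: a datetime64 array reinterpreted as
# integers no longer looks like dates on an axis.
import numpy as np
import pandas as pd

index = pd.date_range(start="2019-01-01", end="2019-01-03", freq="1D")
as_dates = np.asarray(index)             # dtype datetime64[ns]
as_numbers = as_dates.astype("int64")    # nanoseconds since the epoch
print(as_dates[0])    # 2019-01-01T00:00:00.000000000
print(as_numbers[0])  # 1546300800000000000
```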
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/pane/plotly.py`
Content:
```
1 """
2 Defines a PlotlyPane which renders a plotly plot using PlotlyPlot
3 bokeh model.
4 """
5 from __future__ import absolute_import, division, unicode_literals
6
7 import sys
8
9 import numpy as np
10
11 from bokeh.models import ColumnDataSource
12 from pyviz_comms import JupyterComm
13 import param
14
15 from .base import PaneBase
16
17
18 class Plotly(PaneBase):
19 """
20 Plotly panes allow rendering plotly Figures and traces.
21
22 For efficiency any array objects found inside a Figure are added
23 to a ColumnDataSource which allows using binary transport to sync
24 the figure on bokeh server and via Comms.
25 """
26
27 config = param.Dict(doc="""config data""")
28 relayout_data = param.Dict(doc="""relayout callback data""")
29 restyle_data = param.List(doc="""restyle callback data""")
30 click_data = param.Dict(doc="""click callback data""")
31 hover_data = param.Dict(doc="""hover callback data""")
32 clickannotation_data = param.Dict(doc="""clickannotation callback data""")
33 selected_data = param.Dict(doc="""selected callback data""")
34 viewport = param.Dict(doc="""current viewport state""")
35 viewport_update_policy = param.Selector(
36 objects=["mouseup", "continuous", "throttle"],
37 default="mouseup",
38 doc="""\
39 Policy by which the viewport parameter is updated during user interactions:
40 - "mouseup": updates are synchronized when mouse button is released after panning
41 - "continuous": updates are synchronized continually while panning
42 - "throttle": updates are synchronized while panning, at intervals determined by the
43 viewport_update_throttle parameter"""
44 )
45 viewport_update_throttle = param.Integer(
46 bounds=(0, None),
47 default=200,
48 doc='''\
49 Time interval in milliseconds at which viewport updates are synchronized when
50 viewport_update_policy is "throttle"'''
51 )
52 _render_count = param.Integer(
53 doc="""Number of renders, increment to trigger re-render""", default=0)
54
55 _updates = True
56
57 priority = 0.8
58
59 @classmethod
60 def applies(cls, obj):
61 return ((isinstance(obj, list) and obj and all(cls.applies(o) for o in obj)) or
62 hasattr(obj, 'to_plotly_json') or (isinstance(obj, dict)
63 and 'data' in obj and 'layout' in obj))
64
65 def __init__(self, object=None, **params):
66 super(Plotly, self).__init__(object, **params)
67 self._figure = None
68 self._update_figure()
69
70 def _to_figure(self, obj):
71 import plotly.graph_objs as go
72 if isinstance(obj, go.Figure):
73 return obj
74 elif isinstance(obj, dict):
75 data, layout = obj['data'], obj['layout']
76 elif isinstance(obj, tuple):
77 data, layout = obj
78 else:
79 data, layout = obj, {}
80 data = data if isinstance(data, list) else [data]
81 return go.Figure(data=data, layout=layout)
82
83 @staticmethod
84 def _get_sources(json):
85 sources = []
86 traces = json.get('data', [])
87 for trace in traces:
88 data = {}
89 Plotly._get_sources_for_trace(trace, data)
90 sources.append(ColumnDataSource(data))
91 return sources
92
93 @staticmethod
94 def _get_sources_for_trace(json, data, parent_path=''):
95 for key, value in list(json.items()):
96 full_path = key if not parent_path else (parent_path + '.' + key)
97 if isinstance(value, np.ndarray):
98 # Extract numpy array
99 data[full_path] = [json.pop(key)]
100 elif isinstance(value, dict):
101 # Recurse into dictionaries:
102 Plotly._get_sources_for_trace(value, data=data, parent_path=full_path)
103 elif isinstance(value, list) and value and isinstance(value[0], dict):
104 # recurse into object arrays:
105 for i, element in enumerate(value):
106 element_path = full_path + '.' + str(i)
107 Plotly._get_sources_for_trace(
108 element, data=data, parent_path=element_path
109 )
110
111 @param.depends('object', watch=True)
112 def _update_figure(self):
113 import plotly.graph_objs as go
114
115 if (self.object is None or
116 type(self.object) is not go.Figure or
117 self.object is self._figure):
118 return
119
120 # Monkey patch the message stubs used by FigureWidget.
121 # We only patch `Figure` objects (not subclasses like FigureWidget) so
122 # we don't interfere with subclasses that override these methods.
123 fig = self.object
124 fig._send_addTraces_msg = lambda *_, **__: self.param.trigger('object')
125 fig._send_moveTraces_msg = lambda *_, **__: self.param.trigger('object')
126 fig._send_deleteTraces_msg = lambda *_, **__: self.param.trigger('object')
127 fig._send_restyle_msg = lambda *_, **__: self.param.trigger('object')
128 fig._send_relayout_msg = lambda *_, **__: self.param.trigger('object')
129 fig._send_update_msg = lambda *_, **__: self.param.trigger('object')
130 fig._send_animate_msg = lambda *_, **__: self.param.trigger('object')
131 self._figure = fig
132
133 def _update_data_sources(self, cds, trace):
134 trace_arrays = {}
135 Plotly._get_sources_for_trace(trace, trace_arrays)
136
137 update_sources = False
138 for key, new_col in trace_arrays.items():
139 new = new_col[0]
140
141 try:
142 old = cds.data.get(key)[0]
143 update_array = (
144 (type(old) != type(new)) or
145 (new.shape != old.shape) or
146 (new != old).any())
147 except:
148 update_array = True
149
150 if update_array:
151 update_sources = True
152 cds.data[key] = [new]
153
154 return update_sources
155
156 def _get_model(self, doc, root=None, parent=None, comm=None):
157 """
158 Should return the bokeh model to be rendered.
159 """
160 if 'panel.models.plotly' not in sys.modules:
161 if isinstance(comm, JupyterComm):
162 self.param.warning('PlotlyPlot was not imported on instantiation '
163 'and may not render in a notebook. Restart '
164 'the notebook kernel and ensure you load '
165 'it as part of the extension using:'
166 '\n\npn.extension(\'plotly\')\n')
167 from ..models.plotly import PlotlyPlot
168 else:
169 PlotlyPlot = getattr(sys.modules['panel.models.plotly'], 'PlotlyPlot')
170
171 if self.object is None:
172 json, sources = {}, []
173 else:
174 fig = self._to_figure(self.object)
175 json = fig.to_plotly_json()
176 sources = Plotly._get_sources(json)
177 model = PlotlyPlot(data=json.get('data', []),
178 layout=json.get('layout', {}),
179 config=self.config,
180 viewport=self.viewport,
181 viewport_update_policy=self.viewport_update_policy,
182 viewport_update_throttle=self.viewport_update_throttle,
183 data_sources=sources,
184 _render_count=self._render_count)
185
186 if root is None:
187 root = model
188
189 self._link_props(
190 model, [
191 'config', 'relayout_data', 'restyle_data', 'click_data', 'hover_data',
192 'clickannotation_data', 'selected_data', 'viewport',
193 'viewport_update_policy', 'viewport_update_throttle', '_render_count'
194 ],
195 doc,
196 root,
197 comm
198 )
199
200 if root is None:
201 root = model
202 self._models[root.ref['id']] = (model, parent)
203 return model
204
205 def _update(self, model):
206 if self.object is None:
207 model.update(data=[], layout={})
208 model._render_count += 1
209 return
210
211 fig = self._to_figure(self.object)
212 json = fig.to_plotly_json()
213
214 traces = json['data']
215 new_sources = []
216 update_sources = False
217 for i, trace in enumerate(traces):
218 if i < len(model.data_sources):
219 cds = model.data_sources[i]
220 else:
221 cds = ColumnDataSource()
222 new_sources.append(cds)
223
224 update_sources = self._update_data_sources(cds, trace) or update_sources
225 try:
226 update_layout = model.layout != json.get('layout')
227 except:
228 update_layout = True
229
230 # Determine if model needs updates
231 if (len(model.data) != len(traces)):
232 update_data = True
233 else:
234 update_data = False
235 for new, old in zip(traces, model.data):
236 try:
237 update_data = (
238 {k: v for k, v in new.items() if k != 'uid'} !=
239 {k: v for k, v in old.items() if k != 'uid'})
240 except:
241 update_data = True
242 if update_data:
243 break
244
245 if new_sources:
246 model.data_sources += new_sources
247
248 if update_data:
249 model.data = json.get('data')
250
251 if update_layout:
252 model.layout = json.get('layout')
253
254 # Check if we should trigger rendering
255 if new_sources or update_sources or update_data or update_layout:
256 model._render_count += 1
257
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/pane/plotly.py b/panel/pane/plotly.py
--- a/panel/pane/plotly.py
+++ b/panel/pane/plotly.py
@@ -13,6 +13,7 @@
import param
from .base import PaneBase
+from ..util import isdatetime
class Plotly(PaneBase):
@@ -153,6 +154,21 @@
return update_sources
+ @staticmethod
+ def _plotly_json_wrapper(fig):
+ """Wraps around to_plotly_json and applies necessary fixes.
+
+ For #382: Map datetime elements to strings.
+ """
+ json = fig.to_plotly_json()
+ data = json['data']
+
+ for idx in range(len(data)):
+ for key in data[idx]:
+ if isdatetime(data[idx][key]):
+ data[idx][key].astype(str)
+ return json
+
def _get_model(self, doc, root=None, parent=None, comm=None):
"""
Should return the bokeh model to be rendered.
@@ -172,7 +188,7 @@
json, sources = {}, []
else:
fig = self._to_figure(self.object)
- json = fig.to_plotly_json()
+ json = self._plotly_json_wrapper(fig)
sources = Plotly._get_sources(json)
model = PlotlyPlot(data=json.get('data', []),
layout=json.get('layout', {}),
@@ -209,7 +225,7 @@
return
fig = self._to_figure(self.object)
- json = fig.to_plotly_json()
+ json = self._plotly_json_wrapper(fig)
traces = json['data']
new_sources = []
| {"golden_diff": "diff --git a/panel/pane/plotly.py b/panel/pane/plotly.py\n--- a/panel/pane/plotly.py\n+++ b/panel/pane/plotly.py\n@@ -13,6 +13,7 @@\n import param\n \n from .base import PaneBase\n+from ..util import isdatetime\n \n \n class Plotly(PaneBase):\n@@ -153,6 +154,21 @@\n \n return update_sources\n \n+ @staticmethod\n+ def _plotly_json_wrapper(fig):\n+ \"\"\"Wraps around to_plotly_json and applies necessary fixes.\n+\n+ For #382: Map datetime elements to strings.\n+ \"\"\"\n+ json = fig.to_plotly_json()\n+ data = json['data']\n+\n+ for idx in range(len(data)):\n+ for key in data[idx]:\n+ if isdatetime(data[idx][key]):\n+ data[idx][key].astype(str)\n+ return json\n+\n def _get_model(self, doc, root=None, parent=None, comm=None):\n \"\"\"\n Should return the bokeh model to be rendered.\n@@ -172,7 +188,7 @@\n json, sources = {}, []\n else:\n fig = self._to_figure(self.object)\n- json = fig.to_plotly_json()\n+ json = self._plotly_json_wrapper(fig)\n sources = Plotly._get_sources(json)\n model = PlotlyPlot(data=json.get('data', []),\n layout=json.get('layout', {}),\n@@ -209,7 +225,7 @@\n return\n \n fig = self._to_figure(self.object)\n- json = fig.to_plotly_json()\n+ json = self._plotly_json_wrapper(fig)\n \n traces = json['data']\n new_sources = []\n", "issue": "Panel transforms plotly timestamps to numbers\nCreate a plotly time series curve:\r\n```python\r\nimport numpy as np\r\nimport pandas as pd\r\nimport panel as pn\r\nimport plotly.offline as py\r\nimport plotly.graph_objs as go\r\n\r\npn.extension(\"plotly\")\r\npy.init_notebook_mode()\r\n\r\nindex = pd.date_range(start=\"2019-01-01\", end=\"2019-02-01\", freq=\"1D\")\r\ndata = np.random.randn(index.size)\r\ntraces = [go.Scatter(x=index, y=data)]\r\n```\r\n`py.iplot(traces)` correctly displays it:\r\n\r\n`pn.panel(traces)` converts the timestamps to numbers:\r\n\r\n\r\nTested with panel 0.5.1 and plotly 3.7.1\n", "before_files": [{"content": "\"\"\"\nDefines a PlotlyPane which renders a plotly plot using PlotlyPlot\nbokeh model.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport sys\n\nimport numpy as np\n\nfrom bokeh.models import ColumnDataSource\nfrom pyviz_comms import JupyterComm\nimport param\n\nfrom .base import PaneBase\n\n\nclass Plotly(PaneBase):\n \"\"\"\n Plotly panes allow rendering plotly Figures and traces.\n\n For efficiency any array objects found inside a Figure are added\n to a ColumnDataSource which allows using binary transport to sync\n the figure on bokeh server and via Comms.\n \"\"\"\n\n config = param.Dict(doc=\"\"\"config data\"\"\")\n relayout_data = param.Dict(doc=\"\"\"relayout callback data\"\"\")\n restyle_data = param.List(doc=\"\"\"restyle callback data\"\"\")\n click_data = param.Dict(doc=\"\"\"click callback data\"\"\")\n hover_data = param.Dict(doc=\"\"\"hover callback data\"\"\")\n clickannotation_data = param.Dict(doc=\"\"\"clickannotation callback data\"\"\")\n selected_data = param.Dict(doc=\"\"\"selected callback data\"\"\")\n viewport = param.Dict(doc=\"\"\"current viewport state\"\"\")\n viewport_update_policy = param.Selector(\n objects=[\"mouseup\", \"continuous\", \"throttle\"],\n default=\"mouseup\",\n doc=\"\"\"\\\nPolicy by which the viewport parameter is updated during user interactions:\n - \"mouseup\": updates are synchronized when mouse button is released after panning\n - \"continuous\": updates are synchronized continually while panning\n - \"throttle\": updates are synchronized while panning, at intervals determined by the\n 
viewport_update_throttle parameter\"\"\"\n )\n viewport_update_throttle = param.Integer(\n bounds=(0, None),\n default=200,\n doc='''\\\nTime interval in milliseconds at which viewport updates are synchronized when\nviewport_update_policy is \"throttle\"'''\n )\n _render_count = param.Integer(\n doc=\"\"\"Number of renders, increment to trigger re-render\"\"\", default=0)\n\n _updates = True\n\n priority = 0.8\n\n @classmethod\n def applies(cls, obj):\n return ((isinstance(obj, list) and obj and all(cls.applies(o) for o in obj)) or\n hasattr(obj, 'to_plotly_json') or (isinstance(obj, dict)\n and 'data' in obj and 'layout' in obj))\n\n def __init__(self, object=None, **params):\n super(Plotly, self).__init__(object, **params)\n self._figure = None\n self._update_figure()\n\n def _to_figure(self, obj):\n import plotly.graph_objs as go\n if isinstance(obj, go.Figure):\n return obj\n elif isinstance(obj, dict):\n data, layout = obj['data'], obj['layout']\n elif isinstance(obj, tuple):\n data, layout = obj\n else:\n data, layout = obj, {}\n data = data if isinstance(data, list) else [data]\n return go.Figure(data=data, layout=layout)\n\n @staticmethod\n def _get_sources(json):\n sources = []\n traces = json.get('data', [])\n for trace in traces:\n data = {}\n Plotly._get_sources_for_trace(trace, data)\n sources.append(ColumnDataSource(data))\n return sources\n\n @staticmethod\n def _get_sources_for_trace(json, data, parent_path=''):\n for key, value in list(json.items()):\n full_path = key if not parent_path else (parent_path + '.' + key)\n if isinstance(value, np.ndarray):\n # Extract numpy array\n data[full_path] = [json.pop(key)]\n elif isinstance(value, dict):\n # Recurse into dictionaries:\n Plotly._get_sources_for_trace(value, data=data, parent_path=full_path)\n elif isinstance(value, list) and value and isinstance(value[0], dict):\n # recurse into object arrays:\n for i, element in enumerate(value):\n element_path = full_path + '.' 
+ str(i)\n Plotly._get_sources_for_trace(\n element, data=data, parent_path=element_path\n )\n\n @param.depends('object', watch=True)\n def _update_figure(self):\n import plotly.graph_objs as go\n\n if (self.object is None or\n type(self.object) is not go.Figure or\n self.object is self._figure):\n return\n\n # Monkey patch the message stubs used by FigureWidget.\n # We only patch `Figure` objects (not subclasses like FigureWidget) so\n # we don't interfere with subclasses that override these methods.\n fig = self.object\n fig._send_addTraces_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_moveTraces_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_deleteTraces_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_restyle_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_relayout_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_update_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_animate_msg = lambda *_, **__: self.param.trigger('object')\n self._figure = fig\n\n def _update_data_sources(self, cds, trace):\n trace_arrays = {}\n Plotly._get_sources_for_trace(trace, trace_arrays)\n\n update_sources = False\n for key, new_col in trace_arrays.items():\n new = new_col[0]\n\n try:\n old = cds.data.get(key)[0]\n update_array = (\n (type(old) != type(new)) or\n (new.shape != old.shape) or\n (new != old).any())\n except:\n update_array = True\n\n if update_array:\n update_sources = True\n cds.data[key] = [new]\n\n return update_sources\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n \"\"\"\n Should return the bokeh model to be rendered.\n \"\"\"\n if 'panel.models.plotly' not in sys.modules:\n if isinstance(comm, JupyterComm):\n self.param.warning('PlotlyPlot was not imported on instantiation '\n 'and may not render in a notebook. 
Restart '\n 'the notebook kernel and ensure you load '\n 'it as part of the extension using:'\n '\\n\\npn.extension(\\'plotly\\')\\n')\n from ..models.plotly import PlotlyPlot\n else:\n PlotlyPlot = getattr(sys.modules['panel.models.plotly'], 'PlotlyPlot')\n\n if self.object is None:\n json, sources = {}, []\n else:\n fig = self._to_figure(self.object)\n json = fig.to_plotly_json()\n sources = Plotly._get_sources(json)\n model = PlotlyPlot(data=json.get('data', []),\n layout=json.get('layout', {}),\n config=self.config,\n viewport=self.viewport,\n viewport_update_policy=self.viewport_update_policy,\n viewport_update_throttle=self.viewport_update_throttle,\n data_sources=sources,\n _render_count=self._render_count)\n\n if root is None:\n root = model\n\n self._link_props(\n model, [\n 'config', 'relayout_data', 'restyle_data', 'click_data', 'hover_data',\n 'clickannotation_data', 'selected_data', 'viewport',\n 'viewport_update_policy', 'viewport_update_throttle', '_render_count'\n ],\n doc,\n root,\n comm\n )\n\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _update(self, model):\n if self.object is None:\n model.update(data=[], layout={})\n model._render_count += 1\n return\n\n fig = self._to_figure(self.object)\n json = fig.to_plotly_json()\n\n traces = json['data']\n new_sources = []\n update_sources = False\n for i, trace in enumerate(traces):\n if i < len(model.data_sources):\n cds = model.data_sources[i]\n else:\n cds = ColumnDataSource()\n new_sources.append(cds)\n\n update_sources = self._update_data_sources(cds, trace) or update_sources\n try:\n update_layout = model.layout != json.get('layout')\n except:\n update_layout = True\n\n # Determine if model needs updates\n if (len(model.data) != len(traces)):\n update_data = True\n else:\n update_data = False\n for new, old in zip(traces, model.data):\n try:\n update_data = (\n {k: v for k, v in new.items() if k != 'uid'} !=\n {k: v for k, v in old.items() if k != 'uid'})\n except:\n update_data = True\n if update_data:\n break\n\n if new_sources:\n model.data_sources += new_sources\n\n if update_data:\n model.data = json.get('data')\n\n if update_layout:\n model.layout = json.get('layout')\n\n # Check if we should trigger rendering\n if new_sources or update_sources or update_data or update_layout:\n model._render_count += 1\n", "path": "panel/pane/plotly.py"}], "after_files": [{"content": "\"\"\"\nDefines a PlotlyPane which renders a plotly plot using PlotlyPlot\nbokeh model.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport sys\n\nimport numpy as np\n\nfrom bokeh.models import ColumnDataSource\nfrom pyviz_comms import JupyterComm\nimport param\n\nfrom .base import PaneBase\nfrom ..util import isdatetime\n\n\nclass Plotly(PaneBase):\n \"\"\"\n Plotly panes allow rendering plotly Figures and traces.\n\n For efficiency any array objects found inside a Figure are added\n to a ColumnDataSource which allows using binary transport to sync\n the figure on bokeh server and via Comms.\n \"\"\"\n\n config = param.Dict(doc=\"\"\"config data\"\"\")\n relayout_data = param.Dict(doc=\"\"\"relayout callback data\"\"\")\n restyle_data = param.List(doc=\"\"\"restyle callback data\"\"\")\n click_data = param.Dict(doc=\"\"\"click callback data\"\"\")\n hover_data = param.Dict(doc=\"\"\"hover callback data\"\"\")\n clickannotation_data = param.Dict(doc=\"\"\"clickannotation callback data\"\"\")\n selected_data = param.Dict(doc=\"\"\"selected callback 
data\"\"\")\n viewport = param.Dict(doc=\"\"\"current viewport state\"\"\")\n viewport_update_policy = param.Selector(\n objects=[\"mouseup\", \"continuous\", \"throttle\"],\n default=\"mouseup\",\n doc=\"\"\"\\\nPolicy by which the viewport parameter is updated during user interactions:\n - \"mouseup\": updates are synchronized when mouse button is released after panning\n - \"continuous\": updates are synchronized continually while panning\n - \"throttle\": updates are synchronized while panning, at intervals determined by the\n viewport_update_throttle parameter\"\"\"\n )\n viewport_update_throttle = param.Integer(\n bounds=(0, None),\n default=200,\n doc='''\\\nTime interval in milliseconds at which viewport updates are synchronized when\nviewport_update_policy is \"throttle\"'''\n )\n _render_count = param.Integer(\n doc=\"\"\"Number of renders, increment to trigger re-render\"\"\", default=0)\n\n _updates = True\n\n priority = 0.8\n\n @classmethod\n def applies(cls, obj):\n return ((isinstance(obj, list) and obj and all(cls.applies(o) for o in obj)) or\n hasattr(obj, 'to_plotly_json') or (isinstance(obj, dict)\n and 'data' in obj and 'layout' in obj))\n\n def __init__(self, object=None, **params):\n super(Plotly, self).__init__(object, **params)\n self._figure = None\n self._update_figure()\n\n def _to_figure(self, obj):\n import plotly.graph_objs as go\n if isinstance(obj, go.Figure):\n return obj\n elif isinstance(obj, dict):\n data, layout = obj['data'], obj['layout']\n elif isinstance(obj, tuple):\n data, layout = obj\n else:\n data, layout = obj, {}\n data = data if isinstance(data, list) else [data]\n return go.Figure(data=data, layout=layout)\n\n @staticmethod\n def _get_sources(json):\n sources = []\n traces = json.get('data', [])\n for trace in traces:\n data = {}\n Plotly._get_sources_for_trace(trace, data)\n sources.append(ColumnDataSource(data))\n return sources\n\n @staticmethod\n def _get_sources_for_trace(json, data, parent_path=''):\n for key, value in list(json.items()):\n full_path = key if not parent_path else (parent_path + '.' + key)\n if isinstance(value, np.ndarray):\n # Extract numpy array\n data[full_path] = [json.pop(key)]\n elif isinstance(value, dict):\n # Recurse into dictionaries:\n Plotly._get_sources_for_trace(value, data=data, parent_path=full_path)\n elif isinstance(value, list) and value and isinstance(value[0], dict):\n # recurse into object arrays:\n for i, element in enumerate(value):\n element_path = full_path + '.' 
+ str(i)\n Plotly._get_sources_for_trace(\n element, data=data, parent_path=element_path\n )\n\n @param.depends('object', watch=True)\n def _update_figure(self):\n import plotly.graph_objs as go\n\n if (self.object is None or\n type(self.object) is not go.Figure or\n self.object is self._figure):\n return\n\n # Monkey patch the message stubs used by FigureWidget.\n # We only patch `Figure` objects (not subclasses like FigureWidget) so\n # we don't interfere with subclasses that override these methods.\n fig = self.object\n fig._send_addTraces_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_moveTraces_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_deleteTraces_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_restyle_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_relayout_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_update_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_animate_msg = lambda *_, **__: self.param.trigger('object')\n self._figure = fig\n\n def _update_data_sources(self, cds, trace):\n trace_arrays = {}\n Plotly._get_sources_for_trace(trace, trace_arrays)\n\n update_sources = False\n for key, new_col in trace_arrays.items():\n new = new_col[0]\n\n try:\n old = cds.data.get(key)[0]\n update_array = (\n (type(old) != type(new)) or\n (new.shape != old.shape) or\n (new != old).any())\n except:\n update_array = True\n\n if update_array:\n update_sources = True\n cds.data[key] = [new]\n\n return update_sources\n\n @staticmethod\n def _plotly_json_wrapper(fig):\n \"\"\"Wraps around to_plotly_json and applies necessary fixes.\n\n For #382: Map datetime elements to strings.\n \"\"\"\n json = fig.to_plotly_json()\n data = json['data']\n\n for idx in range(len(data)):\n for key in data[idx]:\n if isdatetime(data[idx][key]):\n data[idx][key].astype(str)\n return json\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n \"\"\"\n Should return the bokeh model to be rendered.\n \"\"\"\n if 'panel.models.plotly' not in sys.modules:\n if isinstance(comm, JupyterComm):\n self.param.warning('PlotlyPlot was not imported on instantiation '\n 'and may not render in a notebook. 
Restart '\n 'the notebook kernel and ensure you load '\n 'it as part of the extension using:'\n '\\n\\npn.extension(\\'plotly\\')\\n')\n from ..models.plotly import PlotlyPlot\n else:\n PlotlyPlot = getattr(sys.modules['panel.models.plotly'], 'PlotlyPlot')\n\n if self.object is None:\n json, sources = {}, []\n else:\n fig = self._to_figure(self.object)\n json = self._plotly_json_wrapper(fig)\n sources = Plotly._get_sources(json)\n model = PlotlyPlot(data=json.get('data', []),\n layout=json.get('layout', {}),\n config=self.config,\n viewport=self.viewport,\n viewport_update_policy=self.viewport_update_policy,\n viewport_update_throttle=self.viewport_update_throttle,\n data_sources=sources,\n _render_count=self._render_count)\n\n if root is None:\n root = model\n\n self._link_props(\n model, [\n 'config', 'relayout_data', 'restyle_data', 'click_data', 'hover_data',\n 'clickannotation_data', 'selected_data', 'viewport',\n 'viewport_update_policy', 'viewport_update_throttle', '_render_count'\n ],\n doc,\n root,\n comm\n )\n\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _update(self, model):\n if self.object is None:\n model.update(data=[], layout={})\n model._render_count += 1\n return\n\n fig = self._to_figure(self.object)\n json = self._plotly_json_wrapper(fig)\n\n traces = json['data']\n new_sources = []\n update_sources = False\n for i, trace in enumerate(traces):\n if i < len(model.data_sources):\n cds = model.data_sources[i]\n else:\n cds = ColumnDataSource()\n new_sources.append(cds)\n\n update_sources = self._update_data_sources(cds, trace) or update_sources\n try:\n update_layout = model.layout != json.get('layout')\n except:\n update_layout = True\n\n # Determine if model needs updates\n if (len(model.data) != len(traces)):\n update_data = True\n else:\n update_data = False\n for new, old in zip(traces, model.data):\n try:\n update_data = (\n {k: v for k, v in new.items() if k != 'uid'} !=\n {k: v for k, v in old.items() if k != 'uid'})\n except:\n update_data = True\n if update_data:\n break\n\n if new_sources:\n model.data_sources += new_sources\n\n if update_data:\n model.data = json.get('data')\n\n if update_layout:\n model.layout = json.get('layout')\n\n # Check if we should trigger rendering\n if new_sources or update_sources or update_data or update_layout:\n model._render_count += 1\n", "path": "panel/pane/plotly.py"}]} | 3,224 | 393 |
gh_patches_debug_257 | rasdani/github-patches | git_diff | facebookresearch__habitat-lab-66 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mistake in Agent class' docs
Agent's class docstring states that the user has to implement 2 methods: `reset` and `step`. However, if I understood correctly, there's no method named `step`; there is a method named `act` instead. This is quite a tiny issue, but still.
https://github.com/facebookresearch/habitat-api/blob/c7443c39c5186e517d8a4b7c87a1b42106e4e77a/habitat/core/agent.py#L10-L17
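
For reference, an agent written against the class as it actually stands only needs `reset` and `act`, e.g.:

```python
from habitat.core.agent import Agent


class ForwardOnlyAgent(Agent):
    """Toy agent illustrating the real interface: reset() and act()."""

    def reset(self) -> None:
        pass  # no per-episode state to clear in this toy example

    def act(self, observations) -> int:
        return 0  # always return action index 0 (hypothetical action)
```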
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `habitat/core/agent.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 from habitat.core.simulator import Observations
8
9
10 class Agent:
11 """Abstract class for defining agents which act inside Env. This abstract
12 class standardizes agents to allow seamless benchmarking. To implement an
13 agent the user has to implement two methods:
14
15 reset
16 step
17 """
18
19 def reset(self) -> None:
20 """Called before starting a new episode in environment.
21 """
22 raise NotImplementedError
23
24 def act(self, observations: Observations) -> int:
25 """
26
27 Args:
28 observations: observations coming in from environment to be used
29 by agent to decide action.
30
31 Returns:
32 action to be taken inside the environment
33 """
34 raise NotImplementedError
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/habitat/core/agent.py b/habitat/core/agent.py
--- a/habitat/core/agent.py
+++ b/habitat/core/agent.py
@@ -13,7 +13,7 @@
agent the user has to implement two methods:
reset
- step
+ act
"""
def reset(self) -> None:
| {"golden_diff": "diff --git a/habitat/core/agent.py b/habitat/core/agent.py\n--- a/habitat/core/agent.py\n+++ b/habitat/core/agent.py\n@@ -13,7 +13,7 @@\n agent the user has to implement two methods:\n \n reset\n- step\n+ act\n \"\"\"\n \n def reset(self) -> None:\n", "issue": "Mistake in Agent class' docs\nAgent's class docs string states that user has to implement 2 methods: `reset` and `step`. However, If I understood correctly, there's no method named `step` and there is method `act` instead. This is quite tiny issue but still.\r\nhttps://github.com/facebookresearch/habitat-api/blob/c7443c39c5186e517d8a4b7c87a1b42106e4e77a/habitat/core/agent.py#L10-L17\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom habitat.core.simulator import Observations\n\n\nclass Agent:\n \"\"\"Abstract class for defining agents which act inside Env. This abstract\n class standardizes agents to allow seamless benchmarking. To implement an\n agent the user has to implement two methods:\n\n reset\n step\n \"\"\"\n\n def reset(self) -> None:\n \"\"\"Called before starting a new episode in environment.\n \"\"\"\n raise NotImplementedError\n\n def act(self, observations: Observations) -> int:\n \"\"\"\n\n Args:\n observations: observations coming in from environment to be used\n by agent to decide action.\n\n Returns:\n action to be taken inside the environment\n \"\"\"\n raise NotImplementedError\n", "path": "habitat/core/agent.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom habitat.core.simulator import Observations\n\n\nclass Agent:\n \"\"\"Abstract class for defining agents which act inside Env. This abstract\n class standardizes agents to allow seamless benchmarking. To implement an\n agent the user has to implement two methods:\n\n reset\n act\n \"\"\"\n\n def reset(self) -> None:\n \"\"\"Called before starting a new episode in environment.\n \"\"\"\n raise NotImplementedError\n\n def act(self, observations: Observations) -> int:\n \"\"\"\n\n Args:\n observations: observations coming in from environment to be used\n by agent to decide action.\n\n Returns:\n action to be taken inside the environment\n \"\"\"\n raise NotImplementedError\n", "path": "habitat/core/agent.py"}]} | 646 | 85 |
gh_patches_debug_26679 | rasdani/github-patches | git_diff | fossasia__open-event-server-2270 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ordering new tickets gives IntegrityError
IntegrityError: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely) (psycopg2.IntegrityError) null value in column "order_id" violates not-null constraint
DETAIL: Failing row contains (null, 1, 4).
[SQL: 'INSERT INTO orders_tickets (ticket_id, quantity) VALUES (%(ticket_id)s, %(quantity)s) RETURNING orders_tickets.order_id'] [parameters: {'ticket_id': 1, 'quantity': 4}]
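
The hint in the error message points at a fix: build the `OrderTicket` rows inside a `no_autoflush` block so SQLAlchemy does not flush them before the parent `Order` has an id. A rough sketch of that change in `create_order`, assuming the app's `db` session is importable from `app.models`:

```python
with db.session.no_autoflush:
    order_ticket = OrderTicket()
    # get_ticket() issues a query; inside no_autoflush it no longer
    # triggers a premature flush of the half-built orders_tickets row
    order_ticket.ticket = TicketingManager.get_ticket(id)
    order_ticket.quantity = int(ticket_quantity[index])
    order.tickets.append(order_ticket)
```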
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/helpers/ticketing.py`
Content:
```
1 """Copyright 2016 Niranjan Rajendran"""
2 import binascii
3 import os
4 import uuid
5
6 from datetime import timedelta, datetime
7
8 from flask import url_for
9
10 from flask.ext import login
11
12 from app.helpers.cache import cache
13 from app.helpers.data import save_to_db
14 from app.helpers.helpers import string_empty, send_email_for_after_purchase, get_count
15 from app.models.order import Order
16 from app.models.ticket import Ticket
17 from app.helpers.data_getter import DataGetter
18 from app.helpers.data import DataManager
19
20 from app.helpers.payment import StripePaymentsManager, represents_int, PayPalPaymentsManager
21 from app.models.ticket_holder import TicketHolder
22 from app.models.order import OrderTicket
23 from app.models.event import Event
24 from app.models.user_detail import UserDetail
25 from app.models.discount_code import DiscountCode
26
27 from app.helpers.helpers import send_email_after_account_create_with_password
28
29
30 class TicketingManager(object):
31 """All ticketing and orders related functions"""
32
33 @staticmethod
34 @cache.memoize(50)
35 def get_ticket(ticket_id):
36 return Ticket.query.get(ticket_id)
37
38 @staticmethod
39 def get_orders_of_user(user_id=None, upcoming_events=True):
40 """
41 :return: Return all order objects with the current user
42 """
43 if not user_id:
44 user_id = login.current_user.id
45 query = Order.query.join(Order.event) \
46 .filter(Order.user_id == user_id) \
47 .filter(Order.status == 'completed')
48 if upcoming_events:
49 return query.filter(Event.start_time >= datetime.now())
50 else:
51 return query.filter(Event.end_time < datetime.now())
52
53 @staticmethod
54 def get_orders(event_id=None, status=None, from_date=None, to_date=None):
55 if event_id:
56 if status:
57 orders = Order.query.filter_by(event_id=event_id).filter_by(status=status) \
58 .filter(Order.user_id.isnot(None))
59 else:
60 orders = Order.query.filter_by(event_id=event_id).filter(Order.user_id.isnot(None))
61 else:
62 if status:
63 orders = Order.query.filter_by(status=status).filter(Order.user_id.isnot(None))
64 else:
65 orders = Order.query.filter(Order.user_id.isnot(None))
66
67 if from_date:
68 orders = orders.filter(Order.created_at >= from_date)
69 if to_date:
70 orders = orders.filter(Order.created_at <= to_date)
71 return orders.all()
72
73 @staticmethod
74 def get_orders_count(event_id, status='completed'):
75 return get_count(Order.query.filter_by(event_id=event_id).filter(Order.user_id.isnot(None))
76 .filter_by(status=status))
77
78 @staticmethod
79 def get_orders_count_by_type(event_id, type='free'):
80 return get_count(Order.query.filter_by(event_id=event_id).filter_by(status='completed')
81 .filter(Ticket.type == type))
82
83 @staticmethod
84 def get_all_orders_count_by_type(type='free'):
85 return get_count(Order.query.filter_by(status='completed').filter(Ticket.type == type))
86
87 @staticmethod
88 def get_max_orders_count(event_id, type='free'):
89 ticket = Ticket.query.filter_by(event_id=event_id).filter_by(type=type).first()
90 if ticket:
91 return ticket.quantity
92 else:
93 return 0
94
95 @staticmethod
96 def get_order_expiry():
97 return 10
98
99 @staticmethod
100 def get_new_order_identifier():
101 identifier = str(uuid.uuid4())
102 count = get_count(Order.query.filter_by(identifier=identifier))
103 if count == 0:
104 return identifier
105 else:
106 return TicketingManager.get_new_order_identifier()
107
108 @staticmethod
109 def get_order(order_id):
110 return Ticket.query.get(order_id)
111
112 @staticmethod
113 def get_order_by_identifier(identifier):
114 return Order.query.filter_by(identifier=identifier).first()
115
116 @staticmethod
117 def get_discount_codes(event_id):
118 return DiscountCode.query.filter_by(event_id=event_id).all()
119
120 @staticmethod
121 def get_discount_code(event_id, discount_code):
122 if represents_int(discount_code):
123 return DiscountCode.query.get(discount_code)
124 else:
125 return DiscountCode.query.filter_by(code=discount_code).first()
126
127 @staticmethod
128 def get_or_create_user_by_email(email, data=None):
129 user = DataGetter.get_user_by_email(email, False)
130 if not user:
131 password = binascii.b2a_hex(os.urandom(4))
132 user_data = [email, password]
133 user = DataManager.create_user(user_data)
134 send_email_after_account_create_with_password({
135 'email': email,
136 'password': password
137 })
138 if user.user_detail:
139 user.user_detail.firstname = data['firstname']
140 user.user_detail.lastname = data['lastname']
141 else:
142 user_detail = UserDetail(firstname=data['firstname'], lastname=data['lastname'])
143 user.user_detail = user_detail
144
145 save_to_db(user)
146 return user
147
148 @staticmethod
149 def get_and_set_expiry(identifier, override=False):
150 if type(identifier) is Order:
151 order = identifier
152 elif represents_int(identifier):
153 order = TicketingManager.get_order(identifier)
154 else:
155 order = TicketingManager.get_order_by_identifier(identifier)
156
157 if order and not order.paid_via:
158 if override \
159 or (order.status != 'completed' and
160 (order.created_at + timedelta(
161 minutes=TicketingManager.get_order_expiry())) < datetime.utcnow()):
162 order.status = 'expired'
163 save_to_db(order)
164 return order
165
166 @staticmethod
167 def create_order(form, from_organizer=False):
168 order = Order()
169 order.status = 'pending'
170 order.identifier = TicketingManager.get_new_order_identifier()
171 order.event_id = form.get('event_id')
172
173 if from_organizer:
174 order.paid_via = form.get('payment_via')
175
176 ticket_ids = form.getlist('ticket_ids[]')
177 ticket_quantity = form.getlist('ticket_quantities[]')
178
179 ticket_subtotals = []
180 if from_organizer:
181 ticket_subtotals = form.getlist('ticket_subtotals[]')
182
183 amount = 0
184 for index, id in enumerate(ticket_ids):
185 if not string_empty(id) and int(ticket_quantity[index]) > 0:
186 order_ticket = OrderTicket()
187 order_ticket.ticket = TicketingManager.get_ticket(id)
188 order_ticket.quantity = int(ticket_quantity[index])
189 order.tickets.append(order_ticket)
190
191 if from_organizer:
192 amount += int(ticket_subtotals[index])
193 else:
194 amount += (order_ticket.ticket.price * order_ticket.quantity)
195
196 order.amount = amount
197
198 if login.current_user.is_authenticated:
199 order.user_id = login.current_user.id
200
201 save_to_db(order)
202 return order
203
204 @staticmethod
205 def initiate_order_payment(form):
206 identifier = form['identifier']
207 email = form['email']
208
209 order = TicketingManager.get_and_set_expiry(identifier)
210
211 if order:
212
213 user = TicketingManager.get_or_create_user_by_email(email, form)
214 order.user_id = user.id
215
216 if order.amount > 0 \
217 and (not order.paid_via
218 or (order.paid_via
219 and (order.paid_via == 'stripe'
220 or order.paid_via == 'paypal'))):
221
222 country = form['country']
223 address = form['address']
224 city = form['city']
225 state = form['state']
226 zipcode = form['zipcode']
227 order.address = address
228 order.city = city
229 order.state = state
230 order.country = country
231 order.zipcode = zipcode
232 order.status = 'initialized'
233 ticket_holder = TicketHolder(name=user.user_detail.fullname,
234 email=email, address=address,
235 city=city, state=state, country=country, order_id=order.id)
236 else:
237 order.status = 'completed'
238 order.completed_at = datetime.utcnow()
239 if not order.paid_via:
240 order.paid_via = 'free'
241 ticket_holder = TicketHolder(name=user.user_detail.fullname, email=email, order_id=order.id)
242 # add attendee role to user
243 DataManager.add_attendee_role_to_event(user, order.event_id)
244 # save items
245 save_to_db(order)
246 save_to_db(ticket_holder)
247
248 return order
249 else:
250 return False
251
252 @staticmethod
253 def charge_stripe_order_payment(form):
254 order = TicketingManager.get_and_set_expiry(form['identifier'])
255 order.stripe_token = form['stripe_token_id']
256 save_to_db(order)
257
258 charge = StripePaymentsManager.capture_payment(order)
259 if charge:
260 order.paid_via = 'stripe'
261 order.payment_mode = charge.source.object
262 order.brand = charge.source.brand
263 order.exp_month = charge.source.exp_month
264 order.exp_year = charge.source.exp_year
265 order.last4 = charge.source.last4
266 order.transaction_id = charge.id
267 order.status = 'completed'
268 order.completed_at = datetime.utcnow()
269 save_to_db(order)
270
271 send_email_for_after_purchase(order.user.email, order.get_invoice_number(),
272 url_for('ticketing.view_order_after_payment',
273 order_identifier=order.identifier, _external=True))
274 return True, order
275 else:
276 return False, 'Error'
277
278 @staticmethod
279 def charge_paypal_order_payment(order):
280 payment_details = PayPalPaymentsManager.get_approved_payment_details(order)
281 if 'PAYERID' in payment_details:
282 capture_result = PayPalPaymentsManager.capture_payment(order, payment_details['PAYERID'])
283 if capture_result['ACK'] == 'Success':
284 order.paid_via = 'paypal'
285 order.status = 'completed'
286 order.transaction_id = capture_result['PAYMENTINFO_0_TRANSACTIONID']
287 order.completed_at = datetime.utcnow()
288 save_to_db(order)
289 send_email_for_after_purchase(order.user.email, order.get_invoice_number(),
290 url_for('ticketing.view_order_after_payment',
291 order_identifier=order.identifier, _external=True))
292 return True, order
293 else:
294 return False, capture_result['L_SHORTMESSAGE0']
295 else:
296 return False, 'Payer ID missing. Payment flow tampered.'
297
298 @staticmethod
299 def create_edit_discount_code(form, event_id, discount_code_id=None):
300 if not discount_code_id:
301 discount_code = DiscountCode()
302 else:
303 discount_code = TicketingManager.get_discount_code(event_id, discount_code_id)
304 discount_code.code = form.get('code')
305 discount_code.value = form.get('value')
306 discount_code.type = form.get('value_type')
307 discount_code.min_quantity = form.get('min_quantity', None)
308 discount_code.max_quantity = form.get('max_quantity', None)
309 discount_code.tickets_number = form.get('tickets_number')
310 discount_code.event_id = event_id
311
312 if discount_code.min_quantity == "":
313 discount_code.min_quantity = None
314 if discount_code.max_quantity == "":
315 discount_code.max_quantity = None
316 if discount_code.tickets_number == "":
317 discount_code.tickets_number = None
318
319 try:
320 discount_code.valid_from = datetime.strptime(form.get('start_date', None) + ' ' +
321 form.get('start_time', None), '%m/%d/%Y %H:%M')
322 except:
323 discount_code.valid_from = None
324
325 try:
326 discount_code.valid_till = datetime.strptime(form.get('end_date', None) + ' ' +
327 form.get('end_time', None), '%m/%d/%Y %H:%M')
328 except:
329 discount_code.valid_till = None
330
331 discount_code.tickets = ",".join(form.getlist('tickets[]'))
332
333 save_to_db(discount_code)
334
335 return discount_code
336
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/helpers/ticketing.py b/app/helpers/ticketing.py
--- a/app/helpers/ticketing.py
+++ b/app/helpers/ticketing.py
@@ -9,6 +9,7 @@
from flask.ext import login
+from app.models import db
from app.helpers.cache import cache
from app.helpers.data import save_to_db
from app.helpers.helpers import string_empty, send_email_for_after_purchase, get_count
@@ -183,15 +184,16 @@
amount = 0
for index, id in enumerate(ticket_ids):
if not string_empty(id) and int(ticket_quantity[index]) > 0:
- order_ticket = OrderTicket()
- order_ticket.ticket = TicketingManager.get_ticket(id)
- order_ticket.quantity = int(ticket_quantity[index])
- order.tickets.append(order_ticket)
-
- if from_organizer:
- amount += int(ticket_subtotals[index])
- else:
- amount += (order_ticket.ticket.price * order_ticket.quantity)
+ with db.session.no_autoflush:
+ order_ticket = OrderTicket()
+ order_ticket.ticket = TicketingManager.get_ticket(id)
+ order_ticket.quantity = int(ticket_quantity[index])
+ order.tickets.append(order_ticket)
+
+ if from_organizer:
+ amount += int(ticket_subtotals[index])
+ else:
+ amount += (order_ticket.ticket.price * order_ticket.quantity)
order.amount = amount
| {"golden_diff": "diff --git a/app/helpers/ticketing.py b/app/helpers/ticketing.py\n--- a/app/helpers/ticketing.py\n+++ b/app/helpers/ticketing.py\n@@ -9,6 +9,7 @@\n \n from flask.ext import login\n \n+from app.models import db\n from app.helpers.cache import cache\n from app.helpers.data import save_to_db\n from app.helpers.helpers import string_empty, send_email_for_after_purchase, get_count\n@@ -183,15 +184,16 @@\n amount = 0\n for index, id in enumerate(ticket_ids):\n if not string_empty(id) and int(ticket_quantity[index]) > 0:\n- order_ticket = OrderTicket()\n- order_ticket.ticket = TicketingManager.get_ticket(id)\n- order_ticket.quantity = int(ticket_quantity[index])\n- order.tickets.append(order_ticket)\n-\n- if from_organizer:\n- amount += int(ticket_subtotals[index])\n- else:\n- amount += (order_ticket.ticket.price * order_ticket.quantity)\n+ with db.session.no_autoflush:\n+ order_ticket = OrderTicket()\n+ order_ticket.ticket = TicketingManager.get_ticket(id)\n+ order_ticket.quantity = int(ticket_quantity[index])\n+ order.tickets.append(order_ticket)\n+\n+ if from_organizer:\n+ amount += int(ticket_subtotals[index])\n+ else:\n+ amount += (order_ticket.ticket.price * order_ticket.quantity)\n \n order.amount = amount\n", "issue": "Ordering new tickets gives IntegrityError\nIntegrityError: (raised as a result of Query-invoked autoflush; consider using a session.no_autoflush block if this flush is occurring prematurely) (psycopg2.IntegrityError) null value in column \"order_id\" violates not-null constraint\nDETAIL: Failing row contains (null, 1, 4).\n [SQL: 'INSERT INTO orders_tickets (ticket_id, quantity) VALUES (%(ticket_id)s, %(quantity)s) RETURNING orders_tickets.order_id'] [parameters: {'ticket_id': 1, 'quantity': 4}]\n\n", "before_files": [{"content": "\"\"\"Copyright 2016 Niranjan Rajendran\"\"\"\nimport binascii\nimport os\nimport uuid\n\nfrom datetime import timedelta, datetime\n\nfrom flask import url_for\n\nfrom flask.ext import login\n\nfrom app.helpers.cache import cache\nfrom app.helpers.data import save_to_db\nfrom app.helpers.helpers import string_empty, send_email_for_after_purchase, get_count\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.data import DataManager\n\nfrom app.helpers.payment import StripePaymentsManager, represents_int, PayPalPaymentsManager\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.order import OrderTicket\nfrom app.models.event import Event\nfrom app.models.user_detail import UserDetail\nfrom app.models.discount_code import DiscountCode\n\nfrom app.helpers.helpers import send_email_after_account_create_with_password\n\n\nclass TicketingManager(object):\n \"\"\"All ticketing and orders related functions\"\"\"\n\n @staticmethod\n @cache.memoize(50)\n def get_ticket(ticket_id):\n return Ticket.query.get(ticket_id)\n\n @staticmethod\n def get_orders_of_user(user_id=None, upcoming_events=True):\n \"\"\"\n :return: Return all order objects with the current user\n \"\"\"\n if not user_id:\n user_id = login.current_user.id\n query = Order.query.join(Order.event) \\\n .filter(Order.user_id == user_id) \\\n .filter(Order.status == 'completed')\n if upcoming_events:\n return query.filter(Event.start_time >= datetime.now())\n else:\n return query.filter(Event.end_time < datetime.now())\n\n @staticmethod\n def get_orders(event_id=None, status=None, from_date=None, to_date=None):\n if event_id:\n if status:\n orders = 
Order.query.filter_by(event_id=event_id).filter_by(status=status) \\\n .filter(Order.user_id.isnot(None))\n else:\n orders = Order.query.filter_by(event_id=event_id).filter(Order.user_id.isnot(None))\n else:\n if status:\n orders = Order.query.filter_by(status=status).filter(Order.user_id.isnot(None))\n else:\n orders = Order.query.filter(Order.user_id.isnot(None))\n\n if from_date:\n orders = orders.filter(Order.created_at >= from_date)\n if to_date:\n orders = orders.filter(Order.created_at <= to_date)\n return orders.all()\n\n @staticmethod\n def get_orders_count(event_id, status='completed'):\n return get_count(Order.query.filter_by(event_id=event_id).filter(Order.user_id.isnot(None))\n .filter_by(status=status))\n\n @staticmethod\n def get_orders_count_by_type(event_id, type='free'):\n return get_count(Order.query.filter_by(event_id=event_id).filter_by(status='completed')\n .filter(Ticket.type == type))\n\n @staticmethod\n def get_all_orders_count_by_type(type='free'):\n return get_count(Order.query.filter_by(status='completed').filter(Ticket.type == type))\n\n @staticmethod\n def get_max_orders_count(event_id, type='free'):\n ticket = Ticket.query.filter_by(event_id=event_id).filter_by(type=type).first()\n if ticket:\n return ticket.quantity\n else:\n return 0\n\n @staticmethod\n def get_order_expiry():\n return 10\n\n @staticmethod\n def get_new_order_identifier():\n identifier = str(uuid.uuid4())\n count = get_count(Order.query.filter_by(identifier=identifier))\n if count == 0:\n return identifier\n else:\n return TicketingManager.get_new_order_identifier()\n\n @staticmethod\n def get_order(order_id):\n return Ticket.query.get(order_id)\n\n @staticmethod\n def get_order_by_identifier(identifier):\n return Order.query.filter_by(identifier=identifier).first()\n\n @staticmethod\n def get_discount_codes(event_id):\n return DiscountCode.query.filter_by(event_id=event_id).all()\n\n @staticmethod\n def get_discount_code(event_id, discount_code):\n if represents_int(discount_code):\n return DiscountCode.query.get(discount_code)\n else:\n return DiscountCode.query.filter_by(code=discount_code).first()\n\n @staticmethod\n def get_or_create_user_by_email(email, data=None):\n user = DataGetter.get_user_by_email(email, False)\n if not user:\n password = binascii.b2a_hex(os.urandom(4))\n user_data = [email, password]\n user = DataManager.create_user(user_data)\n send_email_after_account_create_with_password({\n 'email': email,\n 'password': password\n })\n if user.user_detail:\n user.user_detail.firstname = data['firstname']\n user.user_detail.lastname = data['lastname']\n else:\n user_detail = UserDetail(firstname=data['firstname'], lastname=data['lastname'])\n user.user_detail = user_detail\n\n save_to_db(user)\n return user\n\n @staticmethod\n def get_and_set_expiry(identifier, override=False):\n if type(identifier) is Order:\n order = identifier\n elif represents_int(identifier):\n order = TicketingManager.get_order(identifier)\n else:\n order = TicketingManager.get_order_by_identifier(identifier)\n\n if order and not order.paid_via:\n if override \\\n or (order.status != 'completed' and\n (order.created_at + timedelta(\n minutes=TicketingManager.get_order_expiry())) < datetime.utcnow()):\n order.status = 'expired'\n save_to_db(order)\n return order\n\n @staticmethod\n def create_order(form, from_organizer=False):\n order = Order()\n order.status = 'pending'\n order.identifier = TicketingManager.get_new_order_identifier()\n order.event_id = form.get('event_id')\n\n if from_organizer:\n 
order.paid_via = form.get('payment_via')\n\n ticket_ids = form.getlist('ticket_ids[]')\n ticket_quantity = form.getlist('ticket_quantities[]')\n\n ticket_subtotals = []\n if from_organizer:\n ticket_subtotals = form.getlist('ticket_subtotals[]')\n\n amount = 0\n for index, id in enumerate(ticket_ids):\n if not string_empty(id) and int(ticket_quantity[index]) > 0:\n order_ticket = OrderTicket()\n order_ticket.ticket = TicketingManager.get_ticket(id)\n order_ticket.quantity = int(ticket_quantity[index])\n order.tickets.append(order_ticket)\n\n if from_organizer:\n amount += int(ticket_subtotals[index])\n else:\n amount += (order_ticket.ticket.price * order_ticket.quantity)\n\n order.amount = amount\n\n if login.current_user.is_authenticated:\n order.user_id = login.current_user.id\n\n save_to_db(order)\n return order\n\n @staticmethod\n def initiate_order_payment(form):\n identifier = form['identifier']\n email = form['email']\n\n order = TicketingManager.get_and_set_expiry(identifier)\n\n if order:\n\n user = TicketingManager.get_or_create_user_by_email(email, form)\n order.user_id = user.id\n\n if order.amount > 0 \\\n and (not order.paid_via\n or (order.paid_via\n and (order.paid_via == 'stripe'\n or order.paid_via == 'paypal'))):\n\n country = form['country']\n address = form['address']\n city = form['city']\n state = form['state']\n zipcode = form['zipcode']\n order.address = address\n order.city = city\n order.state = state\n order.country = country\n order.zipcode = zipcode\n order.status = 'initialized'\n ticket_holder = TicketHolder(name=user.user_detail.fullname,\n email=email, address=address,\n city=city, state=state, country=country, order_id=order.id)\n else:\n order.status = 'completed'\n order.completed_at = datetime.utcnow()\n if not order.paid_via:\n order.paid_via = 'free'\n ticket_holder = TicketHolder(name=user.user_detail.fullname, email=email, order_id=order.id)\n # add attendee role to user\n DataManager.add_attendee_role_to_event(user, order.event_id)\n # save items\n save_to_db(order)\n save_to_db(ticket_holder)\n\n return order\n else:\n return False\n\n @staticmethod\n def charge_stripe_order_payment(form):\n order = TicketingManager.get_and_set_expiry(form['identifier'])\n order.stripe_token = form['stripe_token_id']\n save_to_db(order)\n\n charge = StripePaymentsManager.capture_payment(order)\n if charge:\n order.paid_via = 'stripe'\n order.payment_mode = charge.source.object\n order.brand = charge.source.brand\n order.exp_month = charge.source.exp_month\n order.exp_year = charge.source.exp_year\n order.last4 = charge.source.last4\n order.transaction_id = charge.id\n order.status = 'completed'\n order.completed_at = datetime.utcnow()\n save_to_db(order)\n\n send_email_for_after_purchase(order.user.email, order.get_invoice_number(),\n url_for('ticketing.view_order_after_payment',\n order_identifier=order.identifier, _external=True))\n return True, order\n else:\n return False, 'Error'\n\n @staticmethod\n def charge_paypal_order_payment(order):\n payment_details = PayPalPaymentsManager.get_approved_payment_details(order)\n if 'PAYERID' in payment_details:\n capture_result = PayPalPaymentsManager.capture_payment(order, payment_details['PAYERID'])\n if capture_result['ACK'] == 'Success':\n order.paid_via = 'paypal'\n order.status = 'completed'\n order.transaction_id = capture_result['PAYMENTINFO_0_TRANSACTIONID']\n order.completed_at = datetime.utcnow()\n save_to_db(order)\n send_email_for_after_purchase(order.user.email, order.get_invoice_number(),\n 
url_for('ticketing.view_order_after_payment',\n order_identifier=order.identifier, _external=True))\n return True, order\n else:\n return False, capture_result['L_SHORTMESSAGE0']\n else:\n return False, 'Payer ID missing. Payment flow tampered.'\n\n @staticmethod\n def create_edit_discount_code(form, event_id, discount_code_id=None):\n if not discount_code_id:\n discount_code = DiscountCode()\n else:\n discount_code = TicketingManager.get_discount_code(event_id, discount_code_id)\n discount_code.code = form.get('code')\n discount_code.value = form.get('value')\n discount_code.type = form.get('value_type')\n discount_code.min_quantity = form.get('min_quantity', None)\n discount_code.max_quantity = form.get('max_quantity', None)\n discount_code.tickets_number = form.get('tickets_number')\n discount_code.event_id = event_id\n\n if discount_code.min_quantity == \"\":\n discount_code.min_quantity = None\n if discount_code.max_quantity == \"\":\n discount_code.max_quantity = None\n if discount_code.tickets_number == \"\":\n discount_code.tickets_number = None\n\n try:\n discount_code.valid_from = datetime.strptime(form.get('start_date', None) + ' ' +\n form.get('start_time', None), '%m/%d/%Y %H:%M')\n except:\n discount_code.valid_from = None\n\n try:\n discount_code.valid_till = datetime.strptime(form.get('end_date', None) + ' ' +\n form.get('end_time', None), '%m/%d/%Y %H:%M')\n except:\n discount_code.valid_till = None\n\n discount_code.tickets = \",\".join(form.getlist('tickets[]'))\n\n save_to_db(discount_code)\n\n return discount_code\n", "path": "app/helpers/ticketing.py"}], "after_files": [{"content": "\"\"\"Copyright 2016 Niranjan Rajendran\"\"\"\nimport binascii\nimport os\nimport uuid\n\nfrom datetime import timedelta, datetime\n\nfrom flask import url_for\n\nfrom flask.ext import login\n\nfrom app.models import db\nfrom app.helpers.cache import cache\nfrom app.helpers.data import save_to_db\nfrom app.helpers.helpers import string_empty, send_email_for_after_purchase, get_count\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.data import DataManager\n\nfrom app.helpers.payment import StripePaymentsManager, represents_int, PayPalPaymentsManager\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.order import OrderTicket\nfrom app.models.event import Event\nfrom app.models.user_detail import UserDetail\nfrom app.models.discount_code import DiscountCode\n\nfrom app.helpers.helpers import send_email_after_account_create_with_password\n\n\nclass TicketingManager(object):\n \"\"\"All ticketing and orders related functions\"\"\"\n\n @staticmethod\n @cache.memoize(50)\n def get_ticket(ticket_id):\n return Ticket.query.get(ticket_id)\n\n @staticmethod\n def get_orders_of_user(user_id=None, upcoming_events=True):\n \"\"\"\n :return: Return all order objects with the current user\n \"\"\"\n if not user_id:\n user_id = login.current_user.id\n query = Order.query.join(Order.event) \\\n .filter(Order.user_id == user_id) \\\n .filter(Order.status == 'completed')\n if upcoming_events:\n return query.filter(Event.start_time >= datetime.now())\n else:\n return query.filter(Event.end_time < datetime.now())\n\n @staticmethod\n def get_orders(event_id=None, status=None, from_date=None, to_date=None):\n if event_id:\n if status:\n orders = Order.query.filter_by(event_id=event_id).filter_by(status=status) \\\n .filter(Order.user_id.isnot(None))\n else:\n orders = 
Order.query.filter_by(event_id=event_id).filter(Order.user_id.isnot(None))\n else:\n if status:\n orders = Order.query.filter_by(status=status).filter(Order.user_id.isnot(None))\n else:\n orders = Order.query.filter(Order.user_id.isnot(None))\n\n if from_date:\n orders = orders.filter(Order.created_at >= from_date)\n if to_date:\n orders = orders.filter(Order.created_at <= to_date)\n return orders.all()\n\n @staticmethod\n def get_orders_count(event_id, status='completed'):\n return get_count(Order.query.filter_by(event_id=event_id).filter(Order.user_id.isnot(None))\n .filter_by(status=status))\n\n @staticmethod\n def get_orders_count_by_type(event_id, type='free'):\n return get_count(Order.query.filter_by(event_id=event_id).filter_by(status='completed')\n .filter(Ticket.type == type))\n\n @staticmethod\n def get_all_orders_count_by_type(type='free'):\n return get_count(Order.query.filter_by(status='completed').filter(Ticket.type == type))\n\n @staticmethod\n def get_max_orders_count(event_id, type='free'):\n ticket = Ticket.query.filter_by(event_id=event_id).filter_by(type=type).first()\n if ticket:\n return ticket.quantity\n else:\n return 0\n\n @staticmethod\n def get_order_expiry():\n return 10\n\n @staticmethod\n def get_new_order_identifier():\n identifier = str(uuid.uuid4())\n count = get_count(Order.query.filter_by(identifier=identifier))\n if count == 0:\n return identifier\n else:\n return TicketingManager.get_new_order_identifier()\n\n @staticmethod\n def get_order(order_id):\n return Ticket.query.get(order_id)\n\n @staticmethod\n def get_order_by_identifier(identifier):\n return Order.query.filter_by(identifier=identifier).first()\n\n @staticmethod\n def get_discount_codes(event_id):\n return DiscountCode.query.filter_by(event_id=event_id).all()\n\n @staticmethod\n def get_discount_code(event_id, discount_code):\n if represents_int(discount_code):\n return DiscountCode.query.get(discount_code)\n else:\n return DiscountCode.query.filter_by(code=discount_code).first()\n\n @staticmethod\n def get_or_create_user_by_email(email, data=None):\n user = DataGetter.get_user_by_email(email, False)\n if not user:\n password = binascii.b2a_hex(os.urandom(4))\n user_data = [email, password]\n user = DataManager.create_user(user_data)\n send_email_after_account_create_with_password({\n 'email': email,\n 'password': password\n })\n if user.user_detail:\n user.user_detail.firstname = data['firstname']\n user.user_detail.lastname = data['lastname']\n else:\n user_detail = UserDetail(firstname=data['firstname'], lastname=data['lastname'])\n user.user_detail = user_detail\n\n save_to_db(user)\n return user\n\n @staticmethod\n def get_and_set_expiry(identifier, override=False):\n if type(identifier) is Order:\n order = identifier\n elif represents_int(identifier):\n order = TicketingManager.get_order(identifier)\n else:\n order = TicketingManager.get_order_by_identifier(identifier)\n\n if order and not order.paid_via:\n if override \\\n or (order.status != 'completed' and\n (order.created_at + timedelta(\n minutes=TicketingManager.get_order_expiry())) < datetime.utcnow()):\n order.status = 'expired'\n save_to_db(order)\n return order\n\n @staticmethod\n def create_order(form, from_organizer=False):\n order = Order()\n order.status = 'pending'\n order.identifier = TicketingManager.get_new_order_identifier()\n order.event_id = form.get('event_id')\n\n if from_organizer:\n order.paid_via = form.get('payment_via')\n\n ticket_ids = form.getlist('ticket_ids[]')\n ticket_quantity = 
form.getlist('ticket_quantities[]')\n\n ticket_subtotals = []\n if from_organizer:\n ticket_subtotals = form.getlist('ticket_subtotals[]')\n\n amount = 0\n for index, id in enumerate(ticket_ids):\n if not string_empty(id) and int(ticket_quantity[index]) > 0:\n with db.session.no_autoflush:\n order_ticket = OrderTicket()\n order_ticket.ticket = TicketingManager.get_ticket(id)\n order_ticket.quantity = int(ticket_quantity[index])\n order.tickets.append(order_ticket)\n\n if from_organizer:\n amount += int(ticket_subtotals[index])\n else:\n amount += (order_ticket.ticket.price * order_ticket.quantity)\n\n order.amount = amount\n\n if login.current_user.is_authenticated:\n order.user_id = login.current_user.id\n\n save_to_db(order)\n return order\n\n @staticmethod\n def initiate_order_payment(form):\n identifier = form['identifier']\n email = form['email']\n\n order = TicketingManager.get_and_set_expiry(identifier)\n\n if order:\n\n user = TicketingManager.get_or_create_user_by_email(email, form)\n order.user_id = user.id\n\n if order.amount > 0 \\\n and (not order.paid_via\n or (order.paid_via\n and (order.paid_via == 'stripe'\n or order.paid_via == 'paypal'))):\n\n country = form['country']\n address = form['address']\n city = form['city']\n state = form['state']\n zipcode = form['zipcode']\n order.address = address\n order.city = city\n order.state = state\n order.country = country\n order.zipcode = zipcode\n order.status = 'initialized'\n ticket_holder = TicketHolder(name=user.user_detail.fullname,\n email=email, address=address,\n city=city, state=state, country=country, order_id=order.id)\n else:\n order.status = 'completed'\n order.completed_at = datetime.utcnow()\n if not order.paid_via:\n order.paid_via = 'free'\n ticket_holder = TicketHolder(name=user.user_detail.fullname, email=email, order_id=order.id)\n # add attendee role to user\n DataManager.add_attendee_role_to_event(user, order.event_id)\n # save items\n save_to_db(order)\n save_to_db(ticket_holder)\n\n return order\n else:\n return False\n\n @staticmethod\n def charge_stripe_order_payment(form):\n order = TicketingManager.get_and_set_expiry(form['identifier'])\n order.stripe_token = form['stripe_token_id']\n save_to_db(order)\n\n charge = StripePaymentsManager.capture_payment(order)\n if charge:\n order.paid_via = 'stripe'\n order.payment_mode = charge.source.object\n order.brand = charge.source.brand\n order.exp_month = charge.source.exp_month\n order.exp_year = charge.source.exp_year\n order.last4 = charge.source.last4\n order.transaction_id = charge.id\n order.status = 'completed'\n order.completed_at = datetime.utcnow()\n save_to_db(order)\n\n send_email_for_after_purchase(order.user.email, order.get_invoice_number(),\n url_for('ticketing.view_order_after_payment',\n order_identifier=order.identifier, _external=True))\n return True, order\n else:\n return False, 'Error'\n\n @staticmethod\n def charge_paypal_order_payment(order):\n payment_details = PayPalPaymentsManager.get_approved_payment_details(order)\n if 'PAYERID' in payment_details:\n capture_result = PayPalPaymentsManager.capture_payment(order, payment_details['PAYERID'])\n if capture_result['ACK'] == 'Success':\n order.paid_via = 'paypal'\n order.status = 'completed'\n order.transaction_id = capture_result['PAYMENTINFO_0_TRANSACTIONID']\n order.completed_at = datetime.utcnow()\n save_to_db(order)\n send_email_for_after_purchase(order.user.email, order.get_invoice_number(),\n url_for('ticketing.view_order_after_payment',\n order_identifier=order.identifier, 
_external=True))\n return True, order\n else:\n return False, capture_result['L_SHORTMESSAGE0']\n else:\n return False, 'Payer ID missing. Payment flow tampered.'\n\n @staticmethod\n def create_edit_discount_code(form, event_id, discount_code_id=None):\n if not discount_code_id:\n discount_code = DiscountCode()\n else:\n discount_code = TicketingManager.get_discount_code(event_id, discount_code_id)\n discount_code.code = form.get('code')\n discount_code.value = form.get('value')\n discount_code.type = form.get('value_type')\n discount_code.min_quantity = form.get('min_quantity', None)\n discount_code.max_quantity = form.get('max_quantity', None)\n discount_code.tickets_number = form.get('tickets_number')\n discount_code.event_id = event_id\n\n if discount_code.min_quantity == \"\":\n discount_code.min_quantity = None\n if discount_code.max_quantity == \"\":\n discount_code.max_quantity = None\n if discount_code.tickets_number == \"\":\n discount_code.tickets_number = None\n\n try:\n discount_code.valid_from = datetime.strptime(form.get('start_date', None) + ' ' +\n form.get('start_time', None), '%m/%d/%Y %H:%M')\n except:\n discount_code.valid_from = None\n\n try:\n discount_code.valid_till = datetime.strptime(form.get('end_date', None) + ' ' +\n form.get('end_time', None), '%m/%d/%Y %H:%M')\n except:\n discount_code.valid_till = None\n\n discount_code.tickets = \",\".join(form.getlist('tickets[]'))\n\n save_to_db(discount_code)\n\n return discount_code\n", "path": "app/helpers/ticketing.py"}]} | 3,783 | 315 |
gh_patches_debug_10850 | rasdani/github-patches | git_diff | mne-tools__mne-python-10894 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
update path handling in examples/preprocessing/otp.py
all of our `data_path` functions now return `pathlib.Path` objects, which allows things like the following:
https://github.com/mne-tools/mne-python/blob/37a9e5587443109515a85cdc4396af4e014ecf82/examples/datasets/brainstorm_data.py#L32-L35
Compare this to the old pattern (using `os.path.join`, often imported as `op.join`):
```py
raw_path = os.path.join(data_path, 'MEG', 'bst_raw',
'subj001_somatosensory_20111109_01_AUX-f.ds')
```
or the *really* old pattern (using the `+` operator on plain strings):
```py
raw_path = (data_path + '/MEG/bst_raw/' +
'subj001_somatosensory_20111109_01_AUX-f.ds')
```
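For contrast, the pathlib-style form that the new return type enables (a sketch reusing the same path pieces from the examples above; the exact call in the linked example may differ) looks like:

```py
raw_path = data_path / 'MEG' / 'bst_raw' / 'subj001_somatosensory_20111109_01_AUX-f.ds'
```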
Update the paths in the file mentioned in the issue title to use the new pathlib-style, which is considered the modern, best-practice way to handle file paths.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/preprocessing/otp.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 .. _ex-otp:
4
5 ===========================================================
6 Plot sensor denoising using oversampled temporal projection
7 ===========================================================
8
9 This demonstrates denoising using the OTP algorithm :footcite:`LarsonTaulu2018`
10 on data with with sensor artifacts (flux jumps) and random noise.
11 """
12 # Author: Eric Larson <[email protected]>
13 #
14 # License: BSD-3-Clause
15
16 # %%
17
18 import os.path as op
19 import mne
20 import numpy as np
21
22 from mne import find_events, fit_dipole
23 from mne.datasets.brainstorm import bst_phantom_elekta
24 from mne.io import read_raw_fif
25
26 print(__doc__)
27
28 # %%
29 # Plot the phantom data, lowpassed to get rid of high-frequency artifacts.
30 # We also crop to a single 10-second segment for speed.
31 # Notice that there are two large flux jumps on channel 1522 that could
32 # spread to other channels when performing subsequent spatial operations
33 # (e.g., Maxwell filtering, SSP, or ICA).
34
35 dipole_number = 1
36 data_path = bst_phantom_elekta.data_path()
37 raw = read_raw_fif(
38 op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif'))
39 raw.crop(40., 50.).load_data()
40 order = list(range(160, 170))
41 raw.copy().filter(0., 40.).plot(order=order, n_channels=10)
42
43 # %%
44 # Now we can clean the data with OTP, lowpass, and plot. The flux jumps have
45 # been suppressed alongside the random sensor noise.
46
47 raw_clean = mne.preprocessing.oversampled_temporal_projection(raw)
48 raw_clean.filter(0., 40.)
49 raw_clean.plot(order=order, n_channels=10)
50
51
52 # %%
53 # We can also look at the effect on single-trial phantom localization.
54 # See the :ref:`tut-brainstorm-elekta-phantom`
55 # for more information. Here we use a version that does single-trial
56 # localization across the 17 trials are in our 10-second window:
57
58 def compute_bias(raw):
59 events = find_events(raw, 'STI201', verbose=False)
60 events = events[1:] # first one has an artifact
61 tmin, tmax = -0.2, 0.1
62 epochs = mne.Epochs(raw, events, dipole_number, tmin, tmax,
63 baseline=(None, -0.01), preload=True, verbose=False)
64 sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None,
65 verbose=False)
66 cov = mne.compute_covariance(epochs, tmax=0, method='oas',
67 rank=None, verbose=False)
68 idx = epochs.time_as_index(0.036)[0]
69 data = epochs.get_data()[:, :, idx].T
70 evoked = mne.EvokedArray(data, epochs.info, tmin=0.)
71 dip = fit_dipole(evoked, cov, sphere, n_jobs=None, verbose=False)[0]
72 actual_pos = mne.dipole.get_phantom_dipoles()[0][dipole_number - 1]
73 misses = 1000 * np.linalg.norm(dip.pos - actual_pos, axis=-1)
74 return misses
75
76
77 bias = compute_bias(raw)
78 print('Raw bias: %0.1fmm (worst: %0.1fmm)'
79 % (np.mean(bias), np.max(bias)))
80 bias_clean = compute_bias(raw_clean)
81 print('OTP bias: %0.1fmm (worst: %0.1fmm)'
82 % (np.mean(bias_clean), np.max(bias_clean),))
83
84 # %%
85 # References
86 # ----------
87 # .. footbibliography::
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/preprocessing/otp.py b/examples/preprocessing/otp.py
--- a/examples/preprocessing/otp.py
+++ b/examples/preprocessing/otp.py
@@ -15,7 +15,6 @@
# %%
-import os.path as op
import mne
import numpy as np
@@ -34,8 +33,7 @@
dipole_number = 1
data_path = bst_phantom_elekta.data_path()
-raw = read_raw_fif(
- op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif'))
+raw = read_raw_fif(data_path / 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
raw.crop(40., 50.).load_data()
order = list(range(160, 170))
raw.copy().filter(0., 40.).plot(order=order, n_channels=10)
| {"golden_diff": "diff --git a/examples/preprocessing/otp.py b/examples/preprocessing/otp.py\n--- a/examples/preprocessing/otp.py\n+++ b/examples/preprocessing/otp.py\n@@ -15,7 +15,6 @@\n \n # %%\n \n-import os.path as op\n import mne\n import numpy as np\n \n@@ -34,8 +33,7 @@\n \n dipole_number = 1\n data_path = bst_phantom_elekta.data_path()\n-raw = read_raw_fif(\n- op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif'))\n+raw = read_raw_fif(data_path / 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')\n raw.crop(40., 50.).load_data()\n order = list(range(160, 170))\n raw.copy().filter(0., 40.).plot(order=order, n_channels=10)\n", "issue": "update path handling in examples/preprocessing/otp.py\nall of our `data_path` functions now return `pathlib.Path` objects, which allows things like the following:\r\n\r\nhttps://github.com/mne-tools/mne-python/blob/37a9e5587443109515a85cdc4396af4e014ecf82/examples/datasets/brainstorm_data.py#L32-L35\r\n\r\nCompare this to the old pattern (using `os.path.join`, often imported as `op.join`):\r\n\r\n```py\r\nraw_path = os.path.join(data_path, 'MEG', 'bst_raw',\r\n 'subj001_somatosensory_20111109_01_AUX-f.ds')\r\n```\r\n\r\nor the *really* old pattern (using the `+` operator on plain strings):\r\n\r\n```py\r\nraw_path = (data_path + '/MEG/bst_raw/' +\r\n 'subj001_somatosensory_20111109_01_AUX-f.ds')\r\n```\r\n\r\nUpdate the paths in the file mentioned in the issue title to use the new pathlib-style, which is considered the modern, best-practice way to handle file paths.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. _ex-otp:\n\n===========================================================\nPlot sensor denoising using oversampled temporal projection\n===========================================================\n\nThis demonstrates denoising using the OTP algorithm :footcite:`LarsonTaulu2018`\non data with with sensor artifacts (flux jumps) and random noise.\n\"\"\"\n# Author: Eric Larson <[email protected]>\n#\n# License: BSD-3-Clause\n\n# %%\n\nimport os.path as op\nimport mne\nimport numpy as np\n\nfrom mne import find_events, fit_dipole\nfrom mne.datasets.brainstorm import bst_phantom_elekta\nfrom mne.io import read_raw_fif\n\nprint(__doc__)\n\n# %%\n# Plot the phantom data, lowpassed to get rid of high-frequency artifacts.\n# We also crop to a single 10-second segment for speed.\n# Notice that there are two large flux jumps on channel 1522 that could\n# spread to other channels when performing subsequent spatial operations\n# (e.g., Maxwell filtering, SSP, or ICA).\n\ndipole_number = 1\ndata_path = bst_phantom_elekta.data_path()\nraw = read_raw_fif(\n op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif'))\nraw.crop(40., 50.).load_data()\norder = list(range(160, 170))\nraw.copy().filter(0., 40.).plot(order=order, n_channels=10)\n\n# %%\n# Now we can clean the data with OTP, lowpass, and plot. The flux jumps have\n# been suppressed alongside the random sensor noise.\n\nraw_clean = mne.preprocessing.oversampled_temporal_projection(raw)\nraw_clean.filter(0., 40.)\nraw_clean.plot(order=order, n_channels=10)\n\n\n# %%\n# We can also look at the effect on single-trial phantom localization.\n# See the :ref:`tut-brainstorm-elekta-phantom`\n# for more information. 
Here we use a version that does single-trial\n# localization across the 17 trials are in our 10-second window:\n\ndef compute_bias(raw):\n events = find_events(raw, 'STI201', verbose=False)\n events = events[1:] # first one has an artifact\n tmin, tmax = -0.2, 0.1\n epochs = mne.Epochs(raw, events, dipole_number, tmin, tmax,\n baseline=(None, -0.01), preload=True, verbose=False)\n sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None,\n verbose=False)\n cov = mne.compute_covariance(epochs, tmax=0, method='oas',\n rank=None, verbose=False)\n idx = epochs.time_as_index(0.036)[0]\n data = epochs.get_data()[:, :, idx].T\n evoked = mne.EvokedArray(data, epochs.info, tmin=0.)\n dip = fit_dipole(evoked, cov, sphere, n_jobs=None, verbose=False)[0]\n actual_pos = mne.dipole.get_phantom_dipoles()[0][dipole_number - 1]\n misses = 1000 * np.linalg.norm(dip.pos - actual_pos, axis=-1)\n return misses\n\n\nbias = compute_bias(raw)\nprint('Raw bias: %0.1fmm (worst: %0.1fmm)'\n % (np.mean(bias), np.max(bias)))\nbias_clean = compute_bias(raw_clean)\nprint('OTP bias: %0.1fmm (worst: %0.1fmm)'\n % (np.mean(bias_clean), np.max(bias_clean),))\n\n# %%\n# References\n# ----------\n# .. footbibliography::\n", "path": "examples/preprocessing/otp.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. _ex-otp:\n\n===========================================================\nPlot sensor denoising using oversampled temporal projection\n===========================================================\n\nThis demonstrates denoising using the OTP algorithm :footcite:`LarsonTaulu2018`\non data with with sensor artifacts (flux jumps) and random noise.\n\"\"\"\n# Author: Eric Larson <[email protected]>\n#\n# License: BSD-3-Clause\n\n# %%\n\nimport mne\nimport numpy as np\n\nfrom mne import find_events, fit_dipole\nfrom mne.datasets.brainstorm import bst_phantom_elekta\nfrom mne.io import read_raw_fif\n\nprint(__doc__)\n\n# %%\n# Plot the phantom data, lowpassed to get rid of high-frequency artifacts.\n# We also crop to a single 10-second segment for speed.\n# Notice that there are two large flux jumps on channel 1522 that could\n# spread to other channels when performing subsequent spatial operations\n# (e.g., Maxwell filtering, SSP, or ICA).\n\ndipole_number = 1\ndata_path = bst_phantom_elekta.data_path()\nraw = read_raw_fif(data_path / 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')\nraw.crop(40., 50.).load_data()\norder = list(range(160, 170))\nraw.copy().filter(0., 40.).plot(order=order, n_channels=10)\n\n# %%\n# Now we can clean the data with OTP, lowpass, and plot. The flux jumps have\n# been suppressed alongside the random sensor noise.\n\nraw_clean = mne.preprocessing.oversampled_temporal_projection(raw)\nraw_clean.filter(0., 40.)\nraw_clean.plot(order=order, n_channels=10)\n\n\n# %%\n# We can also look at the effect on single-trial phantom localization.\n# See the :ref:`tut-brainstorm-elekta-phantom`\n# for more information. 
Here we use a version that does single-trial\n# localization across the 17 trials are in our 10-second window:\n\ndef compute_bias(raw):\n events = find_events(raw, 'STI201', verbose=False)\n events = events[1:] # first one has an artifact\n tmin, tmax = -0.2, 0.1\n epochs = mne.Epochs(raw, events, dipole_number, tmin, tmax,\n baseline=(None, -0.01), preload=True, verbose=False)\n sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None,\n verbose=False)\n cov = mne.compute_covariance(epochs, tmax=0, method='oas',\n rank=None, verbose=False)\n idx = epochs.time_as_index(0.036)[0]\n data = epochs.get_data()[:, :, idx].T\n evoked = mne.EvokedArray(data, epochs.info, tmin=0.)\n dip = fit_dipole(evoked, cov, sphere, n_jobs=None, verbose=False)[0]\n actual_pos = mne.dipole.get_phantom_dipoles()[0][dipole_number - 1]\n misses = 1000 * np.linalg.norm(dip.pos - actual_pos, axis=-1)\n return misses\n\n\nbias = compute_bias(raw)\nprint('Raw bias: %0.1fmm (worst: %0.1fmm)'\n % (np.mean(bias), np.max(bias)))\nbias_clean = compute_bias(raw_clean)\nprint('OTP bias: %0.1fmm (worst: %0.1fmm)'\n % (np.mean(bias_clean), np.max(bias_clean),))\n\n# %%\n# References\n# ----------\n# .. footbibliography::\n", "path": "examples/preprocessing/otp.py"}]} | 1,552 | 213 |
gh_patches_debug_21205 | rasdani/github-patches | git_diff | conan-io__conan-4583 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
virtualbuildenv generator does not set LIB env var correctly in activate_build.bat
Running **conan install** with -g virtualbuildenv generates activate_build.bat.
The first four env vars set are:
```
SET PROMPT=...
SET CL=...
SET LIB=...
SET _LINK_=...
```
CL and \_LINK\_ are set correctly, but LIB does not contain my "lib_path".
According to the following code, what happens is LIB added to a dictionary with the correct lib_path, but then the call to "update" to add "vc_vars" overwrites the LIB value with the Visual Studio LIB value.
**virtualbuildenv.py**
```javascript
if compiler == "Visual Studio":
self.env = VisualStudioBuildEnvironment(conanfile).vars_dict
self.env.update(vcvars_dict(conanfile.settings, output=conanfile.output))
else:
self.env = AutoToolsBuildEnvironment(conanfile).vars_dict
```
This is incorrect. The lib_path needs to be merged with the Visual Studio LIB value instead of being replaced.
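One way to merge them (an illustrative sketch only, assuming `vcvars_dict` returns either a list or a single string per variable) could look like this:

```python
build_env = VisualStudioBuildEnvironment(conanfile).vars_dict
vs_vars = vcvars_dict(conanfile.settings, output=conanfile.output)
for name, value in vs_vars.items():
    if name in build_env and isinstance(build_env[name], list):
        # keep the package lib/include paths and append the Visual Studio ones
        extra = value if isinstance(value, list) else [value]
        build_env[name] = build_env[name] + extra
    else:
        build_env[name] = value
self.env = build_env
```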
I'm using conan v1.12.3 on Windows 10 and Visual Studio 2015.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/generators/virtualbuildenv.py`
Content:
```
1 from conans.client.build.autotools_environment import AutoToolsBuildEnvironment
2 from conans.client.build.visual_environment import VisualStudioBuildEnvironment
3 from conans.client.generators.virtualenv import VirtualEnvGenerator
4 from conans.client.tools.win import vcvars_dict
5
6
7 class VirtualBuildEnvGenerator(VirtualEnvGenerator):
8
9 def __init__(self, conanfile):
10 super(VirtualBuildEnvGenerator, self).__init__(conanfile)
11 self.venv_name = "conanbuildenv"
12 compiler = conanfile.settings.get_safe("compiler")
13 if compiler == "Visual Studio":
14 self.env = VisualStudioBuildEnvironment(conanfile).vars_dict
15 self.env.update(vcvars_dict(conanfile.settings, output=conanfile.output))
16 else:
17 self.env = AutoToolsBuildEnvironment(conanfile).vars_dict
18
19 @property
20 def content(self):
21 tmp = super(VirtualBuildEnvGenerator, self).content
22 ret = {}
23 for name, value in tmp.items():
24 tmp = name.split(".")
25 ret["%s_build.%s" % (tmp[0], tmp[1])] = value
26
27 return ret
28
```
Path: `conans/client/generators/virtualenv.py`
Content:
```
1 import os
2
3 from conans.client.tools.oss import OSInfo
4 from conans.model import Generator
5
6
7 class VirtualEnvGenerator(Generator):
8
9 append_with_spaces = ["CPPFLAGS", "CFLAGS", "CXXFLAGS", "LIBS", "LDFLAGS", "CL"]
10
11 def __init__(self, conanfile):
12 self.conanfile = conanfile
13 self.env = conanfile.env
14 self.venv_name = "conanenv"
15
16 @property
17 def filename(self):
18 return
19
20 def _variable_placeholder(self, flavor, name, append_with_spaces):
21 """
22 :param flavor: flavor of the execution environment
23 :param name: variable name
24 :return: placeholder for the variable name formatted for a certain execution environment.
25 (e.g., cmd, ps1, sh).
26 """
27 if flavor == "cmd":
28 return "%%%s%%" % name
29 if flavor == "ps1":
30 return "$env:%s" % name
31 # flavor == sh
32 return "${%s+ $%s}" % (name, name) if append_with_spaces else "${%s+:$%s}" % (name, name)
33
34 def format_values(self, flavor, variables):
35 """
36 Formats the values for the different supported script language flavors.
37 :param flavor: flavor of the execution environment
38 :param variables: variables to be formatted
39 :return:
40 """
41 variables = variables or self.env.items()
42 if flavor == "cmd":
43 path_sep, quote_elements, quote_full_value = ";", False, False
44 elif flavor == "ps1":
45 path_sep, quote_elements, quote_full_value = ";", False, True
46 elif flavor == "sh":
47 path_sep, quote_elements, quote_full_value = ":", True, False
48
49 ret = []
50 for name, value in variables:
51 # activate values
52 if isinstance(value, list):
53 append_with_spaces = name in self.append_with_spaces
54 placeholder = self._variable_placeholder(flavor, name, append_with_spaces)
55 if append_with_spaces:
56 # Variables joined with spaces look like: CPPFLAGS="one two three"
57 value = " ".join(value+[placeholder])
58 value = "\"%s\"" % value if quote_elements else value
59 else:
60 # Quoted variables joined with pathset may look like:
61 # PATH="one path":"two paths"
62 # Unquoted variables joined with pathset may look like: PATH=one path;two paths
63 value = ["\"%s\"" % v for v in value] if quote_elements else value
64 if flavor == "sh":
65 value = path_sep.join(value) + placeholder
66 else:
67 value = path_sep.join(value + [placeholder])
68 else:
69 # single value
70 value = "\"%s\"" % value if quote_elements else value
71 activate_value = "\"%s\"" % value if quote_full_value else value
72
73 # deactivate values
74 value = os.environ.get(name, "")
75 deactivate_value = "\"%s\"" % value if quote_full_value or quote_elements else value
76 ret.append((name, activate_value, deactivate_value))
77 return ret
78
79 def _sh_lines(self):
80 variables = [("OLD_PS1", "$PS1"),
81 ("PS1", "(%s) $PS1" % self.venv_name)]
82 variables.extend(self.env.items())
83
84 activate_lines = []
85 deactivate_lines = ["%s=%s" % ("PS1", "$OLD_PS1"), "export PS1"]
86
87 for name, activate, deactivate in self.format_values("sh", variables):
88 activate_lines.append("%s=%s" % (name, activate))
89 activate_lines.append("export %s" % name)
90 if name != "PS1":
91 if deactivate == '""':
92 deactivate_lines.append("unset %s" % name)
93 else:
94 deactivate_lines.append("%s=%s" % (name, deactivate))
95 deactivate_lines.append("export %s" % name)
96 activate_lines.append('')
97 deactivate_lines.append('')
98 return activate_lines, deactivate_lines
99
100 def _cmd_lines(self):
101 variables = [("PROMPT", "(%s) %%PROMPT%%" % self.venv_name)]
102 variables.extend(self.env.items())
103
104 activate_lines = ["@echo off"]
105 deactivate_lines = ["@echo off"]
106 for name, activate, deactivate in self.format_values("cmd", variables):
107 activate_lines.append("SET %s=%s" % (name, activate))
108 deactivate_lines.append("SET %s=%s" % (name, deactivate))
109 activate_lines.append('')
110 deactivate_lines.append('')
111 return activate_lines, deactivate_lines
112
113 def _ps1_lines(self):
114 activate_lines = ['function global:_old_conan_prompt {""}']
115 activate_lines.append('$function:_old_conan_prompt = $function:prompt')
116 activate_lines.append('function global:prompt { write-host "(%s) " -nonewline; '
117 '& $function:_old_conan_prompt }' % self.venv_name)
118 deactivate_lines = ['$function:prompt = $function:_old_conan_prompt']
119 deactivate_lines.append('remove-item function:_old_conan_prompt')
120 for name, activate, deactivate in self.format_values("ps1", self.env.items()):
121 activate_lines.append('$env:%s = %s' % (name, activate))
122 deactivate_lines.append('$env:%s = %s' % (name, deactivate))
123 activate_lines.append('')
124 return activate_lines, deactivate_lines
125
126 @property
127 def content(self):
128 os_info = OSInfo()
129 result = {}
130 if os_info.is_windows and not os_info.is_posix:
131 activate, deactivate = self._cmd_lines()
132 result["activate.bat"] = os.linesep.join(activate)
133 result["deactivate.bat"] = os.linesep.join(deactivate)
134
135 activate, deactivate = self._ps1_lines()
136 result["activate.ps1"] = os.linesep.join(activate)
137 result["deactivate.ps1"] = os.linesep.join(deactivate)
138
139 activate, deactivate = self._sh_lines()
140 result["activate.sh"] = os.linesep.join(activate)
141 result["deactivate.sh"] = os.linesep.join(deactivate)
142
143 return result
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/generators/virtualbuildenv.py b/conans/client/generators/virtualbuildenv.py
--- a/conans/client/generators/virtualbuildenv.py
+++ b/conans/client/generators/virtualbuildenv.py
@@ -12,7 +12,10 @@
compiler = conanfile.settings.get_safe("compiler")
if compiler == "Visual Studio":
self.env = VisualStudioBuildEnvironment(conanfile).vars_dict
- self.env.update(vcvars_dict(conanfile.settings, output=conanfile.output))
+ settings_vars = vcvars_dict(conanfile.settings, output=conanfile.output)
+ for env_var in self.env:
+ self.env[env_var].extend(settings_vars.pop(env_var, []))
+ self.env.update(settings_vars)
else:
self.env = AutoToolsBuildEnvironment(conanfile).vars_dict
diff --git a/conans/client/generators/virtualenv.py b/conans/client/generators/virtualenv.py
--- a/conans/client/generators/virtualenv.py
+++ b/conans/client/generators/virtualenv.py
@@ -6,7 +6,7 @@
class VirtualEnvGenerator(Generator):
- append_with_spaces = ["CPPFLAGS", "CFLAGS", "CXXFLAGS", "LIBS", "LDFLAGS", "CL"]
+ append_with_spaces = ["CPPFLAGS", "CFLAGS", "CXXFLAGS", "LIBS", "LDFLAGS", "CL", "_LINK_"]
def __init__(self, conanfile):
self.conanfile = conanfile
| {"golden_diff": "diff --git a/conans/client/generators/virtualbuildenv.py b/conans/client/generators/virtualbuildenv.py\n--- a/conans/client/generators/virtualbuildenv.py\n+++ b/conans/client/generators/virtualbuildenv.py\n@@ -12,7 +12,10 @@\n compiler = conanfile.settings.get_safe(\"compiler\")\n if compiler == \"Visual Studio\":\n self.env = VisualStudioBuildEnvironment(conanfile).vars_dict\n- self.env.update(vcvars_dict(conanfile.settings, output=conanfile.output))\n+ settings_vars = vcvars_dict(conanfile.settings, output=conanfile.output)\n+ for env_var in self.env:\n+ self.env[env_var].extend(settings_vars.pop(env_var, []))\n+ self.env.update(settings_vars)\n else:\n self.env = AutoToolsBuildEnvironment(conanfile).vars_dict\n \ndiff --git a/conans/client/generators/virtualenv.py b/conans/client/generators/virtualenv.py\n--- a/conans/client/generators/virtualenv.py\n+++ b/conans/client/generators/virtualenv.py\n@@ -6,7 +6,7 @@\n \n class VirtualEnvGenerator(Generator):\n \n- append_with_spaces = [\"CPPFLAGS\", \"CFLAGS\", \"CXXFLAGS\", \"LIBS\", \"LDFLAGS\", \"CL\"]\n+ append_with_spaces = [\"CPPFLAGS\", \"CFLAGS\", \"CXXFLAGS\", \"LIBS\", \"LDFLAGS\", \"CL\", \"_LINK_\"]\n \n def __init__(self, conanfile):\n self.conanfile = conanfile\n", "issue": "virtualbuildenv generator does not set LIB env var correctly in activate_build.bat\nRunning **conan install** with -g virtualbuildenv generates activate_build.bat.\r\nThe first four env vars set are:\r\n```\r\nSET PROMPT=...\r\nSET CL=...\r\nSET LIB=...\r\nSET_LINK_=...\r\n```\r\nCL and \\_LINK\\_ are set correctly, but LIB does not contain my \"lib_path\".\r\n\r\nAccording to the following code, what happens is LIB added to a dictionary with the correct lib_path, but then the call to \"update\" to add \"vc_vars\" overwrites the LIB value with the Visual Studio LIB value.\r\n\r\n**virtualbuildenv.py**\r\n```javascript\r\n if compiler == \"Visual Studio\":\r\n self.env = VisualStudioBuildEnvironment(conanfile).vars_dict\r\n self.env.update(vcvars_dict(conanfile.settings, output=conanfile.output))\r\n else:\r\n self.env = AutoToolsBuildEnvironment(conanfile).vars_dict\r\n```\r\nThis is incorrect. 
The lib_path needs to be merged with the Visual Studio LIB value instead of being replaced.\r\n\r\nI'm using conan v1.12.3 on Windows 10 and Visual Studio 2015.\r\n\r\n\r\n\n", "before_files": [{"content": "from conans.client.build.autotools_environment import AutoToolsBuildEnvironment\nfrom conans.client.build.visual_environment import VisualStudioBuildEnvironment\nfrom conans.client.generators.virtualenv import VirtualEnvGenerator\nfrom conans.client.tools.win import vcvars_dict\n\n\nclass VirtualBuildEnvGenerator(VirtualEnvGenerator):\n\n def __init__(self, conanfile):\n super(VirtualBuildEnvGenerator, self).__init__(conanfile)\n self.venv_name = \"conanbuildenv\"\n compiler = conanfile.settings.get_safe(\"compiler\")\n if compiler == \"Visual Studio\":\n self.env = VisualStudioBuildEnvironment(conanfile).vars_dict\n self.env.update(vcvars_dict(conanfile.settings, output=conanfile.output))\n else:\n self.env = AutoToolsBuildEnvironment(conanfile).vars_dict\n\n @property\n def content(self):\n tmp = super(VirtualBuildEnvGenerator, self).content\n ret = {}\n for name, value in tmp.items():\n tmp = name.split(\".\")\n ret[\"%s_build.%s\" % (tmp[0], tmp[1])] = value\n\n return ret\n", "path": "conans/client/generators/virtualbuildenv.py"}, {"content": "import os\n\nfrom conans.client.tools.oss import OSInfo\nfrom conans.model import Generator\n\n\nclass VirtualEnvGenerator(Generator):\n\n append_with_spaces = [\"CPPFLAGS\", \"CFLAGS\", \"CXXFLAGS\", \"LIBS\", \"LDFLAGS\", \"CL\"]\n\n def __init__(self, conanfile):\n self.conanfile = conanfile\n self.env = conanfile.env\n self.venv_name = \"conanenv\"\n\n @property\n def filename(self):\n return\n\n def _variable_placeholder(self, flavor, name, append_with_spaces):\n \"\"\"\n :param flavor: flavor of the execution environment\n :param name: variable name\n :return: placeholder for the variable name formatted for a certain execution environment.\n (e.g., cmd, ps1, sh).\n \"\"\"\n if flavor == \"cmd\":\n return \"%%%s%%\" % name\n if flavor == \"ps1\":\n return \"$env:%s\" % name\n # flavor == sh\n return \"${%s+ $%s}\" % (name, name) if append_with_spaces else \"${%s+:$%s}\" % (name, name)\n\n def format_values(self, flavor, variables):\n \"\"\"\n Formats the values for the different supported script language flavors.\n :param flavor: flavor of the execution environment\n :param variables: variables to be formatted\n :return:\n \"\"\"\n variables = variables or self.env.items()\n if flavor == \"cmd\":\n path_sep, quote_elements, quote_full_value = \";\", False, False\n elif flavor == \"ps1\":\n path_sep, quote_elements, quote_full_value = \";\", False, True\n elif flavor == \"sh\":\n path_sep, quote_elements, quote_full_value = \":\", True, False\n\n ret = []\n for name, value in variables:\n # activate values\n if isinstance(value, list):\n append_with_spaces = name in self.append_with_spaces\n placeholder = self._variable_placeholder(flavor, name, append_with_spaces)\n if append_with_spaces:\n # Variables joined with spaces look like: CPPFLAGS=\"one two three\"\n value = \" \".join(value+[placeholder])\n value = \"\\\"%s\\\"\" % value if quote_elements else value\n else:\n # Quoted variables joined with pathset may look like:\n # PATH=\"one path\":\"two paths\"\n # Unquoted variables joined with pathset may look like: PATH=one path;two paths\n value = [\"\\\"%s\\\"\" % v for v in value] if quote_elements else value\n if flavor == \"sh\":\n value = path_sep.join(value) + placeholder\n else:\n value = path_sep.join(value + [placeholder])\n 
else:\n # single value\n value = \"\\\"%s\\\"\" % value if quote_elements else value\n activate_value = \"\\\"%s\\\"\" % value if quote_full_value else value\n\n # deactivate values\n value = os.environ.get(name, \"\")\n deactivate_value = \"\\\"%s\\\"\" % value if quote_full_value or quote_elements else value\n ret.append((name, activate_value, deactivate_value))\n return ret\n\n def _sh_lines(self):\n variables = [(\"OLD_PS1\", \"$PS1\"),\n (\"PS1\", \"(%s) $PS1\" % self.venv_name)]\n variables.extend(self.env.items())\n\n activate_lines = []\n deactivate_lines = [\"%s=%s\" % (\"PS1\", \"$OLD_PS1\"), \"export PS1\"]\n\n for name, activate, deactivate in self.format_values(\"sh\", variables):\n activate_lines.append(\"%s=%s\" % (name, activate))\n activate_lines.append(\"export %s\" % name)\n if name != \"PS1\":\n if deactivate == '\"\"':\n deactivate_lines.append(\"unset %s\" % name)\n else:\n deactivate_lines.append(\"%s=%s\" % (name, deactivate))\n deactivate_lines.append(\"export %s\" % name)\n activate_lines.append('')\n deactivate_lines.append('')\n return activate_lines, deactivate_lines\n\n def _cmd_lines(self):\n variables = [(\"PROMPT\", \"(%s) %%PROMPT%%\" % self.venv_name)]\n variables.extend(self.env.items())\n\n activate_lines = [\"@echo off\"]\n deactivate_lines = [\"@echo off\"]\n for name, activate, deactivate in self.format_values(\"cmd\", variables):\n activate_lines.append(\"SET %s=%s\" % (name, activate))\n deactivate_lines.append(\"SET %s=%s\" % (name, deactivate))\n activate_lines.append('')\n deactivate_lines.append('')\n return activate_lines, deactivate_lines\n\n def _ps1_lines(self):\n activate_lines = ['function global:_old_conan_prompt {\"\"}']\n activate_lines.append('$function:_old_conan_prompt = $function:prompt')\n activate_lines.append('function global:prompt { write-host \"(%s) \" -nonewline; '\n '& $function:_old_conan_prompt }' % self.venv_name)\n deactivate_lines = ['$function:prompt = $function:_old_conan_prompt']\n deactivate_lines.append('remove-item function:_old_conan_prompt')\n for name, activate, deactivate in self.format_values(\"ps1\", self.env.items()):\n activate_lines.append('$env:%s = %s' % (name, activate))\n deactivate_lines.append('$env:%s = %s' % (name, deactivate))\n activate_lines.append('')\n return activate_lines, deactivate_lines\n\n @property\n def content(self):\n os_info = OSInfo()\n result = {}\n if os_info.is_windows and not os_info.is_posix:\n activate, deactivate = self._cmd_lines()\n result[\"activate.bat\"] = os.linesep.join(activate)\n result[\"deactivate.bat\"] = os.linesep.join(deactivate)\n\n activate, deactivate = self._ps1_lines()\n result[\"activate.ps1\"] = os.linesep.join(activate)\n result[\"deactivate.ps1\"] = os.linesep.join(deactivate)\n\n activate, deactivate = self._sh_lines()\n result[\"activate.sh\"] = os.linesep.join(activate)\n result[\"deactivate.sh\"] = os.linesep.join(deactivate)\n\n return result\n", "path": "conans/client/generators/virtualenv.py"}], "after_files": [{"content": "from conans.client.build.autotools_environment import AutoToolsBuildEnvironment\nfrom conans.client.build.visual_environment import VisualStudioBuildEnvironment\nfrom conans.client.generators.virtualenv import VirtualEnvGenerator\nfrom conans.client.tools.win import vcvars_dict\n\n\nclass VirtualBuildEnvGenerator(VirtualEnvGenerator):\n\n def __init__(self, conanfile):\n super(VirtualBuildEnvGenerator, self).__init__(conanfile)\n self.venv_name = \"conanbuildenv\"\n compiler = conanfile.settings.get_safe(\"compiler\")\n if 
compiler == \"Visual Studio\":\n self.env = VisualStudioBuildEnvironment(conanfile).vars_dict\n settings_vars = vcvars_dict(conanfile.settings, output=conanfile.output)\n for env_var in self.env:\n self.env[env_var].extend(settings_vars.pop(env_var, []))\n self.env.update(settings_vars)\n else:\n self.env = AutoToolsBuildEnvironment(conanfile).vars_dict\n\n @property\n def content(self):\n tmp = super(VirtualBuildEnvGenerator, self).content\n ret = {}\n for name, value in tmp.items():\n tmp = name.split(\".\")\n ret[\"%s_build.%s\" % (tmp[0], tmp[1])] = value\n\n return ret\n", "path": "conans/client/generators/virtualbuildenv.py"}, {"content": "import os\n\nfrom conans.client.tools.oss import OSInfo\nfrom conans.model import Generator\n\n\nclass VirtualEnvGenerator(Generator):\n\n append_with_spaces = [\"CPPFLAGS\", \"CFLAGS\", \"CXXFLAGS\", \"LIBS\", \"LDFLAGS\", \"CL\", \"_LINK_\"]\n\n def __init__(self, conanfile):\n self.conanfile = conanfile\n self.env = conanfile.env\n self.venv_name = \"conanenv\"\n\n @property\n def filename(self):\n return\n\n def _variable_placeholder(self, flavor, name, append_with_spaces):\n \"\"\"\n :param flavor: flavor of the execution environment\n :param name: variable name\n :return: placeholder for the variable name formatted for a certain execution environment.\n (e.g., cmd, ps1, sh).\n \"\"\"\n if flavor == \"cmd\":\n return \"%%%s%%\" % name\n if flavor == \"ps1\":\n return \"$env:%s\" % name\n # flavor == sh\n return \"${%s+ $%s}\" % (name, name) if append_with_spaces else \"${%s+:$%s}\" % (name, name)\n\n def format_values(self, flavor, variables):\n \"\"\"\n Formats the values for the different supported script language flavors.\n :param flavor: flavor of the execution environment\n :param variables: variables to be formatted\n :return:\n \"\"\"\n variables = variables or self.env.items()\n if flavor == \"cmd\":\n path_sep, quote_elements, quote_full_value = \";\", False, False\n elif flavor == \"ps1\":\n path_sep, quote_elements, quote_full_value = \";\", False, True\n elif flavor == \"sh\":\n path_sep, quote_elements, quote_full_value = \":\", True, False\n\n ret = []\n for name, value in variables:\n # activate values\n if isinstance(value, list):\n append_with_spaces = name in self.append_with_spaces\n placeholder = self._variable_placeholder(flavor, name, append_with_spaces)\n if append_with_spaces:\n # Variables joined with spaces look like: CPPFLAGS=\"one two three\"\n value = \" \".join(value+[placeholder])\n value = \"\\\"%s\\\"\" % value if quote_elements else value\n else:\n # Quoted variables joined with pathset may look like:\n # PATH=\"one path\":\"two paths\"\n # Unquoted variables joined with pathset may look like: PATH=one path;two paths\n value = [\"\\\"%s\\\"\" % v for v in value] if quote_elements else value\n if flavor == \"sh\":\n value = path_sep.join(value) + placeholder\n else:\n value = path_sep.join(value + [placeholder])\n else:\n # single value\n value = \"\\\"%s\\\"\" % value if quote_elements else value\n activate_value = \"\\\"%s\\\"\" % value if quote_full_value else value\n\n # deactivate values\n value = os.environ.get(name, \"\")\n deactivate_value = \"\\\"%s\\\"\" % value if quote_full_value or quote_elements else value\n ret.append((name, activate_value, deactivate_value))\n return ret\n\n def _sh_lines(self):\n variables = [(\"OLD_PS1\", \"$PS1\"),\n (\"PS1\", \"(%s) $PS1\" % self.venv_name)]\n variables.extend(self.env.items())\n\n activate_lines = []\n deactivate_lines = [\"%s=%s\" % (\"PS1\", 
\"$OLD_PS1\"), \"export PS1\"]\n\n for name, activate, deactivate in self.format_values(\"sh\", variables):\n activate_lines.append(\"%s=%s\" % (name, activate))\n activate_lines.append(\"export %s\" % name)\n if name != \"PS1\":\n if deactivate == '\"\"':\n deactivate_lines.append(\"unset %s\" % name)\n else:\n deactivate_lines.append(\"%s=%s\" % (name, deactivate))\n deactivate_lines.append(\"export %s\" % name)\n activate_lines.append('')\n deactivate_lines.append('')\n return activate_lines, deactivate_lines\n\n def _cmd_lines(self):\n variables = [(\"PROMPT\", \"(%s) %%PROMPT%%\" % self.venv_name)]\n variables.extend(self.env.items())\n\n activate_lines = [\"@echo off\"]\n deactivate_lines = [\"@echo off\"]\n for name, activate, deactivate in self.format_values(\"cmd\", variables):\n activate_lines.append(\"SET %s=%s\" % (name, activate))\n deactivate_lines.append(\"SET %s=%s\" % (name, deactivate))\n activate_lines.append('')\n deactivate_lines.append('')\n return activate_lines, deactivate_lines\n\n def _ps1_lines(self):\n activate_lines = ['function global:_old_conan_prompt {\"\"}']\n activate_lines.append('$function:_old_conan_prompt = $function:prompt')\n activate_lines.append('function global:prompt { write-host \"(%s) \" -nonewline; '\n '& $function:_old_conan_prompt }' % self.venv_name)\n deactivate_lines = ['$function:prompt = $function:_old_conan_prompt']\n deactivate_lines.append('remove-item function:_old_conan_prompt')\n for name, activate, deactivate in self.format_values(\"ps1\", self.env.items()):\n activate_lines.append('$env:%s = %s' % (name, activate))\n deactivate_lines.append('$env:%s = %s' % (name, deactivate))\n activate_lines.append('')\n return activate_lines, deactivate_lines\n\n @property\n def content(self):\n os_info = OSInfo()\n result = {}\n if os_info.is_windows and not os_info.is_posix:\n activate, deactivate = self._cmd_lines()\n result[\"activate.bat\"] = os.linesep.join(activate)\n result[\"deactivate.bat\"] = os.linesep.join(deactivate)\n\n activate, deactivate = self._ps1_lines()\n result[\"activate.ps1\"] = os.linesep.join(activate)\n result[\"deactivate.ps1\"] = os.linesep.join(deactivate)\n\n activate, deactivate = self._sh_lines()\n result[\"activate.sh\"] = os.linesep.join(activate)\n result[\"deactivate.sh\"] = os.linesep.join(deactivate)\n\n return result\n", "path": "conans/client/generators/virtualenv.py"}]} | 2,512 | 357 |
gh_patches_debug_43985 | rasdani/github-patches | git_diff | spyder-ide__spyder-7478 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check for new conda packages in Anaconda's repos instead of Github
Right now we're looking for new releases only on Github. But this gives a bad UX in Anaconda because they take several days to create new conda packages for Spyder.
So in that case we need to look for new packages (for example) in
https://repo.continuum.io/pkgs/main/win-64/
to only show the update message when new packages are placed there.
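A rough sketch of how the per-platform package index could be selected (file names assumed to follow the standard Anaconda repodata layout):

```python
import os
import sys

base_url = 'https://repo.continuum.io/pkgs/main'
if os.name == 'nt':
    url = base_url + '/win-64/repodata.json'
elif sys.platform == 'darwin':
    url = base_url + '/osx-64/repodata.json'
else:
    url = base_url + '/linux-64/repodata.json'
# 'url' would then be queried for available spyder package versions
```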
@dalthviz, please work on this one.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spyder/workers/updates.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 # Standard library imports
8 import json
9 import ssl
10
11 # Third party imports
12 from qtpy.QtCore import QObject, Signal
13
14 # Local imports
15 from spyder import __version__
16 from spyder.config.base import _
17 from spyder.py3compat import PY3
18 from spyder.utils.programs import check_version, is_stable_version
19
20
21 if PY3:
22 from urllib.request import urlopen
23 from urllib.error import URLError, HTTPError
24 else:
25 from urllib2 import urlopen, URLError, HTTPError
26
27
28 class WorkerUpdates(QObject):
29 """
30 Worker that checks for releases using the Github API without blocking the
31 Spyder user interface, in case of connections issues.
32 """
33 sig_ready = Signal()
34
35 def __init__(self, parent, startup):
36 QObject.__init__(self)
37 self._parent = parent
38 self.error = None
39 self.latest_release = None
40 self.startup = startup
41
42 def check_update_available(self, version, releases):
43 """Checks if there is an update available.
44
45 It takes as parameters the current version of Spyder and a list of
46 valid cleaned releases in chronological order (what github api returns
47 by default). Example: ['2.3.4', '2.3.3' ...]
48 """
49 if is_stable_version(version):
50 # Remove non stable versions from the list
51 releases = [r for r in releases if is_stable_version(r)]
52
53 latest_release = releases[0]
54
55 if version.endswith('dev'):
56 return (False, latest_release)
57
58 return (check_version(version, latest_release, '<'), latest_release)
59
60 def start(self):
61 """Main method of the WorkerUpdates worker"""
62 self.url = 'https://api.github.com/repos/spyder-ide/spyder/releases'
63 self.update_available = False
64 self.latest_release = __version__
65
66 error_msg = None
67
68 try:
69 if hasattr(ssl, '_create_unverified_context'):
70 # Fix for issue # 2685 [Works only with Python >=2.7.9]
71 # More info: https://www.python.org/dev/peps/pep-0476/#opting-out
72 context = ssl._create_unverified_context()
73 page = urlopen(self.url, context=context)
74 else:
75 page = urlopen(self.url)
76 try:
77 data = page.read()
78
79 # Needed step for python3 compatibility
80 if not isinstance(data, str):
81 data = data.decode()
82
83 data = json.loads(data)
84 releases = [item['tag_name'].replace('v', '') for item in data]
85 version = __version__
86
87 result = self.check_update_available(version, releases)
88 self.update_available, self.latest_release = result
89 except Exception:
90 error_msg = _('Unable to retrieve information.')
91 except HTTPError:
92 error_msg = _('Unable to retrieve information.')
93 except URLError:
94 error_msg = _('Unable to connect to the internet. <br><br>Make '
95 'sure the connection is working properly.')
96 except Exception:
97 error_msg = _('Unable to check for updates.')
98
99 # Don't show dialog when starting up spyder and an error occur
100 if not (self.startup and error_msg is not None):
101 self.error = error_msg
102 self.sig_ready.emit()
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/spyder/workers/updates.py b/spyder/workers/updates.py
--- a/spyder/workers/updates.py
+++ b/spyder/workers/updates.py
@@ -6,7 +6,9 @@
# Standard library imports
import json
+import os
import ssl
+import sys
# Third party imports
from qtpy.QtCore import QObject, Signal
@@ -15,6 +17,7 @@
from spyder import __version__
from spyder.config.base import _
from spyder.py3compat import PY3
+from spyder.config.utils import is_anaconda
from spyder.utils.programs import check_version, is_stable_version
@@ -27,30 +30,36 @@
class WorkerUpdates(QObject):
"""
- Worker that checks for releases using the Github API without blocking the
- Spyder user interface, in case of connections issues.
+ Worker that checks for releases using either the Anaconda
+ default channels or the Github Releases page without
+ blocking the Spyder user interface, in case of connection
+ issues.
"""
sig_ready = Signal()
- def __init__(self, parent, startup):
+ def __init__(self, parent, startup, version=""):
QObject.__init__(self)
self._parent = parent
self.error = None
self.latest_release = None
self.startup = startup
+ self.version = version
- def check_update_available(self, version, releases):
+ def check_update_available(self, version, releases, github=False):
"""Checks if there is an update available.
It takes as parameters the current version of Spyder and a list of
- valid cleaned releases in chronological order (what github api returns
- by default). Example: ['2.3.4', '2.3.3' ...]
+ valid cleaned releases in chronological order.
+ Example: ['2.3.2', '2.3.3' ...] or with github ['2.3.4', '2.3.3' ...]
"""
if is_stable_version(version):
# Remove non stable versions from the list
releases = [r for r in releases if is_stable_version(r)]
- latest_release = releases[0]
+ if github:
+ latest_release = releases[0]
+ else:
+ latest_release = releases[-1]
if version.endswith('dev'):
return (False, latest_release)
@@ -59,7 +68,17 @@
def start(self):
"""Main method of the WorkerUpdates worker"""
- self.url = 'https://api.github.com/repos/spyder-ide/spyder/releases'
+ if is_anaconda():
+ self.url = 'https://repo.anaconda.com/pkgs/main'
+ if os.name == 'nt':
+ self.url += '/win-64/repodata.json'
+ elif sys.platform == 'darwin':
+ self.url += '/osx-64/repodata.json'
+ else:
+ self.url += '/linux-64/repodata.json'
+ else:
+ self.url = ('https://api.github.com/repos/'
+ 'spyder-ide/spyder/releases')
self.update_available = False
self.latest_release = __version__
@@ -81,10 +100,22 @@
data = data.decode()
data = json.loads(data)
- releases = [item['tag_name'].replace('v', '') for item in data]
- version = __version__
-
- result = self.check_update_available(version, releases)
+ if not self.version:
+ self.version = __version__
+
+ if is_anaconda():
+ releases = []
+ for item in data['packages']:
+ if 'spyder' in item and 'spyder-kernels' not in item:
+ releases.append(item.split('-')[1])
+ result = self.check_update_available(self.version,
+ releases)
+ else:
+ releases = [item['tag_name'].replace('v', '')
+ for item in data]
+ result = self.check_update_available(self.version,
+ releases,
+ github=True)
self.update_available, self.latest_release = result
except Exception:
error_msg = _('Unable to retrieve information.')
| {"golden_diff": "diff --git a/spyder/workers/updates.py b/spyder/workers/updates.py\n--- a/spyder/workers/updates.py\n+++ b/spyder/workers/updates.py\n@@ -6,7 +6,9 @@\n \n # Standard library imports\n import json\n+import os\n import ssl\n+import sys\n \n # Third party imports\n from qtpy.QtCore import QObject, Signal\n@@ -15,6 +17,7 @@\n from spyder import __version__\n from spyder.config.base import _\n from spyder.py3compat import PY3\n+from spyder.config.utils import is_anaconda\n from spyder.utils.programs import check_version, is_stable_version\n \n \n@@ -27,30 +30,36 @@\n \n class WorkerUpdates(QObject):\n \"\"\"\n- Worker that checks for releases using the Github API without blocking the\n- Spyder user interface, in case of connections issues.\n+ Worker that checks for releases using either the Anaconda\n+ default channels or the Github Releases page without\n+ blocking the Spyder user interface, in case of connection\n+ issues.\n \"\"\"\n sig_ready = Signal()\n \n- def __init__(self, parent, startup):\n+ def __init__(self, parent, startup, version=\"\"):\n QObject.__init__(self)\n self._parent = parent\n self.error = None\n self.latest_release = None\n self.startup = startup\n+ self.version = version\n \n- def check_update_available(self, version, releases):\n+ def check_update_available(self, version, releases, github=False):\n \"\"\"Checks if there is an update available.\n \n It takes as parameters the current version of Spyder and a list of\n- valid cleaned releases in chronological order (what github api returns\n- by default). Example: ['2.3.4', '2.3.3' ...]\n+ valid cleaned releases in chronological order.\n+ Example: ['2.3.2', '2.3.3' ...] or with github ['2.3.4', '2.3.3' ...]\n \"\"\"\n if is_stable_version(version):\n # Remove non stable versions from the list\n releases = [r for r in releases if is_stable_version(r)]\n \n- latest_release = releases[0]\n+ if github:\n+ latest_release = releases[0]\n+ else:\n+ latest_release = releases[-1]\n \n if version.endswith('dev'):\n return (False, latest_release)\n@@ -59,7 +68,17 @@\n \n def start(self):\n \"\"\"Main method of the WorkerUpdates worker\"\"\"\n- self.url = 'https://api.github.com/repos/spyder-ide/spyder/releases'\n+ if is_anaconda():\n+ self.url = 'https://repo.anaconda.com/pkgs/main'\n+ if os.name == 'nt':\n+ self.url += '/win-64/repodata.json'\n+ elif sys.platform == 'darwin':\n+ self.url += '/osx-64/repodata.json'\n+ else:\n+ self.url += '/linux-64/repodata.json'\n+ else:\n+ self.url = ('https://api.github.com/repos/'\n+ 'spyder-ide/spyder/releases')\n self.update_available = False\n self.latest_release = __version__\n \n@@ -81,10 +100,22 @@\n data = data.decode()\n \n data = json.loads(data)\n- releases = [item['tag_name'].replace('v', '') for item in data]\n- version = __version__\n-\n- result = self.check_update_available(version, releases)\n+ if not self.version:\n+ self.version = __version__\n+\n+ if is_anaconda():\n+ releases = []\n+ for item in data['packages']:\n+ if 'spyder' in item and 'spyder-kernels' not in item:\n+ releases.append(item.split('-')[1])\n+ result = self.check_update_available(self.version,\n+ releases)\n+ else:\n+ releases = [item['tag_name'].replace('v', '')\n+ for item in data]\n+ result = self.check_update_available(self.version,\n+ releases,\n+ github=True)\n self.update_available, self.latest_release = result\n except Exception:\n error_msg = _('Unable to retrieve information.')\n", "issue": "Check for new conda packages in Anaconda's repos instead of Github\nRight now we're 
looking for new releases only on Github. But this gives a bad UX in Anaconda because they take several days to create new conda packages for Spyder.\r\n\r\nSo in that case we need to look for new packages (for example) in\r\n\r\nhttps://repo.continuum.io/pkgs/main/win-64/\r\n\r\nto only show the update message when new packages are placed there.\r\n\r\n@dalthviz, please work on this one.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n# Standard library imports\nimport json\nimport ssl\n\n# Third party imports\nfrom qtpy.QtCore import QObject, Signal\n\n# Local imports\nfrom spyder import __version__\nfrom spyder.config.base import _\nfrom spyder.py3compat import PY3\nfrom spyder.utils.programs import check_version, is_stable_version\n\n\nif PY3:\n from urllib.request import urlopen\n from urllib.error import URLError, HTTPError\nelse:\n from urllib2 import urlopen, URLError, HTTPError\n\n\nclass WorkerUpdates(QObject):\n \"\"\"\n Worker that checks for releases using the Github API without blocking the\n Spyder user interface, in case of connections issues.\n \"\"\"\n sig_ready = Signal()\n\n def __init__(self, parent, startup):\n QObject.__init__(self)\n self._parent = parent\n self.error = None\n self.latest_release = None\n self.startup = startup\n\n def check_update_available(self, version, releases):\n \"\"\"Checks if there is an update available.\n\n It takes as parameters the current version of Spyder and a list of\n valid cleaned releases in chronological order (what github api returns\n by default). Example: ['2.3.4', '2.3.3' ...]\n \"\"\"\n if is_stable_version(version):\n # Remove non stable versions from the list\n releases = [r for r in releases if is_stable_version(r)]\n\n latest_release = releases[0]\n\n if version.endswith('dev'):\n return (False, latest_release)\n\n return (check_version(version, latest_release, '<'), latest_release)\n\n def start(self):\n \"\"\"Main method of the WorkerUpdates worker\"\"\"\n self.url = 'https://api.github.com/repos/spyder-ide/spyder/releases'\n self.update_available = False\n self.latest_release = __version__\n\n error_msg = None\n\n try:\n if hasattr(ssl, '_create_unverified_context'):\n # Fix for issue # 2685 [Works only with Python >=2.7.9]\n # More info: https://www.python.org/dev/peps/pep-0476/#opting-out\n context = ssl._create_unverified_context()\n page = urlopen(self.url, context=context)\n else:\n page = urlopen(self.url)\n try:\n data = page.read()\n\n # Needed step for python3 compatibility\n if not isinstance(data, str):\n data = data.decode()\n\n data = json.loads(data)\n releases = [item['tag_name'].replace('v', '') for item in data]\n version = __version__\n\n result = self.check_update_available(version, releases)\n self.update_available, self.latest_release = result\n except Exception:\n error_msg = _('Unable to retrieve information.')\n except HTTPError:\n error_msg = _('Unable to retrieve information.')\n except URLError:\n error_msg = _('Unable to connect to the internet. 
<br><br>Make '\n 'sure the connection is working properly.')\n except Exception:\n error_msg = _('Unable to check for updates.')\n\n # Don't show dialog when starting up spyder and an error occur\n if not (self.startup and error_msg is not None):\n self.error = error_msg\n self.sig_ready.emit()\n", "path": "spyder/workers/updates.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n# Standard library imports\nimport json\nimport os\nimport ssl\nimport sys\n\n# Third party imports\nfrom qtpy.QtCore import QObject, Signal\n\n# Local imports\nfrom spyder import __version__\nfrom spyder.config.base import _\nfrom spyder.py3compat import PY3\nfrom spyder.config.utils import is_anaconda\nfrom spyder.utils.programs import check_version, is_stable_version\n\n\nif PY3:\n from urllib.request import urlopen\n from urllib.error import URLError, HTTPError\nelse:\n from urllib2 import urlopen, URLError, HTTPError\n\n\nclass WorkerUpdates(QObject):\n \"\"\"\n Worker that checks for releases using either the Anaconda\n default channels or the Github Releases page without\n blocking the Spyder user interface, in case of connection\n issues.\n \"\"\"\n sig_ready = Signal()\n\n def __init__(self, parent, startup, version=\"\"):\n QObject.__init__(self)\n self._parent = parent\n self.error = None\n self.latest_release = None\n self.startup = startup\n self.version = version\n\n def check_update_available(self, version, releases, github=False):\n \"\"\"Checks if there is an update available.\n\n It takes as parameters the current version of Spyder and a list of\n valid cleaned releases in chronological order.\n Example: ['2.3.2', '2.3.3' ...] 
or with github ['2.3.4', '2.3.3' ...]\n \"\"\"\n if is_stable_version(version):\n # Remove non stable versions from the list\n releases = [r for r in releases if is_stable_version(r)]\n\n if github:\n latest_release = releases[0]\n else:\n latest_release = releases[-1]\n\n if version.endswith('dev'):\n return (False, latest_release)\n\n return (check_version(version, latest_release, '<'), latest_release)\n\n def start(self):\n \"\"\"Main method of the WorkerUpdates worker\"\"\"\n if is_anaconda():\n self.url = 'https://repo.anaconda.com/pkgs/main'\n if os.name == 'nt':\n self.url += '/win-64/repodata.json'\n elif sys.platform == 'darwin':\n self.url += '/osx-64/repodata.json'\n else:\n self.url += '/linux-64/repodata.json'\n else:\n self.url = ('https://api.github.com/repos/'\n 'spyder-ide/spyder/releases')\n self.update_available = False\n self.latest_release = __version__\n\n error_msg = None\n\n try:\n if hasattr(ssl, '_create_unverified_context'):\n # Fix for issue # 2685 [Works only with Python >=2.7.9]\n # More info: https://www.python.org/dev/peps/pep-0476/#opting-out\n context = ssl._create_unverified_context()\n page = urlopen(self.url, context=context)\n else:\n page = urlopen(self.url)\n try:\n data = page.read()\n\n # Needed step for python3 compatibility\n if not isinstance(data, str):\n data = data.decode()\n\n data = json.loads(data)\n if not self.version:\n self.version = __version__\n\n if is_anaconda():\n releases = []\n for item in data['packages']:\n if 'spyder' in item and 'spyder-kernels' not in item:\n releases.append(item.split('-')[1])\n result = self.check_update_available(self.version,\n releases)\n else:\n releases = [item['tag_name'].replace('v', '')\n for item in data]\n result = self.check_update_available(self.version,\n releases,\n github=True)\n self.update_available, self.latest_release = result\n except Exception:\n error_msg = _('Unable to retrieve information.')\n except HTTPError:\n error_msg = _('Unable to retrieve information.')\n except URLError:\n error_msg = _('Unable to connect to the internet. <br><br>Make '\n 'sure the connection is working properly.')\n except Exception:\n error_msg = _('Unable to check for updates.')\n\n # Don't show dialog when starting up spyder and an error occur\n if not (self.startup and error_msg is not None):\n self.error = error_msg\n self.sig_ready.emit()\n", "path": "spyder/workers/updates.py"}]} | 1,329 | 964 |
gh_patches_debug_29894 | rasdani/github-patches | git_diff | getpelican__pelican-2415 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Full Atom feeds contain empty summaries
When providing the full content of articles in an Atom feed, the generated file contains an empty `summary` in each entry. It should only contain a `content` and no `summary`.
According to the [Atom RFC](https://tools.ietf.org/html/rfc4287):
> It is advisable that each atom:entry element contain a non-empty atom:title element, a non-empty atom:content element when that element is present, and a non-empty atom:summary element when the entry contains no atom:content element.
The [W3C validator](https://validator.w3.org/feed/) raises a warning when it finds such empty summaries.
Sample configuration:
```python
FEED_DOMAIN = SITEURL
FEED_ALL_ATOM = 'feeds/atom.xml'
FEED_ALL_RSS = 'feeds/rss.xml'
ATOM_FEED_SUMMARY_ONLY = False
RSS_FEED_SUMMARY_ONLY = False
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
SUMMARY_MAX_LENGTH = 0
```
Excerpt of the generated Atom feed:
```xml
<entry>
<title>Foo</title>
<link href="https://foo.com/bar.html" rel="alternate"></link>
<published>2018-03-13T18:27:00+01:00</published>
<updated>2018-03-13T18:27:00+01:00</updated>
<author>
<name>Foo</name>
</author>
<id>https://foo.com/bar.html</id>
<summary type="html"></summary>
<content type="html">This is the content.</content>
</entry>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pelican/writers.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import print_function, unicode_literals, with_statement
3
4 import logging
5 import os
6
7 from feedgenerator import Atom1Feed, Rss201rev2Feed, get_tag_uri
8
9 from jinja2 import Markup
10
11 import six
12 from six.moves.urllib.parse import urljoin
13
14 from pelican import signals
15 from pelican.paginator import Paginator
16 from pelican.utils import (get_relative_path, is_selected_for_writing,
17 path_to_url, sanitised_join, set_date_tzinfo)
18
19 if not six.PY3:
20 from codecs import open
21
22 logger = logging.getLogger(__name__)
23
24
25 class Writer(object):
26
27 def __init__(self, output_path, settings=None):
28 self.output_path = output_path
29 self.reminder = dict()
30 self.settings = settings or {}
31 self._written_files = set()
32 self._overridden_files = set()
33
34 # See Content._link_replacer for details
35 if self.settings['RELATIVE_URLS']:
36 self.urljoiner = os.path.join
37 else:
38 self.urljoiner = lambda base, url: urljoin(
39 base if base.endswith('/') else base + '/', url)
40
41 def _create_new_feed(self, feed_type, feed_title, context):
42 feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed
43 if feed_title:
44 feed_title = context['SITENAME'] + ' - ' + feed_title
45 else:
46 feed_title = context['SITENAME']
47 feed = feed_class(
48 title=Markup(feed_title).striptags(),
49 link=(self.site_url + '/'),
50 feed_url=self.feed_url,
51 description=context.get('SITESUBTITLE', ''),
52 subtitle=context.get('SITESUBTITLE', None))
53 return feed
54
55 def _add_item_to_the_feed(self, feed, item):
56 title = Markup(item.title).striptags()
57 link = self.urljoiner(self.site_url, item.url)
58 is_rss = isinstance(feed, Rss201rev2Feed)
59 if not is_rss or self.settings.get('RSS_FEED_SUMMARY_ONLY'):
60 description = item.summary
61 else:
62 description = item.get_content(self.site_url)
63 feed.add_item(
64 title=title,
65 link=link,
66 unique_id=get_tag_uri(link, item.date),
67 description=description,
68 content=item.get_content(self.site_url),
69 categories=item.tags if hasattr(item, 'tags') else None,
70 author_name=getattr(item, 'author', ''),
71 pubdate=set_date_tzinfo(
72 item.date, self.settings.get('TIMEZONE', None)),
73 updateddate=set_date_tzinfo(
74 item.modified, self.settings.get('TIMEZONE', None)
75 ) if hasattr(item, 'modified') else None)
76
77 def _open_w(self, filename, encoding, override=False):
78 """Open a file to write some content to it.
79
80 Exit if we have already written to that file, unless one (and no more
81 than one) of the writes has the override parameter set to True.
82 """
83 if filename in self._overridden_files:
84 if override:
85 raise RuntimeError('File %s is set to be overridden twice'
86 % filename)
87 else:
88 logger.info('Skipping %s', filename)
89 filename = os.devnull
90 elif filename in self._written_files:
91 if override:
92 logger.info('Overwriting %s', filename)
93 else:
94 raise RuntimeError('File %s is to be overwritten' % filename)
95 if override:
96 self._overridden_files.add(filename)
97 self._written_files.add(filename)
98 return open(filename, 'w', encoding=encoding)
99
100 def write_feed(self, elements, context, path=None, url=None,
101 feed_type='atom', override_output=False, feed_title=None):
102 """Generate a feed with the list of articles provided
103
104 Return the feed. If no path or output_path is specified, just
105 return the feed object.
106
107 :param elements: the articles to put on the feed.
108 :param context: the context to get the feed metadata.
109 :param path: the path to output.
110 :param url: the publicly visible feed URL; if None, path is used
111 instead
112 :param feed_type: the feed type to use (atom or rss)
113 :param override_output: boolean telling if we can override previous
114 output with the same name (and if next files written with the same
115 name should be skipped to keep that one)
116 :param feed_title: the title of the feed.o
117 """
118 if not is_selected_for_writing(self.settings, path):
119 return
120
121 self.site_url = context.get(
122 'SITEURL', path_to_url(get_relative_path(path)))
123
124 self.feed_domain = context.get('FEED_DOMAIN')
125 self.feed_url = self.urljoiner(self.feed_domain, url if url else path)
126
127 feed = self._create_new_feed(feed_type, feed_title, context)
128
129 max_items = len(elements)
130 if self.settings['FEED_MAX_ITEMS']:
131 max_items = min(self.settings['FEED_MAX_ITEMS'], max_items)
132 for i in range(max_items):
133 self._add_item_to_the_feed(feed, elements[i])
134
135 signals.feed_generated.send(context, feed=feed)
136 if path:
137 complete_path = sanitised_join(self.output_path, path)
138
139 try:
140 os.makedirs(os.path.dirname(complete_path))
141 except Exception:
142 pass
143
144 encoding = 'utf-8' if six.PY3 else None
145 with self._open_w(complete_path, encoding, override_output) as fp:
146 feed.write(fp, 'utf-8')
147 logger.info('Writing %s', complete_path)
148
149 signals.feed_written.send(
150 complete_path, context=context, feed=feed)
151 return feed
152
153 def write_file(self, name, template, context, relative_urls=False,
154 paginated=None, template_name=None, override_output=False,
155 url=None, **kwargs):
156 """Render the template and write the file.
157
158 :param name: name of the file to output
159 :param template: template to use to generate the content
160 :param context: dict to pass to the templates.
161 :param relative_urls: use relative urls or absolutes ones
162 :param paginated: dict of article list to paginate - must have the
163 same length (same list in different orders)
164 :param template_name: the template name, for pagination
165 :param override_output: boolean telling if we can override previous
166 output with the same name (and if next files written with the same
167 name should be skipped to keep that one)
168 :param url: url of the file (needed by the paginator)
169 :param **kwargs: additional variables to pass to the templates
170 """
171
172 if name is False or \
173 name == "" or \
174 not is_selected_for_writing(self.settings,
175 os.path.join(self.output_path, name)):
176 return
177 elif not name:
178 # other stuff, just return for now
179 return
180
181 def _write_file(template, localcontext, output_path, name, override):
182 """Render the template write the file."""
183 # set localsiteurl for context so that Contents can adjust links
184 if localcontext['localsiteurl']:
185 context['localsiteurl'] = localcontext['localsiteurl']
186 output = template.render(localcontext)
187 path = sanitised_join(output_path, name)
188
189 try:
190 os.makedirs(os.path.dirname(path))
191 except Exception:
192 pass
193
194 with self._open_w(path, 'utf-8', override=override) as f:
195 f.write(output)
196 logger.info('Writing %s', path)
197
198 # Send a signal to say we're writing a file with some specific
199 # local context.
200 signals.content_written.send(path, context=localcontext)
201
202 def _get_localcontext(context, name, kwargs, relative_urls):
203 localcontext = context.copy()
204 localcontext['localsiteurl'] = localcontext.get(
205 'localsiteurl', None)
206 if relative_urls:
207 relative_url = path_to_url(get_relative_path(name))
208 localcontext['SITEURL'] = relative_url
209 localcontext['localsiteurl'] = relative_url
210 localcontext['output_file'] = name
211 localcontext.update(kwargs)
212 return localcontext
213
214 if paginated is None:
215 paginated = {key: val for key, val in kwargs.items()
216 if key in {'articles', 'dates'}}
217
218 # pagination
219 if paginated and template_name in self.settings['PAGINATED_TEMPLATES']:
220 # pagination needed
221 per_page = self.settings['PAGINATED_TEMPLATES'][template_name] \
222 or self.settings['DEFAULT_PAGINATION']
223
224 # init paginators
225 paginators = {key: Paginator(name, url, val, self.settings,
226 per_page)
227 for key, val in paginated.items()}
228
229 # generated pages, and write
230 for page_num in range(list(paginators.values())[0].num_pages):
231 paginated_kwargs = kwargs.copy()
232 for key in paginators.keys():
233 paginator = paginators[key]
234 previous_page = paginator.page(page_num) \
235 if page_num > 0 else None
236 page = paginator.page(page_num + 1)
237 next_page = paginator.page(page_num + 2) \
238 if page_num + 1 < paginator.num_pages else None
239 paginated_kwargs.update(
240 {'%s_paginator' % key: paginator,
241 '%s_page' % key: page,
242 '%s_previous_page' % key: previous_page,
243 '%s_next_page' % key: next_page})
244
245 localcontext = _get_localcontext(
246 context, page.save_as, paginated_kwargs, relative_urls)
247 _write_file(template, localcontext, self.output_path,
248 page.save_as, override_output)
249 else:
250 # no pagination
251 localcontext = _get_localcontext(
252 context, name, kwargs, relative_urls)
253 _write_file(template, localcontext, self.output_path, name,
254 override_output)
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pelican/writers.py b/pelican/writers.py
--- a/pelican/writers.py
+++ b/pelican/writers.py
@@ -55,17 +55,35 @@
def _add_item_to_the_feed(self, feed, item):
title = Markup(item.title).striptags()
link = self.urljoiner(self.site_url, item.url)
- is_rss = isinstance(feed, Rss201rev2Feed)
- if not is_rss or self.settings.get('RSS_FEED_SUMMARY_ONLY'):
- description = item.summary
+
+ if isinstance(feed, Rss201rev2Feed):
+ # RSS feeds use a single tag called 'description' for both the full
+ # content and the summary
+ content = None
+ if self.settings.get('RSS_FEED_SUMMARY_ONLY'):
+ description = item.summary
+ else:
+ description = item.get_content(self.site_url)
+
else:
- description = item.get_content(self.site_url)
+ # Atom feeds have two different tags for full content (called
+ # 'content' by feedgenerator) and summary (called 'description' by
+ # feedgenerator).
+ #
+ # It does not make sense to have the summary be the
+ # exact same thing as the full content. If we detect that
+ # they are we just remove the summary.
+ content = item.get_content(self.site_url)
+ description = item.summary
+ if description == content:
+ description = None
+
feed.add_item(
title=title,
link=link,
unique_id=get_tag_uri(link, item.date),
description=description,
- content=item.get_content(self.site_url),
+ content=content,
categories=item.tags if hasattr(item, 'tags') else None,
author_name=getattr(item, 'author', ''),
pubdate=set_date_tzinfo(
| {"golden_diff": "diff --git a/pelican/writers.py b/pelican/writers.py\n--- a/pelican/writers.py\n+++ b/pelican/writers.py\n@@ -55,17 +55,35 @@\n def _add_item_to_the_feed(self, feed, item):\n title = Markup(item.title).striptags()\n link = self.urljoiner(self.site_url, item.url)\n- is_rss = isinstance(feed, Rss201rev2Feed)\n- if not is_rss or self.settings.get('RSS_FEED_SUMMARY_ONLY'):\n- description = item.summary\n+\n+ if isinstance(feed, Rss201rev2Feed):\n+ # RSS feeds use a single tag called 'description' for both the full\n+ # content and the summary\n+ content = None\n+ if self.settings.get('RSS_FEED_SUMMARY_ONLY'):\n+ description = item.summary\n+ else:\n+ description = item.get_content(self.site_url)\n+\n else:\n- description = item.get_content(self.site_url)\n+ # Atom feeds have two different tags for full content (called\n+ # 'content' by feedgenerator) and summary (called 'description' by\n+ # feedgenerator).\n+ #\n+ # It does not make sense to have the summary be the\n+ # exact same thing as the full content. If we detect that\n+ # they are we just remove the summary.\n+ content = item.get_content(self.site_url)\n+ description = item.summary\n+ if description == content:\n+ description = None\n+\n feed.add_item(\n title=title,\n link=link,\n unique_id=get_tag_uri(link, item.date),\n description=description,\n- content=item.get_content(self.site_url),\n+ content=content,\n categories=item.tags if hasattr(item, 'tags') else None,\n author_name=getattr(item, 'author', ''),\n pubdate=set_date_tzinfo(\n", "issue": "Full Atom feeds contain empty summaries\nWhen providing the full content of articles in an Atom feed, the generated file contains an empty `summary` in each entry. It should only contain a `content` and no `summary`.\r\n\r\nAccording to the [Atom RFC](https://tools.ietf.org/html/rfc4287):\r\n\r\n> It is advisable that each atom:entry element contain a non-empty atom:title element, a non-empty atom:content element when that element is present, and a non-empty atom:summary element when the entry contains no atom:content element.\r\n\r\nThe [W3C validator](https://validator.w3.org/feed/) raises a warning when it finds such empty summaries.\r\n\r\nSample configuration:\r\n\r\n```python\r\nFEED_DOMAIN = SITEURL\r\nFEED_ALL_ATOM = 'feeds/atom.xml'\r\nFEED_ALL_RSS = 'feeds/rss.xml'\r\nATOM_FEED_SUMMARY_ONLY = False\r\nRSS_FEED_SUMMARY_ONLY = False\r\nCATEGORY_FEED_ATOM = None\r\nTRANSLATION_FEED_ATOM = None\r\nAUTHOR_FEED_ATOM = None\r\nAUTHOR_FEED_RSS = None\r\nSUMMARY_MAX_LENGTH = 0\r\n```\r\n\r\nExcerpt of the generated Atom feed:\r\n```xml\r\n <entry>\r\n <title>Foo</title>\r\n <link href=\"https://foo.com/bar.html\" rel=\"alternate\"></link>\r\n <published>2018-03-13T18:27:00+01:00</published>\r\n <updated>2018-03-13T18:27:00+01:00</updated>\r\n <author>\r\n <name>Foo</name>\r\n </author>\r\n <id>https://foo.com/bar.html</id>\r\n <summary type=\"html\"></summary>\r\n <content type=\"html\">This is the content.</content>\r\n </entry>\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, unicode_literals, with_statement\n\nimport logging\nimport os\n\nfrom feedgenerator import Atom1Feed, Rss201rev2Feed, get_tag_uri\n\nfrom jinja2 import Markup\n\nimport six\nfrom six.moves.urllib.parse import urljoin\n\nfrom pelican import signals\nfrom pelican.paginator import Paginator\nfrom pelican.utils import (get_relative_path, is_selected_for_writing,\n path_to_url, sanitised_join, set_date_tzinfo)\n\nif not six.PY3:\n from codecs 
import open\n\nlogger = logging.getLogger(__name__)\n\n\nclass Writer(object):\n\n def __init__(self, output_path, settings=None):\n self.output_path = output_path\n self.reminder = dict()\n self.settings = settings or {}\n self._written_files = set()\n self._overridden_files = set()\n\n # See Content._link_replacer for details\n if self.settings['RELATIVE_URLS']:\n self.urljoiner = os.path.join\n else:\n self.urljoiner = lambda base, url: urljoin(\n base if base.endswith('/') else base + '/', url)\n\n def _create_new_feed(self, feed_type, feed_title, context):\n feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed\n if feed_title:\n feed_title = context['SITENAME'] + ' - ' + feed_title\n else:\n feed_title = context['SITENAME']\n feed = feed_class(\n title=Markup(feed_title).striptags(),\n link=(self.site_url + '/'),\n feed_url=self.feed_url,\n description=context.get('SITESUBTITLE', ''),\n subtitle=context.get('SITESUBTITLE', None))\n return feed\n\n def _add_item_to_the_feed(self, feed, item):\n title = Markup(item.title).striptags()\n link = self.urljoiner(self.site_url, item.url)\n is_rss = isinstance(feed, Rss201rev2Feed)\n if not is_rss or self.settings.get('RSS_FEED_SUMMARY_ONLY'):\n description = item.summary\n else:\n description = item.get_content(self.site_url)\n feed.add_item(\n title=title,\n link=link,\n unique_id=get_tag_uri(link, item.date),\n description=description,\n content=item.get_content(self.site_url),\n categories=item.tags if hasattr(item, 'tags') else None,\n author_name=getattr(item, 'author', ''),\n pubdate=set_date_tzinfo(\n item.date, self.settings.get('TIMEZONE', None)),\n updateddate=set_date_tzinfo(\n item.modified, self.settings.get('TIMEZONE', None)\n ) if hasattr(item, 'modified') else None)\n\n def _open_w(self, filename, encoding, override=False):\n \"\"\"Open a file to write some content to it.\n\n Exit if we have already written to that file, unless one (and no more\n than one) of the writes has the override parameter set to True.\n \"\"\"\n if filename in self._overridden_files:\n if override:\n raise RuntimeError('File %s is set to be overridden twice'\n % filename)\n else:\n logger.info('Skipping %s', filename)\n filename = os.devnull\n elif filename in self._written_files:\n if override:\n logger.info('Overwriting %s', filename)\n else:\n raise RuntimeError('File %s is to be overwritten' % filename)\n if override:\n self._overridden_files.add(filename)\n self._written_files.add(filename)\n return open(filename, 'w', encoding=encoding)\n\n def write_feed(self, elements, context, path=None, url=None,\n feed_type='atom', override_output=False, feed_title=None):\n \"\"\"Generate a feed with the list of articles provided\n\n Return the feed. 
If no path or output_path is specified, just\n return the feed object.\n\n :param elements: the articles to put on the feed.\n :param context: the context to get the feed metadata.\n :param path: the path to output.\n :param url: the publicly visible feed URL; if None, path is used\n instead\n :param feed_type: the feed type to use (atom or rss)\n :param override_output: boolean telling if we can override previous\n output with the same name (and if next files written with the same\n name should be skipped to keep that one)\n :param feed_title: the title of the feed.o\n \"\"\"\n if not is_selected_for_writing(self.settings, path):\n return\n\n self.site_url = context.get(\n 'SITEURL', path_to_url(get_relative_path(path)))\n\n self.feed_domain = context.get('FEED_DOMAIN')\n self.feed_url = self.urljoiner(self.feed_domain, url if url else path)\n\n feed = self._create_new_feed(feed_type, feed_title, context)\n\n max_items = len(elements)\n if self.settings['FEED_MAX_ITEMS']:\n max_items = min(self.settings['FEED_MAX_ITEMS'], max_items)\n for i in range(max_items):\n self._add_item_to_the_feed(feed, elements[i])\n\n signals.feed_generated.send(context, feed=feed)\n if path:\n complete_path = sanitised_join(self.output_path, path)\n\n try:\n os.makedirs(os.path.dirname(complete_path))\n except Exception:\n pass\n\n encoding = 'utf-8' if six.PY3 else None\n with self._open_w(complete_path, encoding, override_output) as fp:\n feed.write(fp, 'utf-8')\n logger.info('Writing %s', complete_path)\n\n signals.feed_written.send(\n complete_path, context=context, feed=feed)\n return feed\n\n def write_file(self, name, template, context, relative_urls=False,\n paginated=None, template_name=None, override_output=False,\n url=None, **kwargs):\n \"\"\"Render the template and write the file.\n\n :param name: name of the file to output\n :param template: template to use to generate the content\n :param context: dict to pass to the templates.\n :param relative_urls: use relative urls or absolutes ones\n :param paginated: dict of article list to paginate - must have the\n same length (same list in different orders)\n :param template_name: the template name, for pagination\n :param override_output: boolean telling if we can override previous\n output with the same name (and if next files written with the same\n name should be skipped to keep that one)\n :param url: url of the file (needed by the paginator)\n :param **kwargs: additional variables to pass to the templates\n \"\"\"\n\n if name is False or \\\n name == \"\" or \\\n not is_selected_for_writing(self.settings,\n os.path.join(self.output_path, name)):\n return\n elif not name:\n # other stuff, just return for now\n return\n\n def _write_file(template, localcontext, output_path, name, override):\n \"\"\"Render the template write the file.\"\"\"\n # set localsiteurl for context so that Contents can adjust links\n if localcontext['localsiteurl']:\n context['localsiteurl'] = localcontext['localsiteurl']\n output = template.render(localcontext)\n path = sanitised_join(output_path, name)\n\n try:\n os.makedirs(os.path.dirname(path))\n except Exception:\n pass\n\n with self._open_w(path, 'utf-8', override=override) as f:\n f.write(output)\n logger.info('Writing %s', path)\n\n # Send a signal to say we're writing a file with some specific\n # local context.\n signals.content_written.send(path, context=localcontext)\n\n def _get_localcontext(context, name, kwargs, relative_urls):\n localcontext = context.copy()\n localcontext['localsiteurl'] = 
localcontext.get(\n 'localsiteurl', None)\n if relative_urls:\n relative_url = path_to_url(get_relative_path(name))\n localcontext['SITEURL'] = relative_url\n localcontext['localsiteurl'] = relative_url\n localcontext['output_file'] = name\n localcontext.update(kwargs)\n return localcontext\n\n if paginated is None:\n paginated = {key: val for key, val in kwargs.items()\n if key in {'articles', 'dates'}}\n\n # pagination\n if paginated and template_name in self.settings['PAGINATED_TEMPLATES']:\n # pagination needed\n per_page = self.settings['PAGINATED_TEMPLATES'][template_name] \\\n or self.settings['DEFAULT_PAGINATION']\n\n # init paginators\n paginators = {key: Paginator(name, url, val, self.settings,\n per_page)\n for key, val in paginated.items()}\n\n # generated pages, and write\n for page_num in range(list(paginators.values())[0].num_pages):\n paginated_kwargs = kwargs.copy()\n for key in paginators.keys():\n paginator = paginators[key]\n previous_page = paginator.page(page_num) \\\n if page_num > 0 else None\n page = paginator.page(page_num + 1)\n next_page = paginator.page(page_num + 2) \\\n if page_num + 1 < paginator.num_pages else None\n paginated_kwargs.update(\n {'%s_paginator' % key: paginator,\n '%s_page' % key: page,\n '%s_previous_page' % key: previous_page,\n '%s_next_page' % key: next_page})\n\n localcontext = _get_localcontext(\n context, page.save_as, paginated_kwargs, relative_urls)\n _write_file(template, localcontext, self.output_path,\n page.save_as, override_output)\n else:\n # no pagination\n localcontext = _get_localcontext(\n context, name, kwargs, relative_urls)\n _write_file(template, localcontext, self.output_path, name,\n override_output)\n", "path": "pelican/writers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, unicode_literals, with_statement\n\nimport logging\nimport os\n\nfrom feedgenerator import Atom1Feed, Rss201rev2Feed, get_tag_uri\n\nfrom jinja2 import Markup\n\nimport six\nfrom six.moves.urllib.parse import urljoin\n\nfrom pelican import signals\nfrom pelican.paginator import Paginator\nfrom pelican.utils import (get_relative_path, is_selected_for_writing,\n path_to_url, sanitised_join, set_date_tzinfo)\n\nif not six.PY3:\n from codecs import open\n\nlogger = logging.getLogger(__name__)\n\n\nclass Writer(object):\n\n def __init__(self, output_path, settings=None):\n self.output_path = output_path\n self.reminder = dict()\n self.settings = settings or {}\n self._written_files = set()\n self._overridden_files = set()\n\n # See Content._link_replacer for details\n if self.settings['RELATIVE_URLS']:\n self.urljoiner = os.path.join\n else:\n self.urljoiner = lambda base, url: urljoin(\n base if base.endswith('/') else base + '/', url)\n\n def _create_new_feed(self, feed_type, feed_title, context):\n feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed\n if feed_title:\n feed_title = context['SITENAME'] + ' - ' + feed_title\n else:\n feed_title = context['SITENAME']\n feed = feed_class(\n title=Markup(feed_title).striptags(),\n link=(self.site_url + '/'),\n feed_url=self.feed_url,\n description=context.get('SITESUBTITLE', ''),\n subtitle=context.get('SITESUBTITLE', None))\n return feed\n\n def _add_item_to_the_feed(self, feed, item):\n title = Markup(item.title).striptags()\n link = self.urljoiner(self.site_url, item.url)\n\n if isinstance(feed, Rss201rev2Feed):\n # RSS feeds use a single tag called 'description' for both the full\n # content and the summary\n content = None\n if 
self.settings.get('RSS_FEED_SUMMARY_ONLY'):\n description = item.summary\n else:\n description = item.get_content(self.site_url)\n\n else:\n # Atom feeds have two different tags for full content (called\n # 'content' by feedgenerator) and summary (called 'description' by\n # feedgenerator).\n #\n # It does not make sense to have the summary be the\n # exact same thing as the full content. If we detect that\n # they are we just remove the summary.\n content = item.get_content(self.site_url)\n description = item.summary\n if description == content:\n description = None\n\n feed.add_item(\n title=title,\n link=link,\n unique_id=get_tag_uri(link, item.date),\n description=description,\n content=content,\n categories=item.tags if hasattr(item, 'tags') else None,\n author_name=getattr(item, 'author', ''),\n pubdate=set_date_tzinfo(\n item.date, self.settings.get('TIMEZONE', None)),\n updateddate=set_date_tzinfo(\n item.modified, self.settings.get('TIMEZONE', None)\n ) if hasattr(item, 'modified') else None)\n\n def _open_w(self, filename, encoding, override=False):\n \"\"\"Open a file to write some content to it.\n\n Exit if we have already written to that file, unless one (and no more\n than one) of the writes has the override parameter set to True.\n \"\"\"\n if filename in self._overridden_files:\n if override:\n raise RuntimeError('File %s is set to be overridden twice'\n % filename)\n else:\n logger.info('Skipping %s', filename)\n filename = os.devnull\n elif filename in self._written_files:\n if override:\n logger.info('Overwriting %s', filename)\n else:\n raise RuntimeError('File %s is to be overwritten' % filename)\n if override:\n self._overridden_files.add(filename)\n self._written_files.add(filename)\n return open(filename, 'w', encoding=encoding)\n\n def write_feed(self, elements, context, path=None, url=None,\n feed_type='atom', override_output=False, feed_title=None):\n \"\"\"Generate a feed with the list of articles provided\n\n Return the feed. 
If no path or output_path is specified, just\n return the feed object.\n\n :param elements: the articles to put on the feed.\n :param context: the context to get the feed metadata.\n :param path: the path to output.\n :param url: the publicly visible feed URL; if None, path is used\n instead\n :param feed_type: the feed type to use (atom or rss)\n :param override_output: boolean telling if we can override previous\n output with the same name (and if next files written with the same\n name should be skipped to keep that one)\n :param feed_title: the title of the feed.o\n \"\"\"\n if not is_selected_for_writing(self.settings, path):\n return\n\n self.site_url = context.get(\n 'SITEURL', path_to_url(get_relative_path(path)))\n\n self.feed_domain = context.get('FEED_DOMAIN')\n self.feed_url = self.urljoiner(self.feed_domain, url if url else path)\n\n feed = self._create_new_feed(feed_type, feed_title, context)\n\n max_items = len(elements)\n if self.settings['FEED_MAX_ITEMS']:\n max_items = min(self.settings['FEED_MAX_ITEMS'], max_items)\n for i in range(max_items):\n self._add_item_to_the_feed(feed, elements[i])\n\n signals.feed_generated.send(context, feed=feed)\n if path:\n complete_path = sanitised_join(self.output_path, path)\n\n try:\n os.makedirs(os.path.dirname(complete_path))\n except Exception:\n pass\n\n encoding = 'utf-8' if six.PY3 else None\n with self._open_w(complete_path, encoding, override_output) as fp:\n feed.write(fp, 'utf-8')\n logger.info('Writing %s', complete_path)\n\n signals.feed_written.send(\n complete_path, context=context, feed=feed)\n return feed\n\n def write_file(self, name, template, context, relative_urls=False,\n paginated=None, template_name=None, override_output=False,\n url=None, **kwargs):\n \"\"\"Render the template and write the file.\n\n :param name: name of the file to output\n :param template: template to use to generate the content\n :param context: dict to pass to the templates.\n :param relative_urls: use relative urls or absolutes ones\n :param paginated: dict of article list to paginate - must have the\n same length (same list in different orders)\n :param template_name: the template name, for pagination\n :param override_output: boolean telling if we can override previous\n output with the same name (and if next files written with the same\n name should be skipped to keep that one)\n :param url: url of the file (needed by the paginator)\n :param **kwargs: additional variables to pass to the templates\n \"\"\"\n\n if name is False or \\\n name == \"\" or \\\n not is_selected_for_writing(self.settings,\n os.path.join(self.output_path, name)):\n return\n elif not name:\n # other stuff, just return for now\n return\n\n def _write_file(template, localcontext, output_path, name, override):\n \"\"\"Render the template write the file.\"\"\"\n # set localsiteurl for context so that Contents can adjust links\n if localcontext['localsiteurl']:\n context['localsiteurl'] = localcontext['localsiteurl']\n output = template.render(localcontext)\n path = sanitised_join(output_path, name)\n\n try:\n os.makedirs(os.path.dirname(path))\n except Exception:\n pass\n\n with self._open_w(path, 'utf-8', override=override) as f:\n f.write(output)\n logger.info('Writing %s', path)\n\n # Send a signal to say we're writing a file with some specific\n # local context.\n signals.content_written.send(path, context=localcontext)\n\n def _get_localcontext(context, name, kwargs, relative_urls):\n localcontext = context.copy()\n localcontext['localsiteurl'] = 
localcontext.get(\n 'localsiteurl', None)\n if relative_urls:\n relative_url = path_to_url(get_relative_path(name))\n localcontext['SITEURL'] = relative_url\n localcontext['localsiteurl'] = relative_url\n localcontext['output_file'] = name\n localcontext.update(kwargs)\n return localcontext\n\n if paginated is None:\n paginated = {key: val for key, val in kwargs.items()\n if key in {'articles', 'dates'}}\n\n # pagination\n if paginated and template_name in self.settings['PAGINATED_TEMPLATES']:\n # pagination needed\n per_page = self.settings['PAGINATED_TEMPLATES'][template_name] \\\n or self.settings['DEFAULT_PAGINATION']\n\n # init paginators\n paginators = {key: Paginator(name, url, val, self.settings,\n per_page)\n for key, val in paginated.items()}\n\n # generated pages, and write\n for page_num in range(list(paginators.values())[0].num_pages):\n paginated_kwargs = kwargs.copy()\n for key in paginators.keys():\n paginator = paginators[key]\n previous_page = paginator.page(page_num) \\\n if page_num > 0 else None\n page = paginator.page(page_num + 1)\n next_page = paginator.page(page_num + 2) \\\n if page_num + 1 < paginator.num_pages else None\n paginated_kwargs.update(\n {'%s_paginator' % key: paginator,\n '%s_page' % key: page,\n '%s_previous_page' % key: previous_page,\n '%s_next_page' % key: next_page})\n\n localcontext = _get_localcontext(\n context, page.save_as, paginated_kwargs, relative_urls)\n _write_file(template, localcontext, self.output_path,\n page.save_as, override_output)\n else:\n # no pagination\n localcontext = _get_localcontext(\n context, name, kwargs, relative_urls)\n _write_file(template, localcontext, self.output_path, name,\n override_output)\n", "path": "pelican/writers.py"}]} | 3,503 | 427 |
gh_patches_debug_25229 | rasdani/github-patches | git_diff | microsoft__AzureTRE-217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Task] Change location to azure_location for porter parameters from API
**Description**
Change location to azure_location for porter parameters from API - as location appears to be a reserved word for porter
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `management_api_app/db/repositories/workspaces.py`
Content:
```
1 import uuid
2 from typing import List
3
4 from azure.cosmos import CosmosClient
5 from pydantic import UUID4
6
7 from core import config
8 from db.errors import EntityDoesNotExist
9 from db.repositories.base import BaseRepository
10 from db.repositories.workspace_templates import WorkspaceTemplateRepository
11 from models.domain.resource import Status
12 from models.domain.workspace import Workspace
13 from models.schemas.workspace import WorkspaceInCreate
14
15
16 class WorkspaceRepository(BaseRepository):
17 def __init__(self, client: CosmosClient):
18 super().__init__(client, config.STATE_STORE_RESOURCES_CONTAINER)
19
20 @staticmethod
21 def _active_workspaces_query():
22 return 'SELECT * FROM c WHERE c.resourceType = "workspace" AND c.isDeleted = false'
23
24 def _get_template_version(self, template_name):
25 workspace_template_repo = WorkspaceTemplateRepository(self._client)
26 template = workspace_template_repo.get_current_workspace_template_by_name(template_name)
27 return template["version"]
28
29 def get_all_active_workspaces(self) -> List[Workspace]:
30 query = self._active_workspaces_query()
31 return self.query(query=query)
32
33 def get_workspace_by_workspace_id(self, workspace_id: UUID4) -> Workspace:
34 query = self._active_workspaces_query() + f' AND c.id="{workspace_id}"'
35 workspaces = self.query(query=query)
36 if not workspaces:
37 raise EntityDoesNotExist
38 return workspaces[0]
39
40 def create_workspace_item(self, workspace_create: WorkspaceInCreate) -> Workspace:
41 full_workspace_id = str(uuid.uuid4())
42
43 try:
44 template_version = self._get_template_version(workspace_create.workspaceType)
45 except EntityDoesNotExist:
46 raise ValueError(f"The workspace type '{workspace_create.workspaceType}' does not exist")
47
48 resource_spec_parameters = {
49 "location": config.RESOURCE_LOCATION,
50 "workspace_id": full_workspace_id[-4:],
51 "tre_id": config.TRE_ID,
52 "address_space": "10.2.1.0/24" # TODO: Calculate this value - Issue #52
53 }
54
55 workspace = Workspace(
56 id=full_workspace_id,
57 displayName=workspace_create.displayName,
58 description=workspace_create.description,
59 resourceTemplateName=workspace_create.workspaceType,
60 resourceTemplateVersion=template_version,
61 resourceTemplateParameters=resource_spec_parameters,
62 status=Status.NotDeployed
63 )
64
65 return workspace
66
67 def save_workspace(self, workspace: Workspace):
68 self.create_item(workspace)
69
```
Path: `management_api_app/models/schemas/workspace_template.py`
Content:
```
1 from typing import List
2 from pydantic import BaseModel, Field
3
4 from models.domain.resource import ResourceType
5 from models.domain.resource_template import ResourceTemplate, Parameter
6
7
8 def get_sample_workspace_template_object(template_name: str = "tre-workspace-vanilla") -> ResourceTemplate:
9 return ResourceTemplate(
10 id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb",
11 name=template_name,
12 description="vanilla workspace bundle",
13 version="0.1.0",
14 parameters=[
15 Parameter(name="location", type="string"),
16 Parameter(name="tre_id", type="string"),
17 Parameter(name="workspace_id", type="string"),
18 Parameter(name="address_space", type="string", default="10.2.1.0/24", description="VNet address space for the workspace services")
19 ],
20 resourceType=ResourceType.Workspace,
21 current=True,
22 )
23
24
25 def get_sample_workspace_template() -> dict:
26 return get_sample_workspace_template_object().dict()
27
28
29 class WorkspaceTemplateNamesInList(BaseModel):
30 templateNames: List[str]
31
32 class Config:
33 schema_extra = {
34 "example": {
35 "templateNames": ["tre-workspace-vanilla", "tre-workspace-base"]
36 }
37 }
38
39
40 class WorkspaceTemplateInCreate(BaseModel):
41
42 name: str = Field(title="Name of workspace template")
43 version: str = Field(title="Version of workspace template")
44 description: str = Field(title=" Description of workspace template")
45 properties: dict = Field({}, title="Workspace template properties",
46 description="Values for the properties required by the workspace template")
47 resourceType: str = Field(title="Type of workspace template")
48 current: bool = Field(title="Mark this version as current")
49
50 class Config:
51 schema_extra = {
52 "example": {
53 "name": "My workspace template",
54 "version": "0.0.1",
55 "description": "workspace template for great product",
56 "properties": {},
57 "resourceType": "workspace",
58 "current": "true"
59 }
60 }
61
62
63 class WorkspaceTemplateIdInResponse(BaseModel):
64 resourceTemplateId: str
65
66
67 class WorkspaceTemplateInResponse(BaseModel):
68 workspaceTemplate: ResourceTemplate
69
70 class Config:
71 schema_extra = {
72 "example": {
73 "resourceTemplateId": "49a7445c-aae6-41ec-a539-30dfa90ab1ae",
74 "workspaceTemplate": get_sample_workspace_template()
75 }
76 }
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/management_api_app/db/repositories/workspaces.py b/management_api_app/db/repositories/workspaces.py
--- a/management_api_app/db/repositories/workspaces.py
+++ b/management_api_app/db/repositories/workspaces.py
@@ -46,7 +46,7 @@
raise ValueError(f"The workspace type '{workspace_create.workspaceType}' does not exist")
resource_spec_parameters = {
- "location": config.RESOURCE_LOCATION,
+ "azure_location": config.RESOURCE_LOCATION,
"workspace_id": full_workspace_id[-4:],
"tre_id": config.TRE_ID,
"address_space": "10.2.1.0/24" # TODO: Calculate this value - Issue #52
diff --git a/management_api_app/models/schemas/workspace_template.py b/management_api_app/models/schemas/workspace_template.py
--- a/management_api_app/models/schemas/workspace_template.py
+++ b/management_api_app/models/schemas/workspace_template.py
@@ -12,7 +12,7 @@
description="vanilla workspace bundle",
version="0.1.0",
parameters=[
- Parameter(name="location", type="string"),
+ Parameter(name="azure_location", type="string"),
Parameter(name="tre_id", type="string"),
Parameter(name="workspace_id", type="string"),
Parameter(name="address_space", type="string", default="10.2.1.0/24", description="VNet address space for the workspace services")
| {"golden_diff": "diff --git a/management_api_app/db/repositories/workspaces.py b/management_api_app/db/repositories/workspaces.py\n--- a/management_api_app/db/repositories/workspaces.py\n+++ b/management_api_app/db/repositories/workspaces.py\n@@ -46,7 +46,7 @@\n raise ValueError(f\"The workspace type '{workspace_create.workspaceType}' does not exist\")\n \n resource_spec_parameters = {\n- \"location\": config.RESOURCE_LOCATION,\n+ \"azure_location\": config.RESOURCE_LOCATION,\n \"workspace_id\": full_workspace_id[-4:],\n \"tre_id\": config.TRE_ID,\n \"address_space\": \"10.2.1.0/24\" # TODO: Calculate this value - Issue #52\ndiff --git a/management_api_app/models/schemas/workspace_template.py b/management_api_app/models/schemas/workspace_template.py\n--- a/management_api_app/models/schemas/workspace_template.py\n+++ b/management_api_app/models/schemas/workspace_template.py\n@@ -12,7 +12,7 @@\n description=\"vanilla workspace bundle\",\n version=\"0.1.0\",\n parameters=[\n- Parameter(name=\"location\", type=\"string\"),\n+ Parameter(name=\"azure_location\", type=\"string\"),\n Parameter(name=\"tre_id\", type=\"string\"),\n Parameter(name=\"workspace_id\", type=\"string\"),\n Parameter(name=\"address_space\", type=\"string\", default=\"10.2.1.0/24\", description=\"VNet address space for the workspace services\")\n", "issue": "[Task] Change location to azure_location for porter parameters from API\n**Description**\r\n \r\nChange location to azure_location for porter parameters from API - as location appears to be a reserved word for porter\r\n\r\n\n", "before_files": [{"content": "import uuid\nfrom typing import List\n\nfrom azure.cosmos import CosmosClient\nfrom pydantic import UUID4\n\nfrom core import config\nfrom db.errors import EntityDoesNotExist\nfrom db.repositories.base import BaseRepository\nfrom db.repositories.workspace_templates import WorkspaceTemplateRepository\nfrom models.domain.resource import Status\nfrom models.domain.workspace import Workspace\nfrom models.schemas.workspace import WorkspaceInCreate\n\n\nclass WorkspaceRepository(BaseRepository):\n def __init__(self, client: CosmosClient):\n super().__init__(client, config.STATE_STORE_RESOURCES_CONTAINER)\n\n @staticmethod\n def _active_workspaces_query():\n return 'SELECT * FROM c WHERE c.resourceType = \"workspace\" AND c.isDeleted = false'\n\n def _get_template_version(self, template_name):\n workspace_template_repo = WorkspaceTemplateRepository(self._client)\n template = workspace_template_repo.get_current_workspace_template_by_name(template_name)\n return template[\"version\"]\n\n def get_all_active_workspaces(self) -> List[Workspace]:\n query = self._active_workspaces_query()\n return self.query(query=query)\n\n def get_workspace_by_workspace_id(self, workspace_id: UUID4) -> Workspace:\n query = self._active_workspaces_query() + f' AND c.id=\"{workspace_id}\"'\n workspaces = self.query(query=query)\n if not workspaces:\n raise EntityDoesNotExist\n return workspaces[0]\n\n def create_workspace_item(self, workspace_create: WorkspaceInCreate) -> Workspace:\n full_workspace_id = str(uuid.uuid4())\n\n try:\n template_version = self._get_template_version(workspace_create.workspaceType)\n except EntityDoesNotExist:\n raise ValueError(f\"The workspace type '{workspace_create.workspaceType}' does not exist\")\n\n resource_spec_parameters = {\n \"location\": config.RESOURCE_LOCATION,\n \"workspace_id\": full_workspace_id[-4:],\n \"tre_id\": config.TRE_ID,\n \"address_space\": \"10.2.1.0/24\" # TODO: Calculate this value - 
Issue #52\n }\n\n workspace = Workspace(\n id=full_workspace_id,\n displayName=workspace_create.displayName,\n description=workspace_create.description,\n resourceTemplateName=workspace_create.workspaceType,\n resourceTemplateVersion=template_version,\n resourceTemplateParameters=resource_spec_parameters,\n status=Status.NotDeployed\n )\n\n return workspace\n\n def save_workspace(self, workspace: Workspace):\n self.create_item(workspace)\n", "path": "management_api_app/db/repositories/workspaces.py"}, {"content": "from typing import List\nfrom pydantic import BaseModel, Field\n\nfrom models.domain.resource import ResourceType\nfrom models.domain.resource_template import ResourceTemplate, Parameter\n\n\ndef get_sample_workspace_template_object(template_name: str = \"tre-workspace-vanilla\") -> ResourceTemplate:\n return ResourceTemplate(\n id=\"a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb\",\n name=template_name,\n description=\"vanilla workspace bundle\",\n version=\"0.1.0\",\n parameters=[\n Parameter(name=\"location\", type=\"string\"),\n Parameter(name=\"tre_id\", type=\"string\"),\n Parameter(name=\"workspace_id\", type=\"string\"),\n Parameter(name=\"address_space\", type=\"string\", default=\"10.2.1.0/24\", description=\"VNet address space for the workspace services\")\n ],\n resourceType=ResourceType.Workspace,\n current=True,\n )\n\n\ndef get_sample_workspace_template() -> dict:\n return get_sample_workspace_template_object().dict()\n\n\nclass WorkspaceTemplateNamesInList(BaseModel):\n templateNames: List[str]\n\n class Config:\n schema_extra = {\n \"example\": {\n \"templateNames\": [\"tre-workspace-vanilla\", \"tre-workspace-base\"]\n }\n }\n\n\nclass WorkspaceTemplateInCreate(BaseModel):\n\n name: str = Field(title=\"Name of workspace template\")\n version: str = Field(title=\"Version of workspace template\")\n description: str = Field(title=\" Description of workspace template\")\n properties: dict = Field({}, title=\"Workspace template properties\",\n description=\"Values for the properties required by the workspace template\")\n resourceType: str = Field(title=\"Type of workspace template\")\n current: bool = Field(title=\"Mark this version as current\")\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"My workspace template\",\n \"version\": \"0.0.1\",\n \"description\": \"workspace template for great product\",\n \"properties\": {},\n \"resourceType\": \"workspace\",\n \"current\": \"true\"\n }\n }\n\n\nclass WorkspaceTemplateIdInResponse(BaseModel):\n resourceTemplateId: str\n\n\nclass WorkspaceTemplateInResponse(BaseModel):\n workspaceTemplate: ResourceTemplate\n\n class Config:\n schema_extra = {\n \"example\": {\n \"resourceTemplateId\": \"49a7445c-aae6-41ec-a539-30dfa90ab1ae\",\n \"workspaceTemplate\": get_sample_workspace_template()\n }\n }\n", "path": "management_api_app/models/schemas/workspace_template.py"}], "after_files": [{"content": "import uuid\nfrom typing import List\n\nfrom azure.cosmos import CosmosClient\nfrom pydantic import UUID4\n\nfrom core import config\nfrom db.errors import EntityDoesNotExist\nfrom db.repositories.base import BaseRepository\nfrom db.repositories.workspace_templates import WorkspaceTemplateRepository\nfrom models.domain.resource import Status\nfrom models.domain.workspace import Workspace\nfrom models.schemas.workspace import WorkspaceInCreate\n\n\nclass WorkspaceRepository(BaseRepository):\n def __init__(self, client: CosmosClient):\n super().__init__(client, config.STATE_STORE_RESOURCES_CONTAINER)\n\n @staticmethod\n def 
_active_workspaces_query():\n return 'SELECT * FROM c WHERE c.resourceType = \"workspace\" AND c.isDeleted = false'\n\n def _get_template_version(self, template_name):\n workspace_template_repo = WorkspaceTemplateRepository(self._client)\n template = workspace_template_repo.get_current_workspace_template_by_name(template_name)\n return template[\"version\"]\n\n def get_all_active_workspaces(self) -> List[Workspace]:\n query = self._active_workspaces_query()\n return self.query(query=query)\n\n def get_workspace_by_workspace_id(self, workspace_id: UUID4) -> Workspace:\n query = self._active_workspaces_query() + f' AND c.id=\"{workspace_id}\"'\n workspaces = self.query(query=query)\n if not workspaces:\n raise EntityDoesNotExist\n return workspaces[0]\n\n def create_workspace_item(self, workspace_create: WorkspaceInCreate) -> Workspace:\n full_workspace_id = str(uuid.uuid4())\n\n try:\n template_version = self._get_template_version(workspace_create.workspaceType)\n except EntityDoesNotExist:\n raise ValueError(f\"The workspace type '{workspace_create.workspaceType}' does not exist\")\n\n resource_spec_parameters = {\n \"azure_location\": config.RESOURCE_LOCATION,\n \"workspace_id\": full_workspace_id[-4:],\n \"tre_id\": config.TRE_ID,\n \"address_space\": \"10.2.1.0/24\" # TODO: Calculate this value - Issue #52\n }\n\n workspace = Workspace(\n id=full_workspace_id,\n displayName=workspace_create.displayName,\n description=workspace_create.description,\n resourceTemplateName=workspace_create.workspaceType,\n resourceTemplateVersion=template_version,\n resourceTemplateParameters=resource_spec_parameters,\n status=Status.NotDeployed\n )\n\n return workspace\n\n def save_workspace(self, workspace: Workspace):\n self.create_item(workspace)\n", "path": "management_api_app/db/repositories/workspaces.py"}, {"content": "from typing import List\nfrom pydantic import BaseModel, Field\n\nfrom models.domain.resource import ResourceType\nfrom models.domain.resource_template import ResourceTemplate, Parameter\n\n\ndef get_sample_workspace_template_object(template_name: str = \"tre-workspace-vanilla\") -> ResourceTemplate:\n return ResourceTemplate(\n id=\"a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb\",\n name=template_name,\n description=\"vanilla workspace bundle\",\n version=\"0.1.0\",\n parameters=[\n Parameter(name=\"azure_location\", type=\"string\"),\n Parameter(name=\"tre_id\", type=\"string\"),\n Parameter(name=\"workspace_id\", type=\"string\"),\n Parameter(name=\"address_space\", type=\"string\", default=\"10.2.1.0/24\", description=\"VNet address space for the workspace services\")\n ],\n resourceType=ResourceType.Workspace,\n current=True,\n )\n\n\ndef get_sample_workspace_template() -> dict:\n return get_sample_workspace_template_object().dict()\n\n\nclass WorkspaceTemplateNamesInList(BaseModel):\n templateNames: List[str]\n\n class Config:\n schema_extra = {\n \"example\": {\n \"templateNames\": [\"tre-workspace-vanilla\", \"tre-workspace-base\"]\n }\n }\n\n\nclass WorkspaceTemplateInCreate(BaseModel):\n\n name: str = Field(title=\"Name of workspace template\")\n version: str = Field(title=\"Version of workspace template\")\n description: str = Field(title=\" Description of workspace template\")\n properties: dict = Field({}, title=\"Workspace template properties\",\n description=\"Values for the properties required by the workspace template\")\n resourceType: str = Field(title=\"Type of workspace template\")\n current: bool = Field(title=\"Mark this version as current\")\n\n class Config:\n 
schema_extra = {\n \"example\": {\n \"name\": \"My workspace template\",\n \"version\": \"0.0.1\",\n \"description\": \"workspace template for great product\",\n \"properties\": {},\n \"resourceType\": \"workspace\",\n \"current\": \"true\"\n }\n }\n\n\nclass WorkspaceTemplateIdInResponse(BaseModel):\n resourceTemplateId: str\n\n\nclass WorkspaceTemplateInResponse(BaseModel):\n workspaceTemplate: ResourceTemplate\n\n class Config:\n schema_extra = {\n \"example\": {\n \"resourceTemplateId\": \"49a7445c-aae6-41ec-a539-30dfa90ab1ae\",\n \"workspaceTemplate\": get_sample_workspace_template()\n }\n }\n", "path": "management_api_app/models/schemas/workspace_template.py"}]} | 1,680 | 327 |
gh_patches_debug_22785 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1246 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Brazil solar data out of range
```
Traceback (most recent call last):
File "feeder_electricity.py", line 148, in fetch_production
objs = parser(country_code, session, logger=public_logger)
File "/home/electricitymap/parsers/BR.py", line 131, in fetch_production
generation = production_processor(gd, zone_key)
File "/home/electricitymap/parsers/BR.py", line 81, in production_processor
raise ValueError('the solar value is out of range')
ValueError: the solar value is out of range
```
--- END ISSUE ---
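The traceback originates in `production_processor` in `parsers/BR.py`, which inspects `totals['solar']` while summing the regional generation breakdown and raises as soon as the accumulated solar value drops below -1 MW. A more forgiving pattern is to clamp small negative readings to zero and discard clearly invalid datapoints rather than aborting the whole fetch. The sketch below is illustrative only; the function name and tolerance are assumptions, not the repository's actual `validate` helper, whose implementation is not shown here.

```python
# Illustrative sketch only: clamp slightly negative solar readings and drop
# clearly invalid datapoints instead of raising ValueError.
import logging

logger = logging.getLogger(__name__)


def clean_solar(production, tolerance=1.0):
    """Return the production dict with solar sanitised, or None if invalid."""
    solar = production.get('solar')
    if solar is None:
        return production
    if -tolerance <= solar < 0:
        # Treat tiny negative values as measurement noise.
        production['solar'] = 0.0
        return production
    if solar < -tolerance:
        # Drop the datapoint instead of raising, so one bad reading
        # does not abort the whole fetch.
        logger.warning('Discarding datapoint, solar=%s is out of range', solar)
        return None
    return production


# Hypothetical readings for illustration:
print(clean_solar({'solar': -0.4, 'wind': 120.0}))  # solar clamped to 0.0
print(clean_solar({'solar': -7.2, 'wind': 120.0}))  # None, datapoint dropped
```

The actual fix in this repository removes the in-loop check entirely and validates the assembled datapoint once, as the diff further below shows.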
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/BR.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import arrow
4 from collections import defaultdict
5 import requests
6
7 url = 'http://tr.ons.org.br/Content/GetBalancoEnergetico/null'
8
9 generation_mapping = {
10 u'nuclear': 'nuclear',
11 u'eolica': 'wind',
12 u'termica': 'unknown',
13 u'solar': 'solar',
14 'hydro': 'hydro'
15 }
16
17 regions = {
18 'BR-NE': u'nordeste',
19 'BR-N': u'norte',
20 'BR-CS': u'sudesteECentroOeste',
21 'BR-S': u'sul'
22 }
23
24 region_exchanges = {
25 'BR-CS->BR-S': "sul_sudeste",
26 'BR-CS->BR-NE': "sudeste_nordeste",
27 'BR-CS->BR-N': "sudeste_norteFic",
28 'BR-N->BR-NE': "norteFic_nordeste"
29 }
30
31
32 region_exchanges_directions = {
33 'BR-CS->BR-S': -1,
34 'BR-CS->BR-NE': 1,
35 'BR-CS->BR-N': 1,
36 'BR-N->BR-NE': 1
37 }
38
39 countries_exchange = {
40 'UY': {
41 'name': u'uruguai',
42 'flow': 1
43 },
44 'AR': {
45 'name': u'argentina',
46 'flow': -1
47 },
48 'PY': {
49 'name': u'paraguai',
50 'flow': -1
51 }
52 }
53
54
55 def get_data(session, logger):
56 """Requests generation data in json format."""
57
58 s = session or requests.session()
59 json_data = s.get(url).json()
60 return json_data
61
62
63 def production_processor(json_data, zone_key):
64 """
65 Extracts data timestamp and sums regional data into totals by key.
66 Maps keys to type and returns a tuple.
67 """
68
69 dt = arrow.get(json_data['Data'])
70 totals = defaultdict(lambda: 0.0)
71
72 region = regions[zone_key]
73 breakdown = json_data[region][u'geracao']
74 for generation, val in breakdown.items():
75 # tolerance range
76 if -1 <= totals['solar'] < 0:
77 totals['solar'] = 0.0
78
79 # not tolerance range
80 if totals['solar'] < -1:
81 raise ValueError('the solar value is out of range')
82
83 totals[generation] += val
84
85 # BR_CS contains the Itaipu Dam.
86 # We merge the hydro keys into one, then remove unnecessary keys.
87 totals['hydro'] = totals.get(u'hidraulica', 0.0) + totals.get(u'itaipu50HzBrasil', 0.0) + totals.get(u'itaipu60Hz', 0.0)
88 entriesToRemove = (u'hidraulica', u'itaipu50HzBrasil', u'itaipu60Hz', u'total')
89 for k in entriesToRemove:
90 totals.pop(k, None)
91
92 mapped_totals = {generation_mapping.get(name, 'unknown'): val for name, val
93 in totals.items()}
94
95 return dt, mapped_totals
96
97
98 def fetch_production(zone_key, session=None, target_datetime=None, logger=None):
99 """
100 Requests the last known production mix (in MW) of a given country
101 Arguments:
102 zone_key (optional) -- used in case a parser is able to fetch multiple countries
103 session (optional) -- request session passed in order to re-use an existing session
104 Return:
105 A dictionary in the form:
106 {
107 'zoneKey': 'FR',
108 'datetime': '2017-01-01T00:00:00Z',
109 'production': {
110 'biomass': 0.0,
111 'coal': 0.0,
112 'gas': 0.0,
113 'hydro': 0.0,
114 'nuclear': null,
115 'oil': 0.0,
116 'solar': 0.0,
117 'wind': 0.0,
118 'geothermal': 0.0,
119 'unknown': 0.0
120 },
121 'storage': {
122 'hydro': -10.0,
123 },
124 'source': 'mysource.com'
125 }
126 """
127 if target_datetime:
128 raise NotImplementedError('This parser is not yet able to parse past dates')
129
130 gd = get_data(session, logger)
131 generation = production_processor(gd, zone_key)
132
133 datapoint = {
134 'zoneKey': zone_key,
135 'datetime': generation[0].datetime,
136 'production': generation[1],
137 'storage': {
138 'hydro': None,
139 },
140 'source': 'ons.org.br'
141 }
142
143 return datapoint
144
145
146 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
147 """Requests the last known power exchange (in MW) between two regions
148 Arguments:
149 zone_key1 -- the first country code
150 zone_key2 -- the second country code; order of the two codes in params doesn't matter
151 session (optional) -- request session passed in order to re-use an existing session
152 Return:
153 A dictionary in the form:
154 {
155 'sortedZoneKeys': 'DK->NO',
156 'datetime': '2017-01-01T00:00:00Z',
157 'netFlow': 0.0,
158 'source': 'mysource.com'
159 }
160 where net flow is from DK into NO
161 """
162 if target_datetime:
163 raise NotImplementedError('This parser is not yet able to parse past dates')
164
165 gd = get_data(session, logger)
166
167 if zone_key1 in countries_exchange.keys():
168 country_exchange = countries_exchange[zone_key1]
169
170 if zone_key2 in countries_exchange.keys():
171 country_exchange = countries_exchange[zone_key2]
172
173 data = {
174 'datetime': arrow.get(gd['Data']).datetime,
175 'sortedZoneKeys': '->'.join(sorted([zone_key1, zone_key2])),
176 'netFlow': gd['internacional'][country_exchange['name']] * country_exchange['flow'],
177 'source': 'ons.org.br'
178 }
179
180 return data
181
182
183 def fetch_region_exchange(region1, region2, session=None, target_datetime=None, logger=None):
184 """
185 Requests the last known power exchange (in MW) between two Brazilian regions.
186 Arguments:
187 region1 -- the first region
188 region2 -- the second region; order of the two codes in params doesn't matter
189 session (optional) -- request session passed in order to re-use an existing session
190 Return:
191 A dictionary in the form:
192 {
193 'sortedZoneKeys': 'DK->NO',
194 'datetime': '2017-01-01T00:00:00Z',
195 'netFlow': 0.0,
196 'source': 'mysource.com'
197 }
198 where net flow is from DK into NO
199 """
200 if target_datetime:
201 raise NotImplementedError('This parser is not yet able to parse past dates')
202
203 gd = get_data(session, logger)
204 dt = arrow.get(gd['Data']).datetime
205 scc = '->'.join(sorted([region1, region2]))
206
207 exchange = region_exchanges[scc]
208 nf = gd['intercambio'][exchange] * region_exchanges_directions[scc]
209
210 data = {
211 'datetime': dt,
212 'sortedZoneKeys': scc,
213 'netFlow': nf,
214 'source': 'ons.org.br'
215 }
216
217 return data
218
219
220 if __name__ == '__main__':
221 """Main method, never used by the Electricity Map backend, but handy for testing."""
222
223 print('fetch_production(BR-NE) ->')
224 print(fetch_production('BR-NE'))
225
226 print('fetch_production(BR-N) ->')
227 print(fetch_production('BR-N'))
228
229 print('fetch_production(BR-CS) ->')
230 print(fetch_production('BR-CS'))
231
232 print('fetch_production(BR-S) ->')
233 print(fetch_production('BR-S'))
234
235 print('fetch_exchange(BR-S, UY) ->')
236 print(fetch_exchange('BR-S', 'UY'))
237
238 print('fetch_exchange(BR-S, AR) ->')
239 print(fetch_exchange('BR-S', 'AR'))
240
241 print('fetch_region_exchange(BR-CS->BR-S)')
242 print(fetch_region_exchange('BR-CS', 'BR-S'))
243
244 print('fetch_region_exchange(BR-CS->BR-NE)')
245 print(fetch_region_exchange('BR-CS', 'BR-NE'))
246
247 print('fetch_region_exchange(BR-CS->BR-N)')
248 print(fetch_region_exchange('BR-CS', 'BR-N'))
249
250 print('fetch_region_exchange(BR-N->BR-NE)')
251 print(fetch_region_exchange('BR-N', 'BR-NE'))
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/BR.py b/parsers/BR.py
--- a/parsers/BR.py
+++ b/parsers/BR.py
@@ -1,9 +1,13 @@
#!/usr/bin/env python3
-import arrow
from collections import defaultdict
+
+import arrow
import requests
+from .lib.validation import validate
+
+
url = 'http://tr.ons.org.br/Content/GetBalancoEnergetico/null'
generation_mapping = {
@@ -72,14 +76,6 @@
region = regions[zone_key]
breakdown = json_data[region][u'geracao']
for generation, val in breakdown.items():
- # tolerance range
- if -1 <= totals['solar'] < 0:
- totals['solar'] = 0.0
-
- # not tolerance range
- if totals['solar'] < -1:
- raise ValueError('the solar value is out of range')
-
totals[generation] += val
# BR_CS contains the Itaipu Dam.
@@ -140,6 +136,9 @@
'source': 'ons.org.br'
}
+ datapoint = validate(datapoint, logger,
+ remove_negative=True, required=['hydro'], floor=1000)
+
return datapoint
| {"golden_diff": "diff --git a/parsers/BR.py b/parsers/BR.py\n--- a/parsers/BR.py\n+++ b/parsers/BR.py\n@@ -1,9 +1,13 @@\n #!/usr/bin/env python3\n \n-import arrow\n from collections import defaultdict\n+\n+import arrow\n import requests\n \n+from .lib.validation import validate\n+\n+\n url = 'http://tr.ons.org.br/Content/GetBalancoEnergetico/null'\n \n generation_mapping = {\n@@ -72,14 +76,6 @@\n region = regions[zone_key]\n breakdown = json_data[region][u'geracao']\n for generation, val in breakdown.items():\n- # tolerance range\n- if -1 <= totals['solar'] < 0:\n- totals['solar'] = 0.0\n-\n- # not tolerance range\n- if totals['solar'] < -1:\n- raise ValueError('the solar value is out of range')\n-\n totals[generation] += val\n \n # BR_CS contains the Itaipu Dam.\n@@ -140,6 +136,9 @@\n 'source': 'ons.org.br'\n }\n \n+ datapoint = validate(datapoint, logger,\n+ remove_negative=True, required=['hydro'], floor=1000)\n+\n return datapoint\n", "issue": "Brazil solar data out of range\n```\r\nTraceback (most recent call last):\r\n File \"feeder_electricity.py\", line 148, in fetch_production\r\n objs = parser(country_code, session, logger=public_logger)\r\n File \"/home/electricitymap/parsers/BR.py\", line 131, in fetch_production\r\n generation = production_processor(gd, zone_key)\r\n File \"/home/electricitymap/parsers/BR.py\", line 81, in production_processor\r\n raise ValueError('the solar value is out of range')\r\nValueError: the solar value is out of range\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport arrow\nfrom collections import defaultdict\nimport requests\n\nurl = 'http://tr.ons.org.br/Content/GetBalancoEnergetico/null'\n\ngeneration_mapping = {\n u'nuclear': 'nuclear',\n u'eolica': 'wind',\n u'termica': 'unknown',\n u'solar': 'solar',\n 'hydro': 'hydro'\n }\n\nregions = {\n 'BR-NE': u'nordeste',\n 'BR-N': u'norte',\n 'BR-CS': u'sudesteECentroOeste',\n 'BR-S': u'sul'\n }\n\nregion_exchanges = {\n 'BR-CS->BR-S': \"sul_sudeste\",\n 'BR-CS->BR-NE': \"sudeste_nordeste\",\n 'BR-CS->BR-N': \"sudeste_norteFic\",\n 'BR-N->BR-NE': \"norteFic_nordeste\"\n }\n\n\nregion_exchanges_directions = {\n 'BR-CS->BR-S': -1,\n 'BR-CS->BR-NE': 1,\n 'BR-CS->BR-N': 1,\n 'BR-N->BR-NE': 1\n }\n\ncountries_exchange = {\n 'UY': {\n 'name': u'uruguai',\n 'flow': 1\n },\n 'AR': {\n 'name': u'argentina',\n 'flow': -1\n },\n 'PY': {\n 'name': u'paraguai',\n 'flow': -1\n }\n}\n\n\ndef get_data(session, logger):\n \"\"\"Requests generation data in json format.\"\"\"\n\n s = session or requests.session()\n json_data = s.get(url).json()\n return json_data\n\n\ndef production_processor(json_data, zone_key):\n \"\"\"\n Extracts data timestamp and sums regional data into totals by key.\n Maps keys to type and returns a tuple.\n \"\"\"\n\n dt = arrow.get(json_data['Data'])\n totals = defaultdict(lambda: 0.0)\n\n region = regions[zone_key]\n breakdown = json_data[region][u'geracao']\n for generation, val in breakdown.items():\n # tolerance range\n if -1 <= totals['solar'] < 0:\n totals['solar'] = 0.0\n\n # not tolerance range\n if totals['solar'] < -1:\n raise ValueError('the solar value is out of range')\n\n totals[generation] += val\n\n # BR_CS contains the Itaipu Dam.\n # We merge the hydro keys into one, then remove unnecessary keys.\n totals['hydro'] = totals.get(u'hidraulica', 0.0) + totals.get(u'itaipu50HzBrasil', 0.0) + totals.get(u'itaipu60Hz', 0.0)\n entriesToRemove = (u'hidraulica', u'itaipu50HzBrasil', u'itaipu60Hz', u'total')\n for k in entriesToRemove:\n totals.pop(k, None)\n\n 
mapped_totals = {generation_mapping.get(name, 'unknown'): val for name, val\n in totals.items()}\n\n return dt, mapped_totals\n\n\ndef fetch_production(zone_key, session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n gd = get_data(session, logger)\n generation = production_processor(gd, zone_key)\n\n datapoint = {\n 'zoneKey': zone_key,\n 'datetime': generation[0].datetime,\n 'production': generation[1],\n 'storage': {\n 'hydro': None,\n },\n 'source': 'ons.org.br'\n }\n\n return datapoint\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two regions\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n gd = get_data(session, logger)\n\n if zone_key1 in countries_exchange.keys():\n country_exchange = countries_exchange[zone_key1]\n\n if zone_key2 in countries_exchange.keys():\n country_exchange = countries_exchange[zone_key2]\n\n data = {\n 'datetime': arrow.get(gd['Data']).datetime,\n 'sortedZoneKeys': '->'.join(sorted([zone_key1, zone_key2])),\n 'netFlow': gd['internacional'][country_exchange['name']] * country_exchange['flow'],\n 'source': 'ons.org.br'\n }\n\n return data\n\n\ndef fetch_region_exchange(region1, region2, session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known power exchange (in MW) between two Brazilian regions.\n Arguments:\n region1 -- the first region\n region2 -- the second region; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n gd = get_data(session, logger)\n dt = arrow.get(gd['Data']).datetime\n scc = '->'.join(sorted([region1, region2]))\n\n exchange = region_exchanges[scc]\n nf = gd['intercambio'][exchange] * region_exchanges_directions[scc]\n\n data = {\n 'datetime': dt,\n 'sortedZoneKeys': scc,\n 'netFlow': nf,\n 'source': 'ons.org.br'\n }\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but 
handy for testing.\"\"\"\n\n print('fetch_production(BR-NE) ->')\n print(fetch_production('BR-NE'))\n\n print('fetch_production(BR-N) ->')\n print(fetch_production('BR-N'))\n\n print('fetch_production(BR-CS) ->')\n print(fetch_production('BR-CS'))\n\n print('fetch_production(BR-S) ->')\n print(fetch_production('BR-S'))\n\n print('fetch_exchange(BR-S, UY) ->')\n print(fetch_exchange('BR-S', 'UY'))\n\n print('fetch_exchange(BR-S, AR) ->')\n print(fetch_exchange('BR-S', 'AR'))\n\n print('fetch_region_exchange(BR-CS->BR-S)')\n print(fetch_region_exchange('BR-CS', 'BR-S'))\n\n print('fetch_region_exchange(BR-CS->BR-NE)')\n print(fetch_region_exchange('BR-CS', 'BR-NE'))\n\n print('fetch_region_exchange(BR-CS->BR-N)')\n print(fetch_region_exchange('BR-CS', 'BR-N'))\n\n print('fetch_region_exchange(BR-N->BR-NE)')\n print(fetch_region_exchange('BR-N', 'BR-NE'))\n", "path": "parsers/BR.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nfrom collections import defaultdict\n\nimport arrow\nimport requests\n\nfrom .lib.validation import validate\n\n\nurl = 'http://tr.ons.org.br/Content/GetBalancoEnergetico/null'\n\ngeneration_mapping = {\n u'nuclear': 'nuclear',\n u'eolica': 'wind',\n u'termica': 'unknown',\n u'solar': 'solar',\n 'hydro': 'hydro'\n }\n\nregions = {\n 'BR-NE': u'nordeste',\n 'BR-N': u'norte',\n 'BR-CS': u'sudesteECentroOeste',\n 'BR-S': u'sul'\n }\n\nregion_exchanges = {\n 'BR-CS->BR-S': \"sul_sudeste\",\n 'BR-CS->BR-NE': \"sudeste_nordeste\",\n 'BR-CS->BR-N': \"sudeste_norteFic\",\n 'BR-N->BR-NE': \"norteFic_nordeste\"\n }\n\n\nregion_exchanges_directions = {\n 'BR-CS->BR-S': -1,\n 'BR-CS->BR-NE': 1,\n 'BR-CS->BR-N': 1,\n 'BR-N->BR-NE': 1\n }\n\ncountries_exchange = {\n 'UY': {\n 'name': u'uruguai',\n 'flow': 1\n },\n 'AR': {\n 'name': u'argentina',\n 'flow': -1\n },\n 'PY': {\n 'name': u'paraguai',\n 'flow': -1\n }\n}\n\n\ndef get_data(session, logger):\n \"\"\"Requests generation data in json format.\"\"\"\n\n s = session or requests.session()\n json_data = s.get(url).json()\n return json_data\n\n\ndef production_processor(json_data, zone_key):\n \"\"\"\n Extracts data timestamp and sums regional data into totals by key.\n Maps keys to type and returns a tuple.\n \"\"\"\n\n dt = arrow.get(json_data['Data'])\n totals = defaultdict(lambda: 0.0)\n\n region = regions[zone_key]\n breakdown = json_data[region][u'geracao']\n for generation, val in breakdown.items():\n totals[generation] += val\n\n # BR_CS contains the Itaipu Dam.\n # We merge the hydro keys into one, then remove unnecessary keys.\n totals['hydro'] = totals.get(u'hidraulica', 0.0) + totals.get(u'itaipu50HzBrasil', 0.0) + totals.get(u'itaipu60Hz', 0.0)\n entriesToRemove = (u'hidraulica', u'itaipu50HzBrasil', u'itaipu60Hz', u'total')\n for k in entriesToRemove:\n totals.pop(k, None)\n\n mapped_totals = {generation_mapping.get(name, 'unknown'): val for name, val\n in totals.items()}\n\n return dt, mapped_totals\n\n\ndef fetch_production(zone_key, session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 
'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n gd = get_data(session, logger)\n generation = production_processor(gd, zone_key)\n\n datapoint = {\n 'zoneKey': zone_key,\n 'datetime': generation[0].datetime,\n 'production': generation[1],\n 'storage': {\n 'hydro': None,\n },\n 'source': 'ons.org.br'\n }\n\n datapoint = validate(datapoint, logger,\n remove_negative=True, required=['hydro'], floor=1000)\n\n return datapoint\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two regions\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n gd = get_data(session, logger)\n\n if zone_key1 in countries_exchange.keys():\n country_exchange = countries_exchange[zone_key1]\n\n if zone_key2 in countries_exchange.keys():\n country_exchange = countries_exchange[zone_key2]\n\n data = {\n 'datetime': arrow.get(gd['Data']).datetime,\n 'sortedZoneKeys': '->'.join(sorted([zone_key1, zone_key2])),\n 'netFlow': gd['internacional'][country_exchange['name']] * country_exchange['flow'],\n 'source': 'ons.org.br'\n }\n\n return data\n\n\ndef fetch_region_exchange(region1, region2, session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known power exchange (in MW) between two Brazilian regions.\n Arguments:\n region1 -- the first region\n region2 -- the second region; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n gd = get_data(session, logger)\n dt = arrow.get(gd['Data']).datetime\n scc = '->'.join(sorted([region1, region2]))\n\n exchange = region_exchanges[scc]\n nf = gd['intercambio'][exchange] * region_exchanges_directions[scc]\n\n data = {\n 'datetime': dt,\n 'sortedZoneKeys': scc,\n 'netFlow': nf,\n 'source': 'ons.org.br'\n }\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production(BR-NE) ->')\n print(fetch_production('BR-NE'))\n\n print('fetch_production(BR-N) ->')\n print(fetch_production('BR-N'))\n\n print('fetch_production(BR-CS) ->')\n print(fetch_production('BR-CS'))\n\n print('fetch_production(BR-S) ->')\n print(fetch_production('BR-S'))\n\n print('fetch_exchange(BR-S, UY) ->')\n print(fetch_exchange('BR-S', 'UY'))\n\n print('fetch_exchange(BR-S, AR) ->')\n print(fetch_exchange('BR-S', 'AR'))\n\n print('fetch_region_exchange(BR-CS->BR-S)')\n print(fetch_region_exchange('BR-CS', 'BR-S'))\n\n print('fetch_region_exchange(BR-CS->BR-NE)')\n 
print(fetch_region_exchange('BR-CS', 'BR-NE'))\n\n print('fetch_region_exchange(BR-CS->BR-N)')\n print(fetch_region_exchange('BR-CS', 'BR-N'))\n\n print('fetch_region_exchange(BR-N->BR-NE)')\n print(fetch_region_exchange('BR-N', 'BR-NE'))\n", "path": "parsers/BR.py"}]} | 3,023 | 296 |
gh_patches_debug_20758 | rasdani/github-patches | git_diff | pypa__pip-5117 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
config entry misdisplayed in pip 10 help
* Pip version: 10.0.0.dev0 (94d186b)
* Python version: 3.6
* Operating system: Arch Linux
### Description:
see below
### What I've run:
```
$ pip -h
Usage:
pip <command> [options]
Commands:
install Install packages.
download Download packages.
uninstall Uninstall packages.
freeze Output installed packages in requirements format.
list List installed packages.
show Show information about installed packages.
check Verify installed packages have compatible dependencies.
config
Manage local and global configuration.
Subcommands:
list: List the active configuration (or from the file specified)
edit: Edit the configuration file in an editor
get: Get the value associated with name
set: Set the name=value
unset: Unset the value associated with name
If none of --user, --global and --venv are passed, a virtual
environment configuration file is used if one is active and the file
exists. Otherwise, all modifications happen on the to the user file by
default.
search Search PyPI for packages.
wheel Build wheels from your requirements.
hash Compute hashes of package archives.
completion A helper command used for command completion.
help Show help for commands.
<elided>
```
Note how the `config` entry in the help output displays way too much.
--- END ISSUE ---
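The oversized entry appears because the top-level help builds the command listing from each command's one-line `summary` attribute, and `ConfigurationCommand` assigns its full multi-paragraph description to `summary`, so the whole block is dumped into the table. The snippet below is a hypothetical illustration of that effect, not pip's actual rendering code; the names and column widths are assumptions.

```python
# Hypothetical illustration of why a multi-line summary breaks the listing.
commands = {
    'check': 'Verify installed packages have compatible dependencies.',
    'config': (
        '\n    Manage local and global configuration.\n\n'
        '    Subcommands:\n\n'
        '    list: List the active configuration ...\n'
    ),
}


def render_listing(entries):
    rows = []
    for name, summary in entries.items():
        # The listing assumes `summary` is a single line; any embedded
        # newlines spill into the table exactly as seen in the issue.
        rows.append('  %-12s%s' % (name, summary))
    return '\n'.join(rows)


print(render_listing(commands))
```

Keeping `summary` to a single sentence and moving the long text into the class docstring, which is what the patch further below does, restores the expected one-line entry.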
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/commands/configuration.py`
Content:
```
1 import logging
2 import os
3 import subprocess
4
5 from pip._internal.basecommand import Command
6 from pip._internal.configuration import Configuration, kinds
7 from pip._internal.exceptions import PipError
8 from pip._internal.locations import venv_config_file
9 from pip._internal.status_codes import ERROR, SUCCESS
10 from pip._internal.utils.misc import get_prog
11
12 logger = logging.getLogger(__name__)
13
14
15 class ConfigurationCommand(Command):
16 """Manage local and global configuration."""
17 name = 'config'
18 usage = """
19 %prog [<file-option>] list
20 %prog [<file-option>] [--editor <editor-path>] edit
21
22 %prog [<file-option>] get name
23 %prog [<file-option>] set name value
24 %prog [<file-option>] unset name
25 """
26
27 summary = """
28 Manage local and global configuration.
29
30 Subcommands:
31
32 list: List the active configuration (or from the file specified)
33 edit: Edit the configuration file in an editor
34 get: Get the value associated with name
35 set: Set the name=value
36 unset: Unset the value associated with name
37
38 If none of --user, --global and --venv are passed, a virtual
39 environment configuration file is used if one is active and the file
40 exists. Otherwise, all modifications happen on the to the user file by
41 default.
42 """
43
44 def __init__(self, *args, **kwargs):
45 super(ConfigurationCommand, self).__init__(*args, **kwargs)
46
47 self.configuration = None
48
49 self.cmd_opts.add_option(
50 '--editor',
51 dest='editor',
52 action='store',
53 default=None,
54 help=(
55 'Editor to use to edit the file. Uses VISUAL or EDITOR '
56 'environment variables if not provided.'
57 )
58 )
59
60 self.cmd_opts.add_option(
61 '--global',
62 dest='global_file',
63 action='store_true',
64 default=False,
65 help='Use the system-wide configuration file only'
66 )
67
68 self.cmd_opts.add_option(
69 '--user',
70 dest='user_file',
71 action='store_true',
72 default=False,
73 help='Use the user configuration file only'
74 )
75
76 self.cmd_opts.add_option(
77 '--venv',
78 dest='venv_file',
79 action='store_true',
80 default=False,
81 help='Use the virtualenv configuration file only'
82 )
83
84 self.parser.insert_option_group(0, self.cmd_opts)
85
86 def run(self, options, args):
87 handlers = {
88 "list": self.list_values,
89 "edit": self.open_in_editor,
90 "get": self.get_name,
91 "set": self.set_name_value,
92 "unset": self.unset_name
93 }
94
95 # Determine action
96 if not args or args[0] not in handlers:
97 logger.error("Need an action ({}) to perform.".format(
98 ", ".join(sorted(handlers)))
99 )
100 return ERROR
101
102 action = args[0]
103
104 # Determine which configuration files are to be loaded
105 # Depends on whether the command is modifying.
106 try:
107 load_only = self._determine_file(
108 options, need_value=(action in ["get", "set", "unset", "edit"])
109 )
110 except PipError as e:
111 logger.error(e.args[0])
112 return ERROR
113
114 # Load a new configuration
115 self.configuration = Configuration(
116 isolated=options.isolated_mode, load_only=load_only
117 )
118 self.configuration.load()
119
120 # Error handling happens here, not in the action-handlers.
121 try:
122 handlers[action](options, args[1:])
123 except PipError as e:
124 logger.error(e.args[0])
125 return ERROR
126
127 return SUCCESS
128
129 def _determine_file(self, options, need_value):
130 file_options = {
131 kinds.USER: options.user_file,
132 kinds.GLOBAL: options.global_file,
133 kinds.VENV: options.venv_file
134 }
135
136 if sum(file_options.values()) == 0:
137 if not need_value:
138 return None
139 # Default to user, unless there's a virtualenv file.
140 elif os.path.exists(venv_config_file):
141 return kinds.VENV
142 else:
143 return kinds.USER
144 elif sum(file_options.values()) == 1:
145 # There's probably a better expression for this.
146 return [key for key in file_options if file_options[key]][0]
147
148 raise PipError(
149 "Need exactly one file to operate upon "
150 "(--user, --venv, --global) to perform."
151 )
152
153 def list_values(self, options, args):
154 self._get_n_args(args, "list", n=0)
155
156 for key, value in sorted(self.configuration.items()):
157 logger.info("%s=%r", key, value)
158
159 def get_name(self, options, args):
160 key = self._get_n_args(args, "get [name]", n=1)
161 value = self.configuration.get_value(key)
162
163 logger.info("%s", value)
164
165 def set_name_value(self, options, args):
166 key, value = self._get_n_args(args, "set [name] [value]", n=2)
167 self.configuration.set_value(key, value)
168
169 self._save_configuration()
170
171 def unset_name(self, options, args):
172 key = self._get_n_args(args, "unset [name]", n=1)
173 self.configuration.unset_value(key)
174
175 self._save_configuration()
176
177 def open_in_editor(self, options, args):
178 editor = self._determine_editor(options)
179
180 fname = self.configuration.get_file_to_edit()
181 if fname is None:
182 raise PipError("Could not determine appropriate file.")
183
184 try:
185 subprocess.check_call([editor, fname])
186 except subprocess.CalledProcessError as e:
187 raise PipError(
188 "Editor Subprocess exited with exit code {}"
189 .format(e.returncode)
190 )
191
192 def _get_n_args(self, args, example, n):
193 """Helper to make sure the command got the right number of arguments
194 """
195 if len(args) != n:
196 msg = (
197 'Got unexpected number of arguments, expected {}. '
198 '(example: "{} config {}")'
199 ).format(n, get_prog(), example)
200 raise PipError(msg)
201
202 if n == 1:
203 return args[0]
204 else:
205 return args
206
207 def _save_configuration(self):
208 # We successfully ran a modifying command. Need to save the
209 # configuration.
210 try:
211 self.configuration.save()
212 except Exception:
213 logger.error(
214 "Unable to save configuration. Please report this as a bug.",
215 exc_info=1
216 )
217 raise PipError("Internal Error.")
218
219 def _determine_editor(self, options):
220 if options.editor is not None:
221 return options.editor
222 elif "VISUAL" in os.environ:
223 return os.environ["VISUAL"]
224 elif "EDITOR" in os.environ:
225 return os.environ["EDITOR"]
226 else:
227 raise PipError("Could not determine editor to use.")
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/commands/configuration.py b/src/pip/_internal/commands/configuration.py
--- a/src/pip/_internal/commands/configuration.py
+++ b/src/pip/_internal/commands/configuration.py
@@ -13,19 +13,7 @@
class ConfigurationCommand(Command):
- """Manage local and global configuration."""
- name = 'config'
- usage = """
- %prog [<file-option>] list
- %prog [<file-option>] [--editor <editor-path>] edit
-
- %prog [<file-option>] get name
- %prog [<file-option>] set name value
- %prog [<file-option>] unset name
- """
-
- summary = """
- Manage local and global configuration.
+ """Manage local and global configuration.
Subcommands:
@@ -41,6 +29,18 @@
default.
"""
+ name = 'config'
+ usage = """
+ %prog [<file-option>] list
+ %prog [<file-option>] [--editor <editor-path>] edit
+
+ %prog [<file-option>] get name
+ %prog [<file-option>] set name value
+ %prog [<file-option>] unset name
+ """
+
+ summary = "Manage local and global configuration."
+
def __init__(self, *args, **kwargs):
super(ConfigurationCommand, self).__init__(*args, **kwargs)
| {"golden_diff": "diff --git a/src/pip/_internal/commands/configuration.py b/src/pip/_internal/commands/configuration.py\n--- a/src/pip/_internal/commands/configuration.py\n+++ b/src/pip/_internal/commands/configuration.py\n@@ -13,19 +13,7 @@\n \n \n class ConfigurationCommand(Command):\n- \"\"\"Manage local and global configuration.\"\"\"\n- name = 'config'\n- usage = \"\"\"\n- %prog [<file-option>] list\n- %prog [<file-option>] [--editor <editor-path>] edit\n-\n- %prog [<file-option>] get name\n- %prog [<file-option>] set name value\n- %prog [<file-option>] unset name\n- \"\"\"\n-\n- summary = \"\"\"\n- Manage local and global configuration.\n+ \"\"\"Manage local and global configuration.\n \n Subcommands:\n \n@@ -41,6 +29,18 @@\n default.\n \"\"\"\n \n+ name = 'config'\n+ usage = \"\"\"\n+ %prog [<file-option>] list\n+ %prog [<file-option>] [--editor <editor-path>] edit\n+\n+ %prog [<file-option>] get name\n+ %prog [<file-option>] set name value\n+ %prog [<file-option>] unset name\n+ \"\"\"\n+\n+ summary = \"Manage local and global configuration.\"\n+\n def __init__(self, *args, **kwargs):\n super(ConfigurationCommand, self).__init__(*args, **kwargs)\n", "issue": "config entry misdisplayed in pip 10 help\n* Pip version: 10.0.0.dev0 (94d186b)\r\n* Python version: 3.6\r\n* Operating system: Arch Linux\r\n\r\n### Description:\r\n\r\nsee below\r\n\r\n### What I've run:\r\n\r\n```\r\n$ pip -h\r\n\r\nUsage: \r\n pip <command> [options]\r\n\r\nCommands:\r\n install Install packages.\r\n download Download packages.\r\n uninstall Uninstall packages.\r\n freeze Output installed packages in requirements format.\r\n list List installed packages.\r\n show Show information about installed packages.\r\n check Verify installed packages have compatible dependencies.\r\n config \r\n Manage local and global configuration.\r\n \r\n Subcommands:\r\n \r\n list: List the active configuration (or from the file specified)\r\n edit: Edit the configuration file in an editor\r\n get: Get the value associated with name\r\n set: Set the name=value\r\n unset: Unset the value associated with name\r\n \r\n If none of --user, --global and --venv are passed, a virtual\r\n environment configuration file is used if one is active and the file\r\n exists. 
Otherwise, all modifications happen on the to the user file by\r\n default.\r\n \r\n search Search PyPI for packages.\r\n wheel Build wheels from your requirements.\r\n hash Compute hashes of package archives.\r\n completion A helper command used for command completion.\r\n help Show help for commands.\r\n\r\n<elided>\r\n```\r\n\r\nnote how the config entry in the help displays way too much.\n", "before_files": [{"content": "import logging\nimport os\nimport subprocess\n\nfrom pip._internal.basecommand import Command\nfrom pip._internal.configuration import Configuration, kinds\nfrom pip._internal.exceptions import PipError\nfrom pip._internal.locations import venv_config_file\nfrom pip._internal.status_codes import ERROR, SUCCESS\nfrom pip._internal.utils.misc import get_prog\n\nlogger = logging.getLogger(__name__)\n\n\nclass ConfigurationCommand(Command):\n \"\"\"Manage local and global configuration.\"\"\"\n name = 'config'\n usage = \"\"\"\n %prog [<file-option>] list\n %prog [<file-option>] [--editor <editor-path>] edit\n\n %prog [<file-option>] get name\n %prog [<file-option>] set name value\n %prog [<file-option>] unset name\n \"\"\"\n\n summary = \"\"\"\n Manage local and global configuration.\n\n Subcommands:\n\n list: List the active configuration (or from the file specified)\n edit: Edit the configuration file in an editor\n get: Get the value associated with name\n set: Set the name=value\n unset: Unset the value associated with name\n\n If none of --user, --global and --venv are passed, a virtual\n environment configuration file is used if one is active and the file\n exists. Otherwise, all modifications happen on the to the user file by\n default.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationCommand, self).__init__(*args, **kwargs)\n\n self.configuration = None\n\n self.cmd_opts.add_option(\n '--editor',\n dest='editor',\n action='store',\n default=None,\n help=(\n 'Editor to use to edit the file. 
Uses VISUAL or EDITOR '\n 'environment variables if not provided.'\n )\n )\n\n self.cmd_opts.add_option(\n '--global',\n dest='global_file',\n action='store_true',\n default=False,\n help='Use the system-wide configuration file only'\n )\n\n self.cmd_opts.add_option(\n '--user',\n dest='user_file',\n action='store_true',\n default=False,\n help='Use the user configuration file only'\n )\n\n self.cmd_opts.add_option(\n '--venv',\n dest='venv_file',\n action='store_true',\n default=False,\n help='Use the virtualenv configuration file only'\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options, args):\n handlers = {\n \"list\": self.list_values,\n \"edit\": self.open_in_editor,\n \"get\": self.get_name,\n \"set\": self.set_name_value,\n \"unset\": self.unset_name\n }\n\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\"Need an action ({}) to perform.\".format(\n \", \".join(sorted(handlers)))\n )\n return ERROR\n\n action = args[0]\n\n # Determine which configuration files are to be loaded\n # Depends on whether the command is modifying.\n try:\n load_only = self._determine_file(\n options, need_value=(action in [\"get\", \"set\", \"unset\", \"edit\"])\n )\n except PipError as e:\n logger.error(e.args[0])\n return ERROR\n\n # Load a new configuration\n self.configuration = Configuration(\n isolated=options.isolated_mode, load_only=load_only\n )\n self.configuration.load()\n\n # Error handling happens here, not in the action-handlers.\n try:\n handlers[action](options, args[1:])\n except PipError as e:\n logger.error(e.args[0])\n return ERROR\n\n return SUCCESS\n\n def _determine_file(self, options, need_value):\n file_options = {\n kinds.USER: options.user_file,\n kinds.GLOBAL: options.global_file,\n kinds.VENV: options.venv_file\n }\n\n if sum(file_options.values()) == 0:\n if not need_value:\n return None\n # Default to user, unless there's a virtualenv file.\n elif os.path.exists(venv_config_file):\n return kinds.VENV\n else:\n return kinds.USER\n elif sum(file_options.values()) == 1:\n # There's probably a better expression for this.\n return [key for key in file_options if file_options[key]][0]\n\n raise PipError(\n \"Need exactly one file to operate upon \"\n \"(--user, --venv, --global) to perform.\"\n )\n\n def list_values(self, options, args):\n self._get_n_args(args, \"list\", n=0)\n\n for key, value in sorted(self.configuration.items()):\n logger.info(\"%s=%r\", key, value)\n\n def get_name(self, options, args):\n key = self._get_n_args(args, \"get [name]\", n=1)\n value = self.configuration.get_value(key)\n\n logger.info(\"%s\", value)\n\n def set_name_value(self, options, args):\n key, value = self._get_n_args(args, \"set [name] [value]\", n=2)\n self.configuration.set_value(key, value)\n\n self._save_configuration()\n\n def unset_name(self, options, args):\n key = self._get_n_args(args, \"unset [name]\", n=1)\n self.configuration.unset_value(key)\n\n self._save_configuration()\n\n def open_in_editor(self, options, args):\n editor = self._determine_editor(options)\n\n fname = self.configuration.get_file_to_edit()\n if fname is None:\n raise PipError(\"Could not determine appropriate file.\")\n\n try:\n subprocess.check_call([editor, fname])\n except subprocess.CalledProcessError as e:\n raise PipError(\n \"Editor Subprocess exited with exit code {}\"\n .format(e.returncode)\n )\n\n def _get_n_args(self, args, example, n):\n \"\"\"Helper to make sure the command got the right number of arguments\n \"\"\"\n if len(args) != 
n:\n msg = (\n 'Got unexpected number of arguments, expected {}. '\n '(example: \"{} config {}\")'\n ).format(n, get_prog(), example)\n raise PipError(msg)\n\n if n == 1:\n return args[0]\n else:\n return args\n\n def _save_configuration(self):\n # We successfully ran a modifying command. Need to save the\n # configuration.\n try:\n self.configuration.save()\n except Exception:\n logger.error(\n \"Unable to save configuration. Please report this as a bug.\",\n exc_info=1\n )\n raise PipError(\"Internal Error.\")\n\n def _determine_editor(self, options):\n if options.editor is not None:\n return options.editor\n elif \"VISUAL\" in os.environ:\n return os.environ[\"VISUAL\"]\n elif \"EDITOR\" in os.environ:\n return os.environ[\"EDITOR\"]\n else:\n raise PipError(\"Could not determine editor to use.\")\n", "path": "src/pip/_internal/commands/configuration.py"}], "after_files": [{"content": "import logging\nimport os\nimport subprocess\n\nfrom pip._internal.basecommand import Command\nfrom pip._internal.configuration import Configuration, kinds\nfrom pip._internal.exceptions import PipError\nfrom pip._internal.locations import venv_config_file\nfrom pip._internal.status_codes import ERROR, SUCCESS\nfrom pip._internal.utils.misc import get_prog\n\nlogger = logging.getLogger(__name__)\n\n\nclass ConfigurationCommand(Command):\n \"\"\"Manage local and global configuration.\n\n Subcommands:\n\n list: List the active configuration (or from the file specified)\n edit: Edit the configuration file in an editor\n get: Get the value associated with name\n set: Set the name=value\n unset: Unset the value associated with name\n\n If none of --user, --global and --venv are passed, a virtual\n environment configuration file is used if one is active and the file\n exists. Otherwise, all modifications happen on the to the user file by\n default.\n \"\"\"\n\n name = 'config'\n usage = \"\"\"\n %prog [<file-option>] list\n %prog [<file-option>] [--editor <editor-path>] edit\n\n %prog [<file-option>] get name\n %prog [<file-option>] set name value\n %prog [<file-option>] unset name\n \"\"\"\n\n summary = \"Manage local and global configuration.\"\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationCommand, self).__init__(*args, **kwargs)\n\n self.configuration = None\n\n self.cmd_opts.add_option(\n '--editor',\n dest='editor',\n action='store',\n default=None,\n help=(\n 'Editor to use to edit the file. 
Uses VISUAL or EDITOR '\n 'environment variables if not provided.'\n )\n )\n\n self.cmd_opts.add_option(\n '--global',\n dest='global_file',\n action='store_true',\n default=False,\n help='Use the system-wide configuration file only'\n )\n\n self.cmd_opts.add_option(\n '--user',\n dest='user_file',\n action='store_true',\n default=False,\n help='Use the user configuration file only'\n )\n\n self.cmd_opts.add_option(\n '--venv',\n dest='venv_file',\n action='store_true',\n default=False,\n help='Use the virtualenv configuration file only'\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options, args):\n handlers = {\n \"list\": self.list_values,\n \"edit\": self.open_in_editor,\n \"get\": self.get_name,\n \"set\": self.set_name_value,\n \"unset\": self.unset_name\n }\n\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\"Need an action ({}) to perform.\".format(\n \", \".join(sorted(handlers)))\n )\n return ERROR\n\n action = args[0]\n\n # Determine which configuration files are to be loaded\n # Depends on whether the command is modifying.\n try:\n load_only = self._determine_file(\n options, need_value=(action in [\"get\", \"set\", \"unset\", \"edit\"])\n )\n except PipError as e:\n logger.error(e.args[0])\n return ERROR\n\n # Load a new configuration\n self.configuration = Configuration(\n isolated=options.isolated_mode, load_only=load_only\n )\n self.configuration.load()\n\n # Error handling happens here, not in the action-handlers.\n try:\n handlers[action](options, args[1:])\n except PipError as e:\n logger.error(e.args[0])\n return ERROR\n\n return SUCCESS\n\n def _determine_file(self, options, need_value):\n file_options = {\n kinds.USER: options.user_file,\n kinds.GLOBAL: options.global_file,\n kinds.VENV: options.venv_file\n }\n\n if sum(file_options.values()) == 0:\n if not need_value:\n return None\n # Default to user, unless there's a virtualenv file.\n elif os.path.exists(venv_config_file):\n return kinds.VENV\n else:\n return kinds.USER\n elif sum(file_options.values()) == 1:\n # There's probably a better expression for this.\n return [key for key in file_options if file_options[key]][0]\n\n raise PipError(\n \"Need exactly one file to operate upon \"\n \"(--user, --venv, --global) to perform.\"\n )\n\n def list_values(self, options, args):\n self._get_n_args(args, \"list\", n=0)\n\n for key, value in sorted(self.configuration.items()):\n logger.info(\"%s=%r\", key, value)\n\n def get_name(self, options, args):\n key = self._get_n_args(args, \"get [name]\", n=1)\n value = self.configuration.get_value(key)\n\n logger.info(\"%s\", value)\n\n def set_name_value(self, options, args):\n key, value = self._get_n_args(args, \"set [name] [value]\", n=2)\n self.configuration.set_value(key, value)\n\n self._save_configuration()\n\n def unset_name(self, options, args):\n key = self._get_n_args(args, \"unset [name]\", n=1)\n self.configuration.unset_value(key)\n\n self._save_configuration()\n\n def open_in_editor(self, options, args):\n editor = self._determine_editor(options)\n\n fname = self.configuration.get_file_to_edit()\n if fname is None:\n raise PipError(\"Could not determine appropriate file.\")\n\n try:\n subprocess.check_call([editor, fname])\n except subprocess.CalledProcessError as e:\n raise PipError(\n \"Editor Subprocess exited with exit code {}\"\n .format(e.returncode)\n )\n\n def _get_n_args(self, args, example, n):\n \"\"\"Helper to make sure the command got the right number of arguments\n \"\"\"\n if len(args) != 
n:\n msg = (\n 'Got unexpected number of arguments, expected {}. '\n '(example: \"{} config {}\")'\n ).format(n, get_prog(), example)\n raise PipError(msg)\n\n if n == 1:\n return args[0]\n else:\n return args\n\n def _save_configuration(self):\n # We successfully ran a modifying command. Need to save the\n # configuration.\n try:\n self.configuration.save()\n except Exception:\n logger.error(\n \"Unable to save configuration. Please report this as a bug.\",\n exc_info=1\n )\n raise PipError(\"Internal Error.\")\n\n def _determine_editor(self, options):\n if options.editor is not None:\n return options.editor\n elif \"VISUAL\" in os.environ:\n return os.environ[\"VISUAL\"]\n elif \"EDITOR\" in os.environ:\n return os.environ[\"EDITOR\"]\n else:\n raise PipError(\"Could not determine editor to use.\")\n", "path": "src/pip/_internal/commands/configuration.py"}]} | 2,682 | 316 |
gh_patches_debug_626 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2810 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HCL2 parser cannot parse functions with comments interleaved in the arguments.
**Describe the issue**
The HCL2 parser fails to parse a file containing an expression with a Terraform function call that has comments interleaved within its arguments.
**Example Value**
A file that contains the following example variable will fail to parse.
```hcl
variable "example" {
default = function(
# this comment is fine
argument1,
# this comment causes a parsing error
argument2
# this comment is fine
)
}
```
This seems to be a replicated issue in the downstream as well > https://github.com/amplify-education/python-hcl2/issues/95.
I have opened a PR to fix this in the bridgecrewio specific parser > https://github.com/bridgecrewio/python-hcl2/pull/29.
**Question**
Is the bridgecrewio HCL2 Parser intended to be merged upstream?
If not, I will implement the change in Amplify's codebase separately.
**An aside**
Checkov is an awesome tool; it makes the jobs of myself and the rest of the Platform/DevOps Engineers on my team so much easier!
--- END ISSUE ---
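A minimal way to reproduce the failure described above is to feed the example to the parser directly. This assumes the `hcl2` package (the import name shared by python-hcl2 and the bc-python-hcl2 fork) is installed and exposes `hcl2.load` for file-like objects; on an affected version the call raises a parse error at the comment before `argument2`.

```python
# Minimal reproduction sketch; assumes the hcl2 package and its load() API.
import io

import hcl2

SNIPPET = '''
variable "example" {
  default = function(
    # this comment is fine
    argument1,
    # this comment causes a parsing error
    argument2
    # this comment is fine
  )
}
'''

try:
    print(hcl2.load(io.StringIO(SNIPPET)))
except Exception as exc:
    # Affected parser versions fail here on the comment that follows a
    # function argument without a trailing comma.
    print('parse failed:', exc)
```

The patch further below simply bumps the pinned `bc-python-hcl2` version so that checkov picks up the upstream parser fix referenced in the issue.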
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage==5.5",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit",
32 "jsonschema",
33 ]
34 },
35 install_requires=[
36 "bc-python-hcl2==0.3.38",
37 "cloudsplaining>=0.4.1",
38 "deep_merge",
39 "tabulate",
40 "colorama",
41 "termcolor",
42 "junit-xml>=1.9",
43 "dpath>=1.5.0,<2",
44 "pyyaml>=5.4.1",
45 "boto3>=1.17",
46 "GitPython",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker",
55 "configargparse",
56 "argcomplete",
57 "detect-secrets",
58 "policyuniverse",
59 "typing-extensions",
60 "cachetools",
61 "cyclonedx-python-lib>=0.11.0,<1.0.0",
62 "click>=8.0.0",
63 "aiohttp",
64 "aiodns",
65 "aiomultiprocess",
66 "jsonpath_ng",
67 "jsonschema~=3.0",
68 "prettytable>=3.0.0",
69 "pycep-parser==0.3.4",
70 "charset-normalizer",
71 ],
72 license="Apache License 2.0",
73 name="checkov",
74 version=version,
75 python_requires=">=3.7",
76 description="Infrastructure as code static analysis",
77 author="bridgecrew",
78 author_email="[email protected]",
79 url="https://github.com/bridgecrewio/checkov",
80 packages=setuptools.find_packages(exclude=["tests*", "integration_tests*"]),
81 include_package_data=True,
82 package_dir={
83 "checkov.bicep.checks.graph_checks": "checkov/bicep/checks/graph_checks",
84 "checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks",
85 },
86 package_data={
87 "checkov.terraform.checks.graph_checks": [
88 "aws/*.yaml",
89 "gcp/*.yaml",
90 "azure/*.yaml",
91 ],
92 "checkov.common.util.templates": [
93 "*.jinja2"
94 ]
95 },
96 scripts=["bin/checkov", "bin/checkov.cmd"],
97 long_description=long_description,
98 long_description_content_type="text/markdown",
99 classifiers=[
100 "Environment :: Console",
101 "Intended Audience :: Developers",
102 "Intended Audience :: System Administrators",
103 "Programming Language :: Python :: 3.7",
104 "Programming Language :: Python :: 3.8",
105 "Programming Language :: Python :: 3.9",
106 "Programming Language :: Python :: 3.10",
107 "Topic :: Security",
108 "Topic :: Software Development :: Build Tools",
109 ],
110 )
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
]
},
install_requires=[
- "bc-python-hcl2==0.3.38",
+ "bc-python-hcl2==0.3.39",
"cloudsplaining>=0.4.1",
"deep_merge",
"tabulate",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n ]\n },\n install_requires=[\n- \"bc-python-hcl2==0.3.38\",\n+ \"bc-python-hcl2==0.3.39\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n", "issue": "HCL2 parser cannot parse functions with comments interleaved in the arguments.\n**Describe the issue**\r\nThe HCL2 parser fails to parse a file that contains an expression with a Terraform function call that contains comments interleaved within the arguments.\r\n\r\n**Example Value**\r\nA file that contains the following exaple variable will fail to parse.\r\n```hcl\r\nvariable \"example\" {\r\n default = function(\r\n # this comment is fine\r\n argument1,\r\n # this comment causes a parsing error\r\n argument2\r\n # this comment is fine\r\n )\r\n}\r\n```\r\n\r\nThis seems to be a replicated issue in the downstream as well > https://github.com/amplify-education/python-hcl2/issues/95.\r\n\r\nI have opened a PR to fix this in the bridgecrewio specific parser > https://github.com/bridgecrewio/python-hcl2/pull/29.\r\n\r\n**Question**\r\nIs the bridgecrewio HCL2 Parser intened to be merged upstream?\r\nIf not, I will implement the change in Amplify's codebase separately.\r\n\r\n**An aside**\r\nCheckov is an awesome tool, it makes the jobs of myself and the rest of the Platform/DevOps Engineers on my team so much easier!\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.38\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions\",\n \"cachetools\",\n \"cyclonedx-python-lib>=0.11.0,<1.0.0\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath_ng\",\n \"jsonschema~=3.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.4\",\n \"charset-normalizer\",\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.bicep.checks.graph_checks\": \"checkov/bicep/checks/graph_checks\",\n 
\"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\n },\n package_data={\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ],\n \"checkov.common.util.templates\": [\n \"*.jinja2\"\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.39\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions\",\n \"cachetools\",\n \"cyclonedx-python-lib>=0.11.0,<1.0.0\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath_ng\",\n \"jsonschema~=3.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.4\",\n \"charset-normalizer\",\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.bicep.checks.graph_checks\": \"checkov/bicep/checks/graph_checks\",\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\n },\n package_data={\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ],\n \"checkov.common.util.templates\": [\n \"*.jinja2\"\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python :: 3.7\",\n 
\"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n ],\n)\n", "path": "setup.py"}]} | 1,543 | 94 |
gh_patches_debug_20117 | rasdani/github-patches | git_diff | iterative__dvc-3843 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
run-cache: fails for params
```sh
$ echo "train: 1\nlr: 2" > params.yaml
$ dvc run -n test -p train,lr -o params2.yaml "cat params.yaml > params2.yaml"
ERROR: failed to reproduce 'dvc.yaml': 'deps'
```
Verbose log:
```sh
------------------------------------------------------------
Traceback (most recent call last):
File "/home/saugat/repos/iterative/dvc/dvc/main.py", line 48, in main
ret = cmd.run()
File "/home/saugat/repos/iterative/dvc/dvc/command/run.py", line 51, in run
single_stage=self.args.single_stage,
File "/home/saugat/repos/iterative/dvc/dvc/repo/__init__.py", line 25, in wrapper
ret = f(repo, *args, **kwargs)
File "/home/saugat/repos/iterative/dvc/dvc/repo/scm_context.py", line 4, in run
result = method(repo, *args, **kw)
File "/home/saugat/repos/iterative/dvc/dvc/repo/run.py", line 88, in run
run_cache=kwargs.get("run_cache", True),
File "/home/saugat/repos/iterative/dvc/.env/py36/lib/python3.6/site-packages/funcy/decorators.py", line 39, in wrapper
return deco(call, *dargs, **dkwargs)
File "/home/saugat/repos/iterative/dvc/dvc/stage/decorators.py", line 35, in rwlocked
return call()
File "/home/saugat/repos/iterative/dvc/.env/py36/lib/python3.6/site-packages/funcy/decorators.py", line 60, in __call__
return self._func(*self._args, **self._kwargs)
File "/home/saugat/repos/iterative/dvc/dvc/stage/__init__.py", line 417, in run
self.save()
File "/home/saugat/repos/iterative/dvc/dvc/stage/__init__.py", line 368, in save
self.repo.stage_cache.save(self)
File "/home/saugat/repos/iterative/dvc/dvc/stage/cache.py", line 130, in save
for out in self._uncached_outs(stage, cache):
File "/home/saugat/repos/iterative/dvc/dvc/stage/cache.py", line 107, in _uncached_outs
cached_stage = self._create_stage(cache)
File "/home/saugat/repos/iterative/dvc/dvc/stage/cache.py", line 88, in _create_stage
deps=[dep["path"] for dep in cache["deps"]],
KeyError: 'deps'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/stage/cache.py`
Content:
```
1 import logging
2 import os
3 from contextlib import contextmanager
4
5 import yaml
6 from funcy import first
7 from voluptuous import Invalid
8
9 from dvc.schema import COMPILED_LOCK_FILE_STAGE_SCHEMA
10 from dvc.serialize import to_single_stage_lockfile
11 from dvc.stage.loader import StageLoader
12 from dvc.utils import dict_sha256, relpath
13 from dvc.utils.fs import makedirs
14 from dvc.utils.stage import dump_stage_file
15
16 logger = logging.getLogger(__name__)
17
18
19 def _get_cache_hash(cache, key=False):
20 if key:
21 cache["outs"] = [out["path"] for out in cache.get("outs", [])]
22 return dict_sha256(cache)
23
24
25 def _get_stage_hash(stage):
26 if not (stage.cmd and stage.deps and stage.outs):
27 return None
28
29 for dep in stage.deps:
30 if not (dep.scheme == "local" and dep.def_path and dep.get_checksum()):
31 return None
32
33 for out in stage.outs:
34 if out.scheme != "local" or not out.def_path or out.persist:
35 return None
36
37 return _get_cache_hash(to_single_stage_lockfile(stage), key=True)
38
39
40 class StageCache:
41 def __init__(self, repo):
42 self.repo = repo
43 self.cache_dir = os.path.join(repo.cache.local.cache_dir, "runs")
44
45 def _get_cache_dir(self, key):
46 return os.path.join(self.cache_dir, key[:2], key)
47
48 def _get_cache_path(self, key, value):
49 return os.path.join(self._get_cache_dir(key), value)
50
51 def _load_cache(self, key, value):
52 path = self._get_cache_path(key, value)
53
54 try:
55 with open(path) as fobj:
56 return COMPILED_LOCK_FILE_STAGE_SCHEMA(yaml.safe_load(fobj))
57 except FileNotFoundError:
58 return None
59 except (yaml.error.YAMLError, Invalid):
60 logger.warning("corrupted cache file '%s'.", relpath(path))
61 os.unlink(path)
62 return None
63
64 def _load(self, stage):
65 key = _get_stage_hash(stage)
66 if not key:
67 return None
68
69 cache_dir = self._get_cache_dir(key)
70 if not os.path.exists(cache_dir):
71 return None
72
73 for value in os.listdir(cache_dir):
74 cache = self._load_cache(key, value)
75 if cache:
76 return cache
77
78 return None
79
80 def _create_stage(self, cache):
81 from dvc.stage import create_stage, PipelineStage
82
83 stage = create_stage(
84 PipelineStage,
85 repo=self.repo,
86 path="dvc.yaml",
87 cmd=cache["cmd"],
88 deps=[dep["path"] for dep in cache["deps"]],
89 outs=[out["path"] for out in cache["outs"]],
90 )
91 StageLoader.fill_from_lock(stage, cache)
92 return stage
93
94 @contextmanager
95 def _cache_type_copy(self):
96 cache_types = self.repo.cache.local.cache_types
97 self.repo.cache.local.cache_types = ["copy"]
98 try:
99 yield
100 finally:
101 self.repo.cache.local.cache_types = cache_types
102
103 def _uncached_outs(self, stage, cache):
104 # NOTE: using temporary stage to avoid accidentally modifying original
105 # stage and to workaround `commit/checkout` not working for uncached
106 # outputs.
107 cached_stage = self._create_stage(cache)
108
109 outs_no_cache = [
110 out.def_path for out in stage.outs if not out.use_cache
111 ]
112
113 # NOTE: using copy link to make it look like a git-tracked file
114 with self._cache_type_copy():
115 for out in cached_stage.outs:
116 if out.def_path in outs_no_cache:
117 yield out
118
119 def save(self, stage):
120 cache_key = _get_stage_hash(stage)
121 if not cache_key:
122 return
123
124 cache = to_single_stage_lockfile(stage)
125 cache_value = _get_cache_hash(cache)
126
127 existing_cache = self._load_cache(cache_key, cache_value)
128 cache = existing_cache or cache
129
130 for out in self._uncached_outs(stage, cache):
131 out.commit()
132
133 if existing_cache:
134 return
135
136 # sanity check
137 COMPILED_LOCK_FILE_STAGE_SCHEMA(cache)
138
139 path = self._get_cache_path(cache_key, cache_value)
140 dpath = os.path.dirname(path)
141 makedirs(dpath, exist_ok=True)
142 dump_stage_file(path, cache)
143
144 def is_cached(self, stage):
145 return bool(self._load(stage))
146
147 def restore(self, stage):
148 cache = self._load(stage)
149 if not cache:
150 return
151 StageLoader.fill_from_lock(stage, cache)
152
153 for out in self._uncached_outs(stage, cache):
154 out.checkout()
155
156 @staticmethod
157 def _transfer(func, from_remote, to_remote):
158 ret = []
159
160 runs = from_remote.path_info / "runs"
161 if not from_remote.exists(runs):
162 return []
163
164 for src in from_remote.walk_files(runs):
165 rel = src.relative_to(from_remote.path_info)
166 dst = to_remote.path_info / rel
167 key = dst.parent
168 # check if any build cache already exists for this key
169 if to_remote.exists(key) and first(to_remote.walk_files(key)):
170 continue
171 func(src, dst)
172 ret.append((src.parent.name, src.name))
173
174 return ret
175
176 def push(self, remote):
177 remote = self.repo.cloud.get_remote(remote)
178 return self._transfer(remote.upload, self.repo.cache.local, remote)
179
180 def pull(self, remote):
181 remote = self.repo.cloud.get_remote(remote)
182 return self._transfer(remote.download, remote, self.repo.cache.local)
183
184 def get_used_cache(self, used_run_cache, *args, **kwargs):
185 from dvc.cache import NamedCache
186
187 cache = NamedCache()
188
189 for key, value in used_run_cache:
190 entry = self._load_cache(key, value)
191 if not entry:
192 continue
193 stage = self._create_stage(entry)
194 cache.update(stage.get_used_cache(*args, **kwargs))
195 return cache
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/stage/cache.py b/dvc/stage/cache.py
--- a/dvc/stage/cache.py
+++ b/dvc/stage/cache.py
@@ -80,12 +80,26 @@
def _create_stage(self, cache):
from dvc.stage import create_stage, PipelineStage
+ params = []
+ for param in cache.get("params", []):
+ if isinstance(param, str):
+ params.append(param)
+ continue
+
+ assert isinstance(param, dict)
+ assert len(param) == 1
+ path = list(param.keys())[0]
+ params_list = param[path]
+ assert isinstance(params_list, list)
+ params.append(f"{path}:" + ",".join(params_list))
+
stage = create_stage(
PipelineStage,
repo=self.repo,
path="dvc.yaml",
cmd=cache["cmd"],
- deps=[dep["path"] for dep in cache["deps"]],
+ params=params,
+ deps=[dep["path"] for dep in cache.get("deps", [])],
outs=[out["path"] for out in cache["outs"]],
)
StageLoader.fill_from_lock(stage, cache)
| {"golden_diff": "diff --git a/dvc/stage/cache.py b/dvc/stage/cache.py\n--- a/dvc/stage/cache.py\n+++ b/dvc/stage/cache.py\n@@ -80,12 +80,26 @@\n def _create_stage(self, cache):\n from dvc.stage import create_stage, PipelineStage\n \n+ params = []\n+ for param in cache.get(\"params\", []):\n+ if isinstance(param, str):\n+ params.append(param)\n+ continue\n+\n+ assert isinstance(param, dict)\n+ assert len(param) == 1\n+ path = list(param.keys())[0]\n+ params_list = param[path]\n+ assert isinstance(params_list, list)\n+ params.append(f\"{path}:\" + \",\".join(params_list))\n+\n stage = create_stage(\n PipelineStage,\n repo=self.repo,\n path=\"dvc.yaml\",\n cmd=cache[\"cmd\"],\n- deps=[dep[\"path\"] for dep in cache[\"deps\"]],\n+ params=params,\n+ deps=[dep[\"path\"] for dep in cache.get(\"deps\", [])],\n outs=[out[\"path\"] for out in cache[\"outs\"]],\n )\n StageLoader.fill_from_lock(stage, cache)\n", "issue": "run-cache: fails for params\n```sh\r\n$ echo \"train: 1\\nlr: 2\" > params.yaml\r\n$ dvc run -n test -p train,lr -o params2.yaml \"cat params.yaml > params2.yaml\"\r\nERROR: failed to reproduce 'dvc.yaml': 'deps'\r\n```\r\nVerbose log:\r\n```sh\r\n------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/home/saugat/repos/iterative/dvc/dvc/main.py\", line 48, in main\r\n ret = cmd.run()\r\n File \"/home/saugat/repos/iterative/dvc/dvc/command/run.py\", line 51, in run\r\n single_stage=self.args.single_stage,\r\n File \"/home/saugat/repos/iterative/dvc/dvc/repo/__init__.py\", line 25, in wrapper\r\n ret = f(repo, *args, **kwargs)\r\n File \"/home/saugat/repos/iterative/dvc/dvc/repo/scm_context.py\", line 4, in run\r\n result = method(repo, *args, **kw)\r\n File \"/home/saugat/repos/iterative/dvc/dvc/repo/run.py\", line 88, in run\r\n run_cache=kwargs.get(\"run_cache\", True),\r\n File \"/home/saugat/repos/iterative/dvc/.env/py36/lib/python3.6/site-packages/funcy/decorators.py\", line 39, in wrapper\r\n return deco(call, *dargs, **dkwargs)\r\n File \"/home/saugat/repos/iterative/dvc/dvc/stage/decorators.py\", line 35, in rwlocked\r\n return call()\r\n File \"/home/saugat/repos/iterative/dvc/.env/py36/lib/python3.6/site-packages/funcy/decorators.py\", line 60, in __call__\r\n return self._func(*self._args, **self._kwargs)\r\n File \"/home/saugat/repos/iterative/dvc/dvc/stage/__init__.py\", line 417, in run\r\n self.save()\r\n File \"/home/saugat/repos/iterative/dvc/dvc/stage/__init__.py\", line 368, in save\r\n self.repo.stage_cache.save(self)\r\n File \"/home/saugat/repos/iterative/dvc/dvc/stage/cache.py\", line 130, in save\r\n for out in self._uncached_outs(stage, cache):\r\n File \"/home/saugat/repos/iterative/dvc/dvc/stage/cache.py\", line 107, in _uncached_outs\r\n cached_stage = self._create_stage(cache)\r\n File \"/home/saugat/repos/iterative/dvc/dvc/stage/cache.py\", line 88, in _create_stage\r\n deps=[dep[\"path\"] for dep in cache[\"deps\"]],\r\nKeyError: 'deps'\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\nimport os\nfrom contextlib import contextmanager\n\nimport yaml\nfrom funcy import first\nfrom voluptuous import Invalid\n\nfrom dvc.schema import COMPILED_LOCK_FILE_STAGE_SCHEMA\nfrom dvc.serialize import to_single_stage_lockfile\nfrom dvc.stage.loader import StageLoader\nfrom dvc.utils import dict_sha256, relpath\nfrom dvc.utils.fs import makedirs\nfrom dvc.utils.stage import dump_stage_file\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_cache_hash(cache, key=False):\n if key:\n 
cache[\"outs\"] = [out[\"path\"] for out in cache.get(\"outs\", [])]\n return dict_sha256(cache)\n\n\ndef _get_stage_hash(stage):\n if not (stage.cmd and stage.deps and stage.outs):\n return None\n\n for dep in stage.deps:\n if not (dep.scheme == \"local\" and dep.def_path and dep.get_checksum()):\n return None\n\n for out in stage.outs:\n if out.scheme != \"local\" or not out.def_path or out.persist:\n return None\n\n return _get_cache_hash(to_single_stage_lockfile(stage), key=True)\n\n\nclass StageCache:\n def __init__(self, repo):\n self.repo = repo\n self.cache_dir = os.path.join(repo.cache.local.cache_dir, \"runs\")\n\n def _get_cache_dir(self, key):\n return os.path.join(self.cache_dir, key[:2], key)\n\n def _get_cache_path(self, key, value):\n return os.path.join(self._get_cache_dir(key), value)\n\n def _load_cache(self, key, value):\n path = self._get_cache_path(key, value)\n\n try:\n with open(path) as fobj:\n return COMPILED_LOCK_FILE_STAGE_SCHEMA(yaml.safe_load(fobj))\n except FileNotFoundError:\n return None\n except (yaml.error.YAMLError, Invalid):\n logger.warning(\"corrupted cache file '%s'.\", relpath(path))\n os.unlink(path)\n return None\n\n def _load(self, stage):\n key = _get_stage_hash(stage)\n if not key:\n return None\n\n cache_dir = self._get_cache_dir(key)\n if not os.path.exists(cache_dir):\n return None\n\n for value in os.listdir(cache_dir):\n cache = self._load_cache(key, value)\n if cache:\n return cache\n\n return None\n\n def _create_stage(self, cache):\n from dvc.stage import create_stage, PipelineStage\n\n stage = create_stage(\n PipelineStage,\n repo=self.repo,\n path=\"dvc.yaml\",\n cmd=cache[\"cmd\"],\n deps=[dep[\"path\"] for dep in cache[\"deps\"]],\n outs=[out[\"path\"] for out in cache[\"outs\"]],\n )\n StageLoader.fill_from_lock(stage, cache)\n return stage\n\n @contextmanager\n def _cache_type_copy(self):\n cache_types = self.repo.cache.local.cache_types\n self.repo.cache.local.cache_types = [\"copy\"]\n try:\n yield\n finally:\n self.repo.cache.local.cache_types = cache_types\n\n def _uncached_outs(self, stage, cache):\n # NOTE: using temporary stage to avoid accidentally modifying original\n # stage and to workaround `commit/checkout` not working for uncached\n # outputs.\n cached_stage = self._create_stage(cache)\n\n outs_no_cache = [\n out.def_path for out in stage.outs if not out.use_cache\n ]\n\n # NOTE: using copy link to make it look like a git-tracked file\n with self._cache_type_copy():\n for out in cached_stage.outs:\n if out.def_path in outs_no_cache:\n yield out\n\n def save(self, stage):\n cache_key = _get_stage_hash(stage)\n if not cache_key:\n return\n\n cache = to_single_stage_lockfile(stage)\n cache_value = _get_cache_hash(cache)\n\n existing_cache = self._load_cache(cache_key, cache_value)\n cache = existing_cache or cache\n\n for out in self._uncached_outs(stage, cache):\n out.commit()\n\n if existing_cache:\n return\n\n # sanity check\n COMPILED_LOCK_FILE_STAGE_SCHEMA(cache)\n\n path = self._get_cache_path(cache_key, cache_value)\n dpath = os.path.dirname(path)\n makedirs(dpath, exist_ok=True)\n dump_stage_file(path, cache)\n\n def is_cached(self, stage):\n return bool(self._load(stage))\n\n def restore(self, stage):\n cache = self._load(stage)\n if not cache:\n return\n StageLoader.fill_from_lock(stage, cache)\n\n for out in self._uncached_outs(stage, cache):\n out.checkout()\n\n @staticmethod\n def _transfer(func, from_remote, to_remote):\n ret = []\n\n runs = from_remote.path_info / \"runs\"\n if not 
from_remote.exists(runs):\n return []\n\n for src in from_remote.walk_files(runs):\n rel = src.relative_to(from_remote.path_info)\n dst = to_remote.path_info / rel\n key = dst.parent\n # check if any build cache already exists for this key\n if to_remote.exists(key) and first(to_remote.walk_files(key)):\n continue\n func(src, dst)\n ret.append((src.parent.name, src.name))\n\n return ret\n\n def push(self, remote):\n remote = self.repo.cloud.get_remote(remote)\n return self._transfer(remote.upload, self.repo.cache.local, remote)\n\n def pull(self, remote):\n remote = self.repo.cloud.get_remote(remote)\n return self._transfer(remote.download, remote, self.repo.cache.local)\n\n def get_used_cache(self, used_run_cache, *args, **kwargs):\n from dvc.cache import NamedCache\n\n cache = NamedCache()\n\n for key, value in used_run_cache:\n entry = self._load_cache(key, value)\n if not entry:\n continue\n stage = self._create_stage(entry)\n cache.update(stage.get_used_cache(*args, **kwargs))\n return cache\n", "path": "dvc/stage/cache.py"}], "after_files": [{"content": "import logging\nimport os\nfrom contextlib import contextmanager\n\nimport yaml\nfrom funcy import first\nfrom voluptuous import Invalid\n\nfrom dvc.schema import COMPILED_LOCK_FILE_STAGE_SCHEMA\nfrom dvc.serialize import to_single_stage_lockfile\nfrom dvc.stage.loader import StageLoader\nfrom dvc.utils import dict_sha256, relpath\nfrom dvc.utils.fs import makedirs\nfrom dvc.utils.stage import dump_stage_file\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_cache_hash(cache, key=False):\n if key:\n cache[\"outs\"] = [out[\"path\"] for out in cache.get(\"outs\", [])]\n return dict_sha256(cache)\n\n\ndef _get_stage_hash(stage):\n if not (stage.cmd and stage.deps and stage.outs):\n return None\n\n for dep in stage.deps:\n if not (dep.scheme == \"local\" and dep.def_path and dep.get_checksum()):\n return None\n\n for out in stage.outs:\n if out.scheme != \"local\" or not out.def_path or out.persist:\n return None\n\n return _get_cache_hash(to_single_stage_lockfile(stage), key=True)\n\n\nclass StageCache:\n def __init__(self, repo):\n self.repo = repo\n self.cache_dir = os.path.join(repo.cache.local.cache_dir, \"runs\")\n\n def _get_cache_dir(self, key):\n return os.path.join(self.cache_dir, key[:2], key)\n\n def _get_cache_path(self, key, value):\n return os.path.join(self._get_cache_dir(key), value)\n\n def _load_cache(self, key, value):\n path = self._get_cache_path(key, value)\n\n try:\n with open(path) as fobj:\n return COMPILED_LOCK_FILE_STAGE_SCHEMA(yaml.safe_load(fobj))\n except FileNotFoundError:\n return None\n except (yaml.error.YAMLError, Invalid):\n logger.warning(\"corrupted cache file '%s'.\", relpath(path))\n os.unlink(path)\n return None\n\n def _load(self, stage):\n key = _get_stage_hash(stage)\n if not key:\n return None\n\n cache_dir = self._get_cache_dir(key)\n if not os.path.exists(cache_dir):\n return None\n\n for value in os.listdir(cache_dir):\n cache = self._load_cache(key, value)\n if cache:\n return cache\n\n return None\n\n def _create_stage(self, cache):\n from dvc.stage import create_stage, PipelineStage\n\n params = []\n for param in cache.get(\"params\", []):\n if isinstance(param, str):\n params.append(param)\n continue\n\n assert isinstance(param, dict)\n assert len(param) == 1\n path = list(param.keys())[0]\n params_list = param[path]\n assert isinstance(params_list, list)\n params.append(f\"{path}:\" + \",\".join(params_list))\n\n stage = create_stage(\n PipelineStage,\n repo=self.repo,\n 
path=\"dvc.yaml\",\n cmd=cache[\"cmd\"],\n params=params,\n deps=[dep[\"path\"] for dep in cache.get(\"deps\", [])],\n outs=[out[\"path\"] for out in cache[\"outs\"]],\n )\n StageLoader.fill_from_lock(stage, cache)\n return stage\n\n @contextmanager\n def _cache_type_copy(self):\n cache_types = self.repo.cache.local.cache_types\n self.repo.cache.local.cache_types = [\"copy\"]\n try:\n yield\n finally:\n self.repo.cache.local.cache_types = cache_types\n\n def _uncached_outs(self, stage, cache):\n # NOTE: using temporary stage to avoid accidentally modifying original\n # stage and to workaround `commit/checkout` not working for uncached\n # outputs.\n cached_stage = self._create_stage(cache)\n\n outs_no_cache = [\n out.def_path for out in stage.outs if not out.use_cache\n ]\n\n # NOTE: using copy link to make it look like a git-tracked file\n with self._cache_type_copy():\n for out in cached_stage.outs:\n if out.def_path in outs_no_cache:\n yield out\n\n def save(self, stage):\n cache_key = _get_stage_hash(stage)\n if not cache_key:\n return\n\n cache = to_single_stage_lockfile(stage)\n cache_value = _get_cache_hash(cache)\n\n existing_cache = self._load_cache(cache_key, cache_value)\n cache = existing_cache or cache\n\n for out in self._uncached_outs(stage, cache):\n out.commit()\n\n if existing_cache:\n return\n\n # sanity check\n COMPILED_LOCK_FILE_STAGE_SCHEMA(cache)\n\n path = self._get_cache_path(cache_key, cache_value)\n dpath = os.path.dirname(path)\n makedirs(dpath, exist_ok=True)\n dump_stage_file(path, cache)\n\n def is_cached(self, stage):\n return bool(self._load(stage))\n\n def restore(self, stage):\n cache = self._load(stage)\n if not cache:\n return\n StageLoader.fill_from_lock(stage, cache)\n\n for out in self._uncached_outs(stage, cache):\n out.checkout()\n\n @staticmethod\n def _transfer(func, from_remote, to_remote):\n ret = []\n\n runs = from_remote.path_info / \"runs\"\n if not from_remote.exists(runs):\n return []\n\n for src in from_remote.walk_files(runs):\n rel = src.relative_to(from_remote.path_info)\n dst = to_remote.path_info / rel\n key = dst.parent\n # check if any build cache already exists for this key\n if to_remote.exists(key) and first(to_remote.walk_files(key)):\n continue\n func(src, dst)\n ret.append((src.parent.name, src.name))\n\n return ret\n\n def push(self, remote):\n remote = self.repo.cloud.get_remote(remote)\n return self._transfer(remote.upload, self.repo.cache.local, remote)\n\n def pull(self, remote):\n remote = self.repo.cloud.get_remote(remote)\n return self._transfer(remote.download, remote, self.repo.cache.local)\n\n def get_used_cache(self, used_run_cache, *args, **kwargs):\n from dvc.cache import NamedCache\n\n cache = NamedCache()\n\n for key, value in used_run_cache:\n entry = self._load_cache(key, value)\n if not entry:\n continue\n stage = self._create_stage(entry)\n cache.update(stage.get_used_cache(*args, **kwargs))\n return cache\n", "path": "dvc/stage/cache.py"}]} | 2,714 | 262 |